Fix #13839: GHCi warnings do not respect the default module header
[ghc.git] / testsuite / driver / testlib.py
1 # coding=utf8
2 #
3 # (c) Simon Marlow 2002
4 #
5
6 import io
7 import shutil
8 import os
9 import re
10 import traceback
11 import time
12 import datetime
13 import copy
14 import glob
15 import sys
16 from math import ceil, trunc
17 from pathlib import PurePath
18 import collections.abc
19 import subprocess
20
21 from testglobals import config, ghc_env, default_testopts, brokens, t, TestResult
22 from testutil import strip_quotes, lndir, link_or_copy_file, passed, failBecause, failBecauseStderr, str_fail, str_pass
23 from cpu_features import have_cpu_feature
24 import perf_notes as Perf
25 from perf_notes import MetricChange
26 extra_src_files = {'T4198': ['exitminus1.c']} # TODO: See #12223
27
28 global pool_sema
29 if config.use_threads:
30 import threading
31 pool_sema = threading.BoundedSemaphore(value=config.threads)
32
33 global wantToStop
34 wantToStop = False
35
36 def stopNow():
37 global wantToStop
38 wantToStop = True
39
40 def stopping():
41 return wantToStop
42
43
44 # Options valid for the current test only (these get reset to
45 # testdir_testopts after each test).
46
47 global testopts_local
48 if config.use_threads:
49 testopts_local = threading.local()
50 else:
51 class TestOpts_Local:
52 pass
53 testopts_local = TestOpts_Local()
54
55 def getTestOpts():
56 return testopts_local.x
57
58 def setLocalTestOpts(opts):
59 global testopts_local
60 testopts_local.x=opts
61
62 def isCompilerStatsTest():
63 opts = getTestOpts()
64 return bool(opts.is_compiler_stats_test)
65
66 def isStatsTest():
67 opts = getTestOpts()
68 return opts.is_stats_test
69
70
71 # This can be called at the top of a file of tests, to set default test options
72 # for the following tests.
73 def setTestOpts( f ):
74 global thisdir_settings
75 thisdir_settings = [thisdir_settings, f]
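
# For example (illustrative), a .T file might begin with
#
#   setTestOpts(only_ways(['normal']))
#
# so that every test defined later in that file runs only in the
# 'normal' way (per-test setup is then applied on top of this).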
76
77 # -----------------------------------------------------------------------------
78 # Canned setup functions for common cases. eg. for a test you might say
79 #
80 # test('test001', normal, compile, [''])
81 #
82 # to run it without any options, but change it to
83 #
84 # test('test001', expect_fail, compile, [''])
85 #
86 # to expect failure for this test.
87 #
88 # type TestOpt = (name :: String, opts :: Object) -> IO ()
89
90 def normal( name, opts ):
91 return
92
93 def skip( name, opts ):
94 opts.skip = True
95
96 def expect_fail( name, opts ):
97 # The compiler, testdriver, OS or platform is missing a certain
98 # feature, and we don't plan to or can't fix it now or in the
99 # future.
100 opts.expect = 'fail'
101
102 def reqlib( lib ):
103 return lambda name, opts, l=lib: _reqlib (name, opts, l )
104
105 def stage1(name, opts):
106 # See Note [Why is there no stage1 setup function?]
107 framework_fail(name, 'stage1 setup function does not exist',
108 'add your test to testsuite/tests/stage1 instead')
109
110 # Note [Why is there no stage1 setup function?]
111 #
112 # Presumably a stage1 setup function would signal that the stage1
113 # compiler should be used to compile a test.
114 #
115 # Trouble is, the path to the compiler + the `ghc --info` settings for
116 # that compiler are currently passed in from the `make` part of the
117 # testsuite driver.
118 #
119 # Switching compilers in the Python part would be entirely too late, as
120 # all ghc_with_* settings would be wrong. See config/ghc for possible
121 # consequences (for example, config.run_ways would still be
122 # based on the default compiler, quite likely causing ./validate --slow
123 # to fail).
124 #
125 # It would be possible to let the Python part of the testsuite driver
126 # make the call to `ghc --info`, but doing so would require quite some
127 # work. Care has to be taken to not affect the run_command tests for
128 # example, as they also use the `ghc --info` settings:
129 # quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
130 #
131 # If you want a test to run using the stage1 compiler, add it to the
132 # testsuite/tests/stage1 directory. Validate runs the tests in that
133 # directory with `make stage=1`.
134
135 # Cache the results of looking to see if we have a library or not.
136 # This makes quite a difference, especially on Windows.
137 have_lib_cache = {}
138
139 def have_library(lib):
140 """ Test whether the given library is available """
141 if lib in have_lib_cache:
142 got_it = have_lib_cache[lib]
143 else:
144 cmd = strip_quotes(config.ghc_pkg)
145 p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
146 stdout=subprocess.PIPE,
147 stderr=subprocess.PIPE,
148 env=ghc_env)
149 # read from stdout and stderr to avoid blocking due to
150 # buffers filling
151 p.communicate()
152 r = p.wait()
153 got_it = r == 0
154 have_lib_cache[lib] = got_it
155
156 return got_it
157
158 def _reqlib( name, opts, lib ):
159 if not have_library(lib):
160 opts.expect = 'missing-lib'
161
162 def req_haddock( name, opts ):
163 if not config.haddock:
164 opts.expect = 'missing-lib'
165
166 def req_profiling( name, opts ):
167 '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
168 if not config.have_profiling:
169 opts.expect = 'fail'
170
171 def req_shared_libs( name, opts ):
172 if not config.have_shared_libs:
173 opts.expect = 'fail'
174
175 def req_interp( name, opts ):
176 if not config.have_interp:
177 opts.expect = 'fail'
178
179 def req_smp( name, opts ):
180 if not config.have_smp:
181 opts.expect = 'fail'
182
183 def ignore_stdout(name, opts):
184 opts.ignore_stdout = True
185
186 def ignore_stderr(name, opts):
187 opts.ignore_stderr = True
188
189 def combined_output( name, opts ):
190 opts.combined_output = True
191
192 def use_specs( specs ):
193 """
194 use_specs allows one to override files based on suffixes, e.g. 'stdout',
195 'stderr', 'asm', 'prof.sample', etc.
196
197 For example, use_specs({'stdout' : 'prof002.stdout'}) makes the test
198 re-use prof002.stdout.
199
200 Full Example:
201 test('T5889', [only_ways(['normal']), req_profiling,
202 extra_files(['T5889/A.hs', 'T5889/B.hs']),
203 use_specs({'stdout' : 'prof002.stdout'})],
204 multimod_compile,
205 ['A B', '-O -prof -fno-prof-count-entries -v0'])
206
207 """
208 return lambda name, opts, s=specs: _use_specs( name, opts, s )
209
210 def _use_specs( name, opts, specs ):
211 opts.extra_files.extend(specs.values())
212 opts.use_specs = specs
213
214 # -----
215
216 def expect_fail_for( ways ):
217 return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )
218
219 def _expect_fail_for( name, opts, ways ):
220 opts.expect_fail_for = ways
221
222 def expect_broken( bug ):
223 # This test is expected not to work due to the indicated trac bug
224 # number.
225 return lambda name, opts, b=bug: _expect_broken (name, opts, b )
226
227 def _expect_broken( name, opts, bug ):
228 record_broken(name, opts, bug)
229 opts.expect = 'fail'
230
231 def expect_broken_for( bug, ways ):
232 return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )
233
234 def _expect_broken_for( name, opts, bug, ways ):
235 record_broken(name, opts, bug)
236 opts.expect_fail_for = ways
237
238 def record_broken(name, opts, bug):
239 me = (bug, opts.testdir, name)
240 if me not in brokens:
241 brokens.append(me)
242
243 def _expect_pass(way):
244 # Helper function. Not intended for use in .T files.
245 opts = getTestOpts()
246 return opts.expect == 'pass' and way not in opts.expect_fail_for
247
248 # -----
249
250 def fragile( bug ):
251 """
252 Indicates that the test should be skipped due to fragility documented in
253 the given ticket.
254 """
255 def helper( name, opts, bug=bug ):
256 record_broken(name, opts, bug)
257 opts.skip = True
258
259 return helper
260
261 def fragile_for( bug, ways ):
262 """
263 Indicates that the test should be skipped due to fragility in the given
264 test ways as documented in the given ticket.
265 """
266 def helper( name, opts, bug=bug, ways=ways ):
267 record_broken(name, opts, bug)
268 opts.omit_ways = ways
269
270 return helper
271
272 # -----
273
274 def omit_ways( ways ):
275 return lambda name, opts, w=ways: _omit_ways( name, opts, w )
276
277 def _omit_ways( name, opts, ways ):
278 opts.omit_ways = ways
279
280 # -----
281
282 def only_ways( ways ):
283 return lambda name, opts, w=ways: _only_ways( name, opts, w )
284
285 def _only_ways( name, opts, ways ):
286 opts.only_ways = ways
287
288 # -----
289
290 def extra_ways( ways ):
291 return lambda name, opts, w=ways: _extra_ways( name, opts, w )
292
293 def _extra_ways( name, opts, ways ):
294 opts.extra_ways = ways
295
296 # -----
297
298 def set_stdin( file ):
299 return lambda name, opts, f=file: _set_stdin(name, opts, f)
300
301 def _set_stdin( name, opts, f ):
302 opts.stdin = f
303
304 # -----
305
306 def exit_code( val ):
307 return lambda name, opts, v=val: _exit_code(name, opts, v)
308
309 def _exit_code( name, opts, v ):
310 opts.exit_code = v
311
312 def signal_exit_code( val ):
313 if opsys('solaris2'):
314 return exit_code( val )
315 else:
316 # When an application running on Linux receives a fatal error
317 # signal, its exit code is encoded as 128 + the signal value.
318 # See http://www.tldp.org/LDP/abs/html/exitcodes.html
319 # I assume that Mac OS X behaves in the same way; at least the
320 # Mac OS X builder's behavior suggests this.
321 return exit_code( val+128 )
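
# For example (illustrative): a test expected to die with SIGSEGV
# (signal 11) would be written as
#
#   test('T-segv', signal_exit_code(11), compile_and_run, [''])
#
# and on Linux the driver then expects exit code 128 + 11 = 139.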
322
323 # -----
324
325 def compile_timeout_multiplier( val ):
326 return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)
327
328 def _compile_timeout_multiplier( name, opts, v ):
329 opts.compile_timeout_multiplier = v
330
331 def run_timeout_multiplier( val ):
332 return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)
333
334 def _run_timeout_multiplier( name, opts, v ):
335 opts.run_timeout_multiplier = v
336
337 # -----
338
339 def extra_run_opts( val ):
340 return lambda name, opts, v=val: _extra_run_opts(name, opts, v)
341
342 def _extra_run_opts( name, opts, v ):
343 opts.extra_run_opts = v
344
345 # -----
346
347 def extra_hc_opts( val ):
348 return lambda name, opts, v=val: _extra_hc_opts(name, opts, v)
349
350 def _extra_hc_opts( name, opts, v ):
351 opts.extra_hc_opts = v
352
353 # -----
354
355 def extra_clean( files ):
356 # TODO. Remove all calls to extra_clean.
357 return lambda _name, _opts: None
358
359 def extra_files(files):
360 return lambda name, opts: _extra_files(name, opts, files)
361
362 def _extra_files(name, opts, files):
363 opts.extra_files.extend(files)
364
365 # -----
366
367 # Defaults to "test everything, and only break on extreme cases"
368 #
369 # The inputs to this function are slightly interesting:
370 # metric can be either:
371 # - 'all', in which case all 3 possible metrics are collected and compared.
372 # - The specific metric one wants to use in the test.
373 # - A list of the metrics one wants to use in the test.
374 #
375 # Deviation defaults to 20% because the goal is correctness over performance.
376 # The testsuite should avoid breaking when there is not an actual error.
377 # Instead, the testsuite should notify of regressions in a non-breaking manner.
378 #
379 # collect_compiler_stats is used when the metrics collected are about the compiler.
380 # collect_stats is used in the majority case when the metrics to be collected
381 # are about the performance of the runtime code generated by the compiler.
382 def collect_compiler_stats(metric='all',deviation=20):
383 return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m,d, True)
384
385 def collect_stats(metric='all', deviation=20):
386 return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d)
387
388 def testing_metrics():
389 return ['bytes allocated', 'peak_megabytes_allocated', 'max_bytes_used']
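
# For example (illustrative), to track only allocations with a 5%
# tolerance instead of the defaults:
#
#   test('T-perf', collect_stats('bytes allocated', 5),
#        compile_and_run, [''])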
390
391 # This is an internal function that is used only in the implementation.
392 # 'is_compiler_stats_test' is somewhat of an unfortunate name.
393 # If the boolean is set to true, it indicates that this test is one that
394 # measures the performance numbers of the compiler.
395 # As this is a fairly rare case in the testsuite, it defaults to false to
396 # indicate that it is a 'normal' performance test.
397 def _collect_stats(name, opts, metrics, deviation, is_compiler_stats_test=False):
398 if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
399 framework_fail(name, 'bad_name', 'This test has an invalid name')
400
401 # Normalize metrics to a list of strings.
402 if isinstance(metrics, str):
403 if metrics == 'all':
404 metrics = testing_metrics()
405 else:
406 metrics = [metrics]
407
408 opts.is_stats_test = True
409 if is_compiler_stats_test:
410 opts.is_compiler_stats_test = True
411
412 # Compiler performance numbers change when debugging is on, making the results
413 # useless and confusing. Therefore, skip if debugging is on.
414 if config.compiler_debugged and is_compiler_stats_test:
415 opts.skip = True
416
417 for metric in metrics:
418 def baselineByWay(way, target_commit, metric=metric):
419 return Perf.baseline_metric( \
420 target_commit, name, config.test_env, metric, way)
421
422 opts.stats_range_fields[metric] = (baselineByWay, deviation)
423
424 # -----
425
426 def when(b, f):
427 # When list_brokens is on, we want to see all expect_broken calls,
428 # so we always do f
429 if b or config.list_broken:
430 return f
431 else:
432 return normal
433
434 def unless(b, f):
435 return when(not b, f)
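
# For example (illustrative):
#
#   test('T-posix', when(opsys('mingw32'), skip), compile_and_run, [''])
#
# skips the test on Windows, while unless(have_dynamic(), skip) skips a
# test on builds without dynamic libraries.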
436
437 def doing_ghci():
438 return 'ghci' in config.run_ways
439
440 def ghc_dynamic():
441 return config.ghc_dynamic
442
443 def fast():
444 return config.speed == 2
445
446 def platform( plat ):
447 return config.platform == plat
448
449 def opsys( os ):
450 return config.os == os
451
452 def arch( arch ):
453 return config.arch == arch
454
455 def wordsize( ws ):
456 return config.wordsize == str(ws)
457
458 def msys( ):
459 return config.msys
460
461 def cygwin( ):
462 return config.cygwin
463
464 def have_vanilla( ):
465 return config.have_vanilla
466
467 def have_ncg( ):
468 return config.have_ncg
469
470 def have_dynamic( ):
471 return config.have_dynamic
472
473 def have_profiling( ):
474 return config.have_profiling
475
476 def in_tree_compiler( ):
477 return config.in_tree_compiler
478
479 def unregisterised( ):
480 return config.unregisterised
481
482 def compiler_profiled( ):
483 return config.compiler_profiled
484
485 def compiler_debugged( ):
486 return config.compiler_debugged
487
488 def have_gdb( ):
489 return config.have_gdb
490
491 def have_readelf( ):
492 return config.have_readelf
493
494 def integer_gmp( ):
495 return have_library("integer-gmp")
496
497 def integer_simple( ):
498 return have_library("integer-simple")
499
500 def llvm_build ( ):
501 return config.ghc_built_by_llvm
502
503 # ---
504
505 def high_memory_usage(name, opts):
506 opts.alone = True
507
508 # If a test is for a multi-CPU race, then running the test alone
509 # increases the chance that we'll actually see it.
510 def multi_cpu_race(name, opts):
511 opts.alone = True
512
513 # ---
514 def literate( name, opts ):
515 opts.literate = True
516
517 def c_src( name, opts ):
518 opts.c_src = True
519
520 def objc_src( name, opts ):
521 opts.objc_src = True
522
523 def objcpp_src( name, opts ):
524 opts.objcpp_src = True
525
526 def cmm_src( name, opts ):
527 opts.cmm_src = True
528
529 def outputdir( odir ):
530 return lambda name, opts, d=odir: _outputdir(name, opts, d)
531
532 def _outputdir( name, opts, odir ):
533 opts.outputdir = odir
534
535 # ----
536
537 def pre_cmd( cmd ):
538 return lambda name, opts, c=cmd: _pre_cmd(name, opts, c)
539
540 def _pre_cmd( name, opts, cmd ):
541 opts.pre_cmd = cmd
542
543 # ----
544
545 def cmd_prefix( prefix ):
546 return lambda name, opts, p=prefix: _cmd_prefix(name, opts, p)
547
548 def _cmd_prefix( name, opts, prefix ):
549 opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd
550
551 # ----
552
553 def cmd_wrapper( fun ):
554 return lambda name, opts, f=fun: _cmd_wrapper(name, opts, f)
555
556 def _cmd_wrapper( name, opts, fun ):
557 opts.cmd_wrapper = fun
558
559 # ----
560
561 def compile_cmd_prefix( prefix ):
562 return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, p)
563
564 def _compile_cmd_prefix( name, opts, prefix ):
565 opts.compile_cmd_prefix = prefix
566
567 # ----
568
569 def check_stdout( f ):
570 return lambda name, opts, f=f: _check_stdout(name, opts, f)
571
572 def _check_stdout( name, opts, f ):
573 opts.check_stdout = f
574
575 def no_check_hp(name, opts):
576 opts.check_hp = False
577
578 # ----
579
580 def filter_stdout_lines( regex ):
581 """ Filter lines of stdout with the given regular expression """
582 def f( name, opts ):
583 _normalise_fun(name, opts, lambda s: '\n'.join(re.findall(regex, s)))
584 return f
585
586 def normalise_slashes( name, opts ):
587 _normalise_fun(name, opts, normalise_slashes_)
588
589 def normalise_exe( name, opts ):
590 _normalise_fun(name, opts, normalise_exe_)
591
592 def normalise_fun( *fs ):
593 return lambda name, opts: _normalise_fun(name, opts, fs)
594
595 def _normalise_fun( name, opts, *fs ):
596 opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)
597
598 def normalise_errmsg_fun( *fs ):
599 return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)
600
601 def _normalise_errmsg_fun( name, opts, *fs ):
602 opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)
603
604 def check_errmsg(needle):
605 def norm(str):
606 if needle in str:
607 return "%s contained in -ddump-simpl\n" % needle
608 else:
609 return "%s not contained in -ddump-simpl\n" % needle
610 return normalise_errmsg_fun(norm)
611
612 def grep_errmsg(needle):
613 def norm(str):
614 return "".join(filter(lambda l: re.search(needle, l), str.splitlines(True)))
615 return normalise_errmsg_fun(norm)
616
617 def normalise_whitespace_fun(f):
618 return lambda name, opts: _normalise_whitespace_fun(name, opts, f)
619
620 def _normalise_whitespace_fun(name, opts, f):
621 opts.whitespace_normaliser = f
622
623 def normalise_version_( *pkgs ):
624 def normalise_version__( str ):
625 return re.sub('(' + '|'.join(map(re.escape,pkgs)) + ')-[0-9.]+',
626 '\\1-<VERSION>', str)
627 return normalise_version__
628
629 def normalise_version( *pkgs ):
630 def normalise_version__( name, opts ):
631 _normalise_fun(name, opts, normalise_version_(*pkgs))
632 _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
633 return normalise_version__
634
635 def normalise_drive_letter(name, opts):
636 # Windows only. Change D:\\ to C:\\.
637 _normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))
638
639 def keep_prof_callstacks(name, opts):
640 """Keep profiling callstacks.
641
642 Use together with `only_ways(prof_ways)`.
643 """
644 opts.keep_prof_callstacks = True
645
646 def join_normalisers(*a):
647 """
648 Compose functions, flattening sequences.
649
650 join_normalisers(f1,[f2,f3],f4)
651
652 is the same as
653
654 lambda x: f1(f2(f3(f4(x))))
655 """
656
657 def flatten(l):
658 """
659 Taken from http://stackoverflow.com/a/2158532/946226
660 """
661 for el in l:
662 if (isinstance(el, collections.abc.Iterable)
663 and not isinstance(el, (bytes, str))):
664 for sub in flatten(el):
665 yield sub
666 else:
667 yield el
668
669 a = flatten(a)
670
671 fn = lambda x:x # identity function
672 for f in a:
673 assert callable(f)
674 fn = lambda x,f=f,fn=fn: fn(f(x))
675 return fn
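
# For example (illustrative), the composed normaliser
#
#   norm = join_normalisers(normalise_version_('base'),
#                           [lambda s: s.replace('\r', '')])
#
# first strips '\r' characters and then maps 'base-4.12.0.0' to
# 'base-<VERSION>', since the last function given is applied first.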
676
677 # ----
678 # Function for composing two opt-fns together
679
680 def executeSetups(fs, name, opts):
681 if type(fs) is list:
682 # If we have a list of setups, then execute each one
683 for f in fs:
684 executeSetups(f, name, opts)
685 else:
686 # fs is a single function, so just apply it
687 fs(name, opts)
688
689 # -----------------------------------------------------------------------------
690 # The current directory of tests
691
692 def newTestDir(tempdir, dir):
693
694 global thisdir_settings
695 # reset the options for this test directory
696 def settings(name, opts, tempdir=tempdir, dir=dir):
697 return _newTestDir(name, opts, tempdir, dir)
698 thisdir_settings = settings
699
700 # Should be equal to entry in toplevel .gitignore.
701 testdir_suffix = '.run'
702
703 def _newTestDir(name, opts, tempdir, dir):
704 testdir = os.path.join('', *(p for p in PurePath(dir).parts if p != '..'))
705 opts.srcdir = os.path.join(os.getcwd(), dir)
706 opts.testdir = os.path.join(tempdir, testdir, name + testdir_suffix)
707 opts.compiler_always_flags = config.compiler_always_flags
708
709 # -----------------------------------------------------------------------------
710 # Actually doing tests
711
712 parallelTests = []
713 aloneTests = []
714 allTestNames = set([])
715
716 def runTest(watcher, opts, name, func, args):
717 if config.use_threads:
718 pool_sema.acquire()
719 t = threading.Thread(target=test_common_thread,
720 name=name,
721 args=(watcher, name, opts, func, args))
722 t.daemon = False
723 t.start()
724 else:
725 test_common_work(watcher, name, opts, func, args)
726
727 # name :: String
728 # setup :: [TestOpt] -> IO ()
729 def test(name, setup, func, args):
730 global aloneTests
731 global parallelTests
732 global allTestNames
733 global thisdir_settings
734 if name in allTestNames:
735 framework_fail(name, 'duplicate', 'There are multiple tests with this name')
736 if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
737 framework_fail(name, 'bad_name', 'This test has an invalid name')
738
739 if config.run_only_some_tests:
740 if name not in config.only:
741 return
742 else:
743 # Note [Mutating config.only]
744 # config.only is initially the set of tests requested by
745 # the user (via 'make TEST='). We then remove all tests that
746 # we've already seen (in .T files), so that we can later
747 # report on any tests we couldn't find and error out.
748 config.only.remove(name)
749
750 # Make a deep copy of the default_testopts, as we need our own copy
751 # of any dictionaries etc inside it. Otherwise, if one test modifies
752 # them, all tests will see the modified version!
753 myTestOpts = copy.deepcopy(default_testopts)
754
755 executeSetups([thisdir_settings, setup], name, myTestOpts)
756
757 thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
758 if myTestOpts.alone:
759 aloneTests.append(thisTest)
760 else:
761 parallelTests.append(thisTest)
762 allTestNames.add(name)
763
764 if config.use_threads:
765 def test_common_thread(watcher, name, opts, func, args):
766 try:
767 test_common_work(watcher, name, opts, func, args)
768 finally:
769 pool_sema.release()
770
771 def get_package_cache_timestamp():
772 if config.package_conf_cache_file == '':
773 return 0.0
774 else:
775 try:
776 return os.stat(config.package_conf_cache_file).st_mtime
777 except:
778 return 0.0
779
780 do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o', '.out') # See #12112
781
782 def test_common_work(watcher, name, opts, func, args):
783 try:
784 t.total_tests += 1
785 setLocalTestOpts(opts)
786
787 package_conf_cache_file_start_timestamp = get_package_cache_timestamp()
788
789 # All the ways we might run this test
790 if func == compile or func == multimod_compile:
791 all_ways = config.compile_ways
792 elif func == compile_and_run or func == multimod_compile_and_run:
793 all_ways = config.run_ways
794 elif func == ghci_script:
795 if 'ghci' in config.run_ways:
796 all_ways = ['ghci']
797 else:
798 all_ways = []
799 else:
800 all_ways = ['normal']
801
802 # A test itself can request extra ways by setting opts.extra_ways
803 all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]
804
805 t.total_test_cases += len(all_ways)
806
807 ok_way = lambda way: \
808 not getTestOpts().skip \
809 and (getTestOpts().only_ways is None or way in getTestOpts().only_ways) \
810 and (config.cmdline_ways == [] or way in config.cmdline_ways) \
811 and (not (config.skip_perf_tests and isStatsTest())) \
812 and (not (config.only_perf_tests and not isStatsTest())) \
813 and way not in getTestOpts().omit_ways
814
815 # Which ways we are asked to run (the remaining ways are skipped)
816 do_ways = list(filter (ok_way,all_ways))
817
818 # Only run all ways in slow mode.
819 # See Note [validate and testsuite speed] in toplevel Makefile.
820 if config.accept:
821 # Only ever run one way
822 do_ways = do_ways[:1]
823 elif config.speed > 0:
824 # However, if we EXPLICITLY asked for a way (with extra_ways)
825 # please test it!
826 explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
827 other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
828 do_ways = other_ways[:1] + explicit_ways
829
830 # Find all files in the source directory that this test
831 # depends on. Do this only once for all ways.
832 # Generously add all filenames that start with the name of
833 # the test to this set, as a convenience to test authors.
834 # They will have to use the `extra_files` setup function to
835 # specify all other files that their test depends on (but
836 # this seems to be necessary for only about 10% of all
837 # tests).
838 files = set(f for f in os.listdir(opts.srcdir)
839 if f.startswith(name) and not f == name and
840 not f.endswith(testdir_suffix) and
841 not os.path.splitext(f)[1] in do_not_copy)
842 for filename in (opts.extra_files + extra_src_files.get(name, [])):
843 if filename.startswith('/'):
844 framework_fail(name, 'whole-test',
845 'no absolute paths in extra_files please: ' + filename)
846
847 elif '*' in filename:
848 # Don't use wildcards in extra_files too much, as
849 # globbing is slow.
850 files.update((os.path.relpath(f, opts.srcdir)
851 for f in glob.iglob(in_srcdir(filename))))
852
853 elif filename:
854 files.add(filename)
855
856 else:
857 framework_fail(name, 'whole-test', 'extra_file is empty string')
858
859 # Run the required tests...
860 for way in do_ways:
861 if stopping():
862 break
863 try:
864 do_test(name, way, func, args, files)
865 except KeyboardInterrupt:
866 stopNow()
867 except Exception as e:
868 framework_fail(name, way, str(e))
869 traceback.print_exc()
870
871 t.n_tests_skipped += len(set(all_ways) - set(do_ways))
872
873 if config.cleanup and do_ways:
874 try:
875 cleanup()
876 except Exception as e:
877 framework_fail(name, 'runTest', 'Unhandled exception during cleanup: ' + str(e))
878
879 package_conf_cache_file_end_timestamp = get_package_cache_timestamp()
880
881 if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
882 framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))
883
884 except Exception as e:
885 framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
886 finally:
887 watcher.notify()
888
889 def do_test(name, way, func, args, files):
890 opts = getTestOpts()
891
892 full_name = name + '(' + way + ')'
893
894 if_verbose(2, "=====> {0} {1} of {2} {3}".format(
895 full_name, t.total_tests, len(allTestNames),
896 [len(t.unexpected_passes),
897 len(t.unexpected_failures),
898 len(t.framework_failures)]))
899
900 # Clean up prior to the test, so that we can't spuriously conclude
901 # that it passed on the basis of old run outputs.
902 cleanup()
903 os.makedirs(opts.testdir)
904
905 # Link all source files for this test into a new directory in
906 # /tmp, and run the test in that directory. This makes it
907 # possible to run tests in parallel, without modification, that
908 # would otherwise (accidentally) write to the same output file.
909 # It also makes it easier to keep the testsuite clean.
910
911 for extra_file in files:
912 src = in_srcdir(extra_file)
913 dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
914 if os.path.isfile(src):
915 link_or_copy_file(src, dst)
916 elif os.path.isdir(src):
917 if os.path.exists(dst):
918 shutil.rmtree(dst)
919 os.mkdir(dst)
920 lndir(src, dst)
921 else:
922 if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
923 # When using a ghc built without haddock support, .t
924 # files are rightfully missing. Don't
925 # framework_fail. Test will be skipped later.
926 pass
927 else:
928 framework_fail(name, way,
929 'extra_file does not exist: ' + extra_file)
930
931 if func.__name__ == 'run_command' or func.__name__ == 'makefile_test' or opts.pre_cmd:
932 # When running 'MAKE' make sure 'TOP' still points to the
933 # root of the testsuite.
934 src_makefile = in_srcdir('Makefile')
935 dst_makefile = in_testdir('Makefile')
936 if os.path.exists(src_makefile):
937 with io.open(src_makefile, 'r', encoding='utf8') as src:
938 makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
939 with io.open(dst_makefile, 'w', encoding='utf8') as dst:
940 dst.write(makefile)
941
942 if opts.pre_cmd:
943 exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, override_options(opts.pre_cmd)),
944 stderr = subprocess.STDOUT,
945 print_output = config.verbose >= 3)
946
947 # If user used expect_broken then don't record failures of pre_cmd
948 if exit_code != 0 and opts.expect not in ['fail']:
949 framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
950 if_verbose(1, '** pre_cmd was "{0}".'.format(override_options(opts.pre_cmd)))
951
952 result = func(*[name,way] + args)
953
954 if opts.expect not in ['pass', 'fail', 'missing-lib']:
955 framework_fail(name, way, 'bad expected ' + opts.expect)
956
957 try:
958 passFail = result['passFail']
959 except (KeyError, TypeError):
960 passFail = 'No passFail found'
961
962 directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
963
964 if passFail == 'pass':
965 if _expect_pass(way):
966 t.expected_passes.append(TestResult(directory, name, "", way))
967 t.n_expected_passes += 1
968 else:
969 if_verbose(1, '*** unexpected pass for %s' % full_name)
970 t.unexpected_passes.append(TestResult(directory, name, 'unexpected', way))
971 elif passFail == 'fail':
972 if _expect_pass(way):
973 reason = result['reason']
974 tag = result.get('tag')
975 if tag == 'stat':
976 if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
977 t.unexpected_stat_failures.append(TestResult(directory, name, reason, way))
978 else:
979 if_verbose(1, '*** unexpected failure for %s' % full_name)
980 result = TestResult(directory, name, reason, way, stderr=result.get('stderr'))
981 t.unexpected_failures.append(result)
982 else:
983 if opts.expect == 'missing-lib':
984 t.missing_libs.append(TestResult(directory, name, 'missing-lib', way))
985 else:
986 t.n_expected_failures += 1
987 else:
988 framework_fail(name, way, 'bad result ' + passFail)
989
990 # Make is often invoked with -s, which means if it fails, we get
991 # no feedback at all. This is annoying. So let's remove the option
992 # if found and instead have the testsuite decide on what to do
993 # with the output.
994 def override_options(pre_cmd):
995 if config.verbose >= 5 and bool(re.match(r'\$make', pre_cmd, re.I)):
996 return pre_cmd.replace('-s' , '') \
997 .replace('--silent', '') \
998 .replace('--quiet' , '')
999
1000 return pre_cmd
1001
1002 def framework_fail(name, way, reason):
1003 opts = getTestOpts()
1004 directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
1005 full_name = name + '(' + way + ')'
1006 if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
1007 t.framework_failures.append(TestResult(directory, name, reason, way))
1008
1009 def framework_warn(name, way, reason):
1010 opts = getTestOpts()
1011 directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
1012 full_name = name + '(' + way + ')'
1013 if_verbose(1, '*** framework warning for %s %s ' % (full_name, reason))
1014 t.framework_warnings.append(TestResult(directory, name, reason, way))
1015
1016 def badResult(result):
1017 try:
1018 if result['passFail'] == 'pass':
1019 return False
1020 return True
1021 except (KeyError, TypeError):
1022 return True
1023
1024 # -----------------------------------------------------------------------------
1025 # Generic command tests
1026
1027 # A generic command test is expected to run and exit successfully.
1028 #
1029 # The expected exit code can be changed via exit_code() as normal, and
1030 # the expected stdout/stderr are stored in <testname>.stdout and
1031 # <testname>.stderr. The output of the command can be ignored
1032 # altogether by using the setup functions ignore_stdout and
1033 # ignore_stderr.
1034
1035 def run_command( name, way, cmd ):
1036 return simple_run( name, '', override_options(cmd), '' )
1037
1038 def makefile_test( name, way, target=None ):
1039 if target is None:
1040 target = name
1041
1042 cmd = '$MAKE -s --no-print-directory {target}'.format(target=target)
1043 return run_command(name, way, cmd)
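
# For example (illustrative), a Makefile-driven test is declared as
#
#   test('T-make', normal, makefile_test, [])
#
# which runs `$MAKE -s --no-print-directory T-make` in the test
# directory and compares stdout/stderr as for any command test.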
1044
1045 # -----------------------------------------------------------------------------
1046 # GHCi tests
1047
1048 def ghci_script( name, way, script):
1049 flags = ' '.join(get_compiler_flags())
1050 way_flags = ' '.join(config.way_flags[way])
1051
1052 # We pass HC and HC_OPTS as environment variables, so that the
1053 # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
1054 cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {way_flags} {flags}'
1055 ).format(flags=flags, way_flags=way_flags)
1056 # NB: put way_flags before flags so that flags in all.T can override others
1057
1058 getTestOpts().stdin = script
1059 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
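
# For example (illustrative), a GHCi test is declared as
#
#   test('T-ghci', normal, ghci_script, ['T-ghci.script'])
#
# which feeds T-ghci.script to GHCi on stdin; the script can rebuild
# with the same compiler via ':! $HC $HC_OPTS ...'.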
1060
1061 # -----------------------------------------------------------------------------
1062 # Compile-only tests
1063
1064 def compile( name, way, extra_hc_opts ):
1065 return do_compile( name, way, 0, '', [], extra_hc_opts )
1066
1067 def compile_fail( name, way, extra_hc_opts ):
1068 return do_compile( name, way, 1, '', [], extra_hc_opts )
1069
1070 def backpack_typecheck( name, way, extra_hc_opts ):
1071 return do_compile( name, way, 0, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )
1072
1073 def backpack_typecheck_fail( name, way, extra_hc_opts ):
1074 return do_compile( name, way, 1, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )
1075
1076 def backpack_compile( name, way, extra_hc_opts ):
1077 return do_compile( name, way, 0, '', [], extra_hc_opts, backpack=True )
1078
1079 def backpack_compile_fail( name, way, extra_hc_opts ):
1080 return do_compile( name, way, 1, '', [], extra_hc_opts, backpack=True )
1081
1082 def backpack_run( name, way, extra_hc_opts ):
1083 return compile_and_run__( name, way, '', [], extra_hc_opts, backpack=True )
1084
1085 def multimod_compile( name, way, top_mod, extra_hc_opts ):
1086 return do_compile( name, way, 0, top_mod, [], extra_hc_opts )
1087
1088 def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
1089 return do_compile( name, way, 1, top_mod, [], extra_hc_opts )
1090
1091 def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
1092 return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)
1093
1094 def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
1095 return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
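
# For example (illustrative):
#
#   test('T-warn', normal, compile, ['-Wall'])   # must compile (exit 0)
#   test('T-err',  normal, compile_fail, [''])   # must be rejected
#
# In both cases the compiler's stderr is compared against the
# corresponding <testname>.stderr file.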
1096
1097 def do_compile(name, way, should_fail, top_mod, extra_mods, extra_hc_opts, **kwargs):
1098 # print 'Compile only, extra args = ', extra_hc_opts
1099
1100 result = extras_build( way, extra_mods, extra_hc_opts )
1101 if badResult(result):
1102 return result
1103 extra_hc_opts = result['hc_opts']
1104
1105 result = simple_build(name, way, extra_hc_opts, should_fail, top_mod, 0, 1, **kwargs)
1106
1107 if badResult(result):
1108 return result
1109
1110 # the actual stderr should always match the expected, regardless
1111 # of whether we expected the compilation to fail or not (successful
1112 # compilations may generate warnings).
1113
1114 expected_stderr_file = find_expected_file(name, 'stderr')
1115 actual_stderr_file = add_suffix(name, 'comp.stderr')
1116 diff_file_name = in_testdir(add_suffix(name, 'comp.diff'))
1117
1118 if not compare_outputs(way, 'stderr',
1119 join_normalisers(getTestOpts().extra_errmsg_normaliser,
1120 normalise_errmsg),
1121 expected_stderr_file, actual_stderr_file,
1122 diff_file=diff_file_name,
1123 whitespace_normaliser=getattr(getTestOpts(),
1124 "whitespace_normaliser",
1125 normalise_whitespace)):
1126 stderr = open(diff_file_name, 'rb').read()
1127 os.remove(diff_file_name)
1128 return failBecauseStderr('stderr mismatch', stderr=stderr )
1129
1130
1131 # no problems found, this test passed
1132 return passed()
1133
1134 def compile_cmp_asm( name, way, extra_hc_opts ):
1135 print('Compile only, extra args = ', extra_hc_opts)
1136 result = simple_build(name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)
1137
1138 if badResult(result):
1139 return result
1140
1141 # The actual assembly output should always match the expected
1142 # assembly output (this test compares the generated .s file, not
1143 # stderr).
1144
1145 expected_asm_file = find_expected_file(name, 'asm')
1146 actual_asm_file = add_suffix(name, 's')
1147
1148 if not compare_outputs(way, 'asm',
1149 join_normalisers(normalise_errmsg, normalise_asm),
1150 expected_asm_file, actual_asm_file):
1151 return failBecause('asm mismatch')
1152
1153 # no problems found, this test passed
1154 return passed()
1155
1156 # -----------------------------------------------------------------------------
1157 # Compile-and-run tests
1158
1159 def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts, backpack=0 ):
1160 # print 'Compile and run, extra args = ', extra_hc_opts
1161
1162 result = extras_build( way, extra_mods, extra_hc_opts )
1163 if badResult(result):
1164 return result
1165 extra_hc_opts = result['hc_opts']
1166
1167 if way.startswith('ghci'): # interpreted...
1168 return interpreter_run(name, way, extra_hc_opts, top_mod)
1169 else: # compiled...
1170 result = simple_build(name, way, extra_hc_opts, 0, top_mod, 1, 1, backpack = backpack)
1171 if badResult(result):
1172 return result
1173
1174 cmd = './' + name
1175
1176 # we don't check the compiler's stderr for a compile-and-run test
1177 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1178
1179 def compile_and_run( name, way, extra_hc_opts ):
1180 return compile_and_run__( name, way, '', [], extra_hc_opts)
1181
1182 def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
1183 return compile_and_run__( name, way, top_mod, [], extra_hc_opts)
1184
1185 def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
1186 return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
1187
1188 def stats( name, way, stats_file ):
1189 opts = getTestOpts()
1190 return check_stats(name, way, stats_file, opts.stats_range_fields)
1191
1192 def metric_dict(name, way, metric, value):
1193 return Perf.PerfStat(
1194 test_env = config.test_env,
1195 test = name,
1196 way = way,
1197 metric = metric,
1198 value = value)
1199
1200 # -----------------------------------------------------------------------------
1201 # Check test stats. This prints the results for the user.
1202 # name: name of the test.
1203 # way: the way.
1204 # stats_file: the path of the stats_file containing the stats for the test.
1205 # range_fields: see TestOptions.stats_range_fields
1206 # Returns a pass/fail object. Passes if the stats are within the expected value ranges.
1207 # This prints the results for the user.
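#
# For example (illustrative), with '+RTS -t<file> --machine-readable'
# the stats file contains lines such as
#
#   ("bytes allocated", "49960")
#
# which the regexp below extracts and compares against the baseline.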
1208 def check_stats(name, way, stats_file, range_fields):
1209 head_commit = Perf.commit_hash('HEAD') if Perf.inside_git_repo() else None
1210 result = passed()
1211 if range_fields:
1212 try:
1213 f = open(in_testdir(stats_file))
1214 except IOError as e:
1215 return failBecause(str(e))
1216 stats_file_contents = f.read()
1217 f.close()
1218
1219 for (metric, baseline_and_dev) in range_fields.items():
1220 field_match = re.search(r'\("' + metric + r'", "([0-9]+)"\)', stats_file_contents)
1221 if field_match is None:
1222 print('Failed to find metric: ', metric)
1223 metric_result = failBecause('no such stats metric')
1224 else:
1225 actual_val = int(field_match.group(1))
1226
1227 # Store the metric so it can later be stored in a git note.
1228 perf_stat = metric_dict(name, way, metric, actual_val)
1229 change = None
1230
1231 # If this is the first time running the benchmark, then pass.
1232 baseline = baseline_and_dev[0](way, head_commit) \
1233 if Perf.inside_git_repo() else None
1234 if baseline is None:
1235 metric_result = passed()
1236 change = MetricChange.NewMetric
1237 else:
1238 tolerance_dev = baseline_and_dev[1]
1239 (change, metric_result) = Perf.check_stats_change(
1240 perf_stat,
1241 baseline,
1242 tolerance_dev,
1243 config.allowed_perf_changes,
1244 config.verbose >= 4)
1245 t.metrics.append((change, perf_stat))
1246
1247 # If any metric fails then the test fails.
1248 # Note, the remaining metrics are still run so that
1249 # a complete list of changes can be presented to the user.
1250 if metric_result['passFail'] == 'fail':
1251 result = metric_result
1252
1253 return result
1254
1255 # -----------------------------------------------------------------------------
1256 # Build a single-module program
1257
1258 def extras_build( way, extra_mods, extra_hc_opts ):
1259 for mod, opts in extra_mods:
1260 result = simple_build(mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0)
1261 if not (mod.endswith('.hs') or mod.endswith('.lhs')):
1262 extra_hc_opts += ' ' + replace_suffix(mod, 'o')
1263 if badResult(result):
1264 return result
1265
1266 return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1267
1268 def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, backpack = False):
1269 opts = getTestOpts()
1270
1271 # Redirect stdout and stderr to the same file
1272 stdout = in_testdir(name, 'comp.stderr')
1273 stderr = subprocess.STDOUT
1274
1275 if top_mod != '':
1276 srcname = top_mod
1277 elif addsuf:
1278 if backpack:
1279 srcname = add_suffix(name, 'bkp')
1280 else:
1281 srcname = add_hs_lhs_suffix(name)
1282 else:
1283 srcname = name
1284
1285 if top_mod != '':
1286 to_do = '--make '
1287 if link:
1288 to_do = to_do + '-o ' + name
1289 elif backpack:
1290 if link:
1291 to_do = '-o ' + name + ' '
1292 else:
1293 to_do = ''
1294 to_do = to_do + '--backpack '
1295 elif link:
1296 to_do = '-o ' + name
1297 else:
1298 to_do = '-c' # just compile
1299
1300 stats_file = name + '.comp.stats'
1301 if isCompilerStatsTest():
1302 extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1303 if backpack:
1304 extra_hc_opts += ' -outputdir ' + name + '.out'
1305
1306 # Required by GHC 7.3+, harmless for earlier versions:
1307 if (getTestOpts().c_src or
1308 getTestOpts().objc_src or
1309 getTestOpts().objcpp_src or
1310 getTestOpts().cmm_src):
1311 extra_hc_opts += ' -no-hs-main '
1312
1313 if getTestOpts().compile_cmd_prefix == '':
1314 cmd_prefix = ''
1315 else:
1316 cmd_prefix = getTestOpts().compile_cmd_prefix + ' '
1317
1318 flags = ' '.join(get_compiler_flags() + config.way_flags[way])
1319
1320 cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
1321 '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts}'
1322 ).format(**locals())
1323
1324 exit_code = runCmd(cmd, None, stdout, stderr, opts.compile_timeout_multiplier)
1325
1326 actual_stderr_path = in_testdir(name, 'comp.stderr')
1327
1328 if exit_code != 0 and not should_fail:
1329 if config.verbose >= 1 and _expect_pass(way):
1330 print('Compile failed (exit code {0}) errors were:'.format(exit_code))
1331 dump_file(actual_stderr_path)
1332
1333 # ToDo: if the sub-shell was killed by ^C, then exit
1334
1335 if isCompilerStatsTest():
1336 statsResult = check_stats(name, way, stats_file, opts.stats_range_fields)
1337 if badResult(statsResult):
1338 return statsResult
1339
1340 if should_fail:
1341 if exit_code == 0:
1342 stderr_contents = open(actual_stderr_path, 'rb').read()
1343 return failBecauseStderr('exit code 0', stderr_contents)
1344 else:
1345 if exit_code != 0:
1346 stderr_contents = open(actual_stderr_path, 'rb').read()
1347 return failBecauseStderr('exit code non-0', stderr_contents)
1348
1349 return passed()
1350
1351 # -----------------------------------------------------------------------------
1352 # Run a program and check its output
1353 #
1354 # If testname.stdin exists, route input from that, else
1355 # from /dev/null. Route output to testname.run.stdout and
1356 # testname.run.stderr. Returns the exit code of the run.
1357
1358 def simple_run(name, way, prog, extra_run_opts):
1359 opts = getTestOpts()
1360
1361 # figure out what to use for stdin
1362 if opts.stdin:
1363 stdin = in_testdir(opts.stdin)
1364 elif os.path.exists(in_testdir(name, 'stdin')):
1365 stdin = in_testdir(name, 'stdin')
1366 else:
1367 stdin = None
1368
1369 stdout = in_testdir(name, 'run.stdout')
1370 if opts.combined_output:
1371 stderr = subprocess.STDOUT
1372 else:
1373 stderr = in_testdir(name, 'run.stderr')
1374
1375 my_rts_flags = rts_flags(way)
1376
1377 # Collect stats if necessary:
1378 # isStatsTest and not isCompilerStatsTest():
1379 # assume we are running a ghc compiled program. Collect stats.
1380 # isStatsTest and way == 'ghci':
1381 # assume we are running a program via ghci. Collect stats
1382 stats_file = name + '.stats'
1383 if isStatsTest() and (not isCompilerStatsTest() or way == 'ghci'):
1384 stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1385 else:
1386 stats_args = ''
1387
1388 # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
1389 cmd = prog + stats_args + ' ' + my_rts_flags + ' ' + extra_run_opts
1390
1391 if opts.cmd_wrapper is not None:
1392 cmd = opts.cmd_wrapper(cmd)
1393
1394 cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())
1395
1396 # run the command
1397 exit_code = runCmd(cmd, stdin, stdout, stderr, opts.run_timeout_multiplier)
1398
1399 # check the exit code
1400 if exit_code != opts.exit_code:
1401 if config.verbose >= 1 and _expect_pass(way):
1402 print('Wrong exit code for ' + name + '(' + way + ')' + '(expected', opts.exit_code, ', actual', exit_code, ')')
1403 dump_stdout(name)
1404 dump_stderr(name)
1405 return failBecause('bad exit code')
1406
1407 if not (opts.ignore_stderr or stderr_ok(name, way) or opts.combined_output):
1408 return failBecause('bad stderr')
1409 if not (opts.ignore_stdout or stdout_ok(name, way)):
1410 return failBecause('bad stdout')
1411
1412 check_hp = '-h' in my_rts_flags and opts.check_hp
1413 check_prof = '-p' in my_rts_flags
1414
1415 # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
1416 if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
1417 return failBecause('bad heap profile')
1418 if check_prof and not check_prof_ok(name, way):
1419 return failBecause('bad profile')
1420
1421 return check_stats(name, way, stats_file, opts.stats_range_fields)
1422
1423 def rts_flags(way):
1424 args = config.way_rts_flags.get(way, [])
1425 return '+RTS {0} -RTS'.format(' '.join(args)) if args else ''
1426
1427 # -----------------------------------------------------------------------------
1428 # Run a program in the interpreter and check its output
1429
1430 def interpreter_run(name, way, extra_hc_opts, top_mod):
1431 opts = getTestOpts()
1432
1433 stdout = in_testdir(name, 'interp.stdout')
1434 stderr = in_testdir(name, 'interp.stderr')
1435 script = in_testdir(name, 'genscript')
1436
1437 if opts.combined_output:
1438 framework_fail(name, 'unsupported',
1439 'WAY=ghci and combined_output together is not supported')
1440
1441 if (top_mod == ''):
1442 srcname = add_hs_lhs_suffix(name)
1443 else:
1444 srcname = top_mod
1445
1446 delimiter = '===== program output begins here\n'
1447
1448 with io.open(script, 'w', encoding='utf8') as f:
1449 # set the prog name and command-line args to match the compiled
1450 # environment.
1451 f.write(':set prog ' + name + '\n')
1452 f.write(':set args ' + opts.extra_run_opts + '\n')
1453 # Add marker lines to the stdout and stderr output files, so we
1454 # can separate GHCi's output from the program's.
1455 f.write(':! echo ' + delimiter)
1456 f.write(':! echo 1>&2 ' + delimiter)
1457 # Set stdout to be line-buffered to match the compiled environment.
1458 f.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
1459 # wrapping in GHC.TopHandler.runIO ensures we get the same output
1460 # in the event of an exception as for the compiled program.
1461 f.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
1462
1463 stdin = in_testdir(opts.stdin if opts.stdin else add_suffix(name, 'stdin'))
1464 if os.path.exists(stdin):
1465 os.system('cat "{0}" >> "{1}"'.format(stdin, script))
1466
1467 flags = ' '.join(get_compiler_flags() + config.way_flags[way])
1468
1469 cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts}'
1470 ).format(**locals())
1471
1472 if opts.cmd_wrapper is not None:
1473 cmd = opts.cmd_wrapper(cmd)
1474
1475 cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())
1476
1477 exit_code = runCmd(cmd, script, stdout, stderr, opts.run_timeout_multiplier)
1478
1479 # split the stdout into compilation/program output
1480 split_file(stdout, delimiter,
1481 in_testdir(name, 'comp.stdout'),
1482 in_testdir(name, 'run.stdout'))
1483 split_file(stderr, delimiter,
1484 in_testdir(name, 'comp.stderr'),
1485 in_testdir(name, 'run.stderr'))
1486
1487 # check the exit code
1488 if exit_code != getTestOpts().exit_code:
1489 print('Wrong exit code for ' + name + '(' + way + ') (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
1490 dump_stdout(name)
1491 dump_stderr(name)
1492 return failBecause('bad exit code')
1493
1494 # ToDo: if the sub-shell was killed by ^C, then exit
1495
1496 if not (opts.ignore_stderr or stderr_ok(name, way)):
1497 return failBecause('bad stderr')
1498 elif not (opts.ignore_stdout or stdout_ok(name, way)):
1499 return failBecause('bad stdout')
1500 else:
1501 return passed()
1502
1503 def split_file(in_fn, delimiter, out1_fn, out2_fn):
1504 # See Note [Universal newlines].
1505 with io.open(in_fn, 'r', encoding='utf8', errors='replace', newline=None) as infile:
1506 with io.open(out1_fn, 'w', encoding='utf8', newline='') as out1:
1507 with io.open(out2_fn, 'w', encoding='utf8', newline='') as out2:
1508 line = infile.readline()
1509 while re.sub(r'^\s*', '', line) != delimiter and line != '':
1510 out1.write(line)
1511 line = infile.readline()
1512
1513 line = infile.readline()
1514 while line != '':
1515 out2.write(line)
1516 line = infile.readline()
1517
1518 # -----------------------------------------------------------------------------
1519 # Utils
1520 def get_compiler_flags():
1521 opts = getTestOpts()
1522
1523 flags = copy.copy(opts.compiler_always_flags)
1524
1525 flags.append(opts.extra_hc_opts)
1526
1527 if opts.outputdir is not None:
1528 flags.extend(["-outputdir", opts.outputdir])
1529
1530 return flags
1531
1532 def stdout_ok(name, way):
1533 actual_stdout_file = add_suffix(name, 'run.stdout')
1534 expected_stdout_file = find_expected_file(name, 'stdout')
1535
1536 extra_norm = join_normalisers(normalise_output, getTestOpts().extra_normaliser)
1537
1538 check_stdout = getTestOpts().check_stdout
1539 if check_stdout:
1540 actual_stdout_path = in_testdir(actual_stdout_file)
1541 return check_stdout(actual_stdout_path, extra_norm)
1542
1543 return compare_outputs(way, 'stdout', extra_norm,
1544 expected_stdout_file, actual_stdout_file)
1545
1546 def dump_stdout( name ):
1547 with open(in_testdir(name, 'run.stdout'), encoding='utf8') as f:
1548 str = f.read().strip()
1549 if str:
1550 print("Stdout (", name, "):")
1551 print(str)
1552
1553 def stderr_ok(name, way):
1554 actual_stderr_file = add_suffix(name, 'run.stderr')
1555 expected_stderr_file = find_expected_file(name, 'stderr')
1556
1557 return compare_outputs(way, 'stderr',
1558 join_normalisers(normalise_errmsg, getTestOpts().extra_errmsg_normaliser), \
1559 expected_stderr_file, actual_stderr_file,
1560 whitespace_normaliser=normalise_whitespace)
1561
1562 def dump_stderr( name ):
1563 with open(in_testdir(name, 'run.stderr'), encoding='utf8') as f:
1564 str = f.read().strip()
1565 if str:
1566 print("Stderr (", name, "):")
1567 print(str)
1568
1569 def read_no_crs(file):
1570 str = ''
1571 try:
1572 # See Note [Universal newlines].
1573 with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
1574 str = h.read()
1575 except Exception:
1576 # On Windows, if the program fails very early, it seems the
1577 # files stdout/stderr are redirected to may not get created
1578 pass
1579 return str
1580
1581 def write_file(file, str):
1582 # See Note [Universal newlines].
1583 with io.open(file, 'w', encoding='utf8', newline='') as h:
1584 h.write(str)
1585
1586 # Note [Universal newlines]
1587 #
1588 # We don't want to write any Windows style line endings ever, because
1589 # it would mean that `make accept` would touch every line of the file
1590 # when switching between Linux and Windows.
1591 #
1592 # Furthermore, when reading a file, it is convenient to translate all
1593 # Windows style endings to '\n', as it simplifies searching or massaging
1594 # the content.
1595 #
1596 # Solution: use `io.open` instead of `open`
1597 # * when reading: use newline=None to translate '\r\n' to '\n'
1598 # * when writing: use newline='' to not translate '\n' to '\r\n'
1599 #
1600 # See https://docs.python.org/2/library/io.html#io.open.
1601 #
1602 # This should work with both python2 and python3, and with both mingw*
1603 # and msys2 style Python.
1604 #
1605 # Do note that io.open returns unicode strings. So we have to specify
1606 # the expected encoding. But there is at least one file which is not
1607 # valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
1608 # Another solution would be to open files in binary mode always, and
1609 # operate on bytes.
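
# For example (illustrative; `path` is a placeholder), a file written on
# Windows with CRLF line endings reads back with plain '\n':
#
#   with io.open(path, 'r', encoding='utf8', errors='replace', newline=None) as h:
#       assert '\r' not in h.read()   # '\r\n' was translated to '\n'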
1610
1611 def check_hp_ok(name):
1612 opts = getTestOpts()
1613
1614 # do not qualify for hp2ps because we should be in the right directory
1615 hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())
1616
1617 hp2psResult = runCmd(hp2psCmd)
1618
1619 actual_ps_path = in_testdir(name, 'ps')
1620
1621 if hp2psResult == 0:
1622 if os.path.exists(actual_ps_path):
1623 if gs_working:
1624 gsResult = runCmd(genGSCmd(actual_ps_path))
1625 if gsResult == 0:
1626 return True
1627 else:
1628 print("hp2ps output for " + name + " is not valid PostScript")
1629 else: return True # assume postscript is valid without ghostscript
1630 else:
1631 print("hp2ps did not generate PostScript for " + name)
1632 return False
1633 else:
1634 print("hp2ps error when processing heap profile for " + name)
1635 return False
1636
1637 def check_prof_ok(name, way):
1638 expected_prof_file = find_expected_file(name, 'prof.sample')
1639 expected_prof_path = in_testdir(expected_prof_file)
1640
1641 # Check actual prof file only if we have an expected prof file to
1642 # compare it with.
1643 if not os.path.exists(expected_prof_path):
1644 return True
1645
1646 actual_prof_file = add_suffix(name, 'prof')
1647 actual_prof_path = in_testdir(actual_prof_file)
1648
1649 if not os.path.exists(actual_prof_path):
1650 print(actual_prof_path + " does not exist")
1651 return False
1652
1653 if os.path.getsize(actual_prof_path) == 0:
1654 print(actual_prof_path + " is empty")
1655 return False
1656
1657 return compare_outputs(way, 'prof', normalise_prof,
1658 expected_prof_file, actual_prof_file,
1659 whitespace_normaliser=normalise_whitespace)
1660
# Compare expected output to actual output, and optionally accept the
# new output. Returns true if output matched or was accepted, false
# otherwise. See Note [Output comparison] for the meaning of the
# normaliser and whitespace_normaliser parameters.
def compare_outputs(way, kind, normaliser, expected_file, actual_file, diff_file=None,
                    whitespace_normaliser=lambda x: x):

    expected_path = in_srcdir(expected_file)
    actual_path = in_testdir(actual_file)

    if os.path.exists(expected_path):
        expected_str = normaliser(read_no_crs(expected_path))
        # Create the .normalised file in the testdir, not in the srcdir.
        expected_normalised_file = add_suffix(expected_file, 'normalised')
        expected_normalised_path = in_testdir(expected_normalised_file)
    else:
        expected_str = ''
        expected_normalised_path = '/dev/null'

    actual_raw = read_no_crs(actual_path)
    actual_str = normaliser(actual_raw)

    # See Note [Output comparison].
    if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
        return True
    else:
        if config.verbose >= 1 and _expect_pass(way):
            print('Actual ' + kind + ' output differs from expected:')

        if expected_normalised_path != '/dev/null':
            write_file(expected_normalised_path, expected_str)

        actual_normalised_path = add_suffix(actual_path, 'normalised')
        write_file(actual_normalised_path, actual_str)

        if config.verbose >= 1 and _expect_pass(way):
            # See Note [Output comparison].
            r = runCmd('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
                                                     actual_normalised_path),
                       stdout=diff_file,
                       print_output=True)

            # If for some reason there were no non-whitespace differences,
            # then do a full diff.
            if r == 0:
                r = runCmd('diff -u "{0}" "{1}"'.format(expected_normalised_path,
                                                        actual_normalised_path),
                           stdout=diff_file,
                           print_output=True)
        elif diff_file:
            # Make sure the file still exists, as we will try to read it later.
            open(diff_file, 'ab').close()

        if config.accept and (getTestOpts().expect == 'fail' or
                              way in getTestOpts().expect_fail_for):
            if_verbose(1, 'Test is expected to fail. Not accepting new output.')
            return False
        elif config.accept and actual_raw:
            if config.accept_platform:
                if_verbose(1, 'Accepting new output for platform "'
                              + config.platform + '".')
                expected_path += '-' + config.platform
            elif config.accept_os:
                if_verbose(1, 'Accepting new output for os "'
                              + config.os + '".')
                expected_path += '-' + config.os
            else:
                if_verbose(1, 'Accepting new output.')

            write_file(expected_path, actual_raw)
            return True
        elif config.accept:
            if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
            os.remove(expected_path)
            return True
        else:
            return False

# Note [Output comparison]
#
# We do two types of output comparison:
#
# 1. To decide whether a test has failed. We apply a `normaliser` and an
#    optional `whitespace_normaliser` to the expected and the actual
#    output, before comparing the two.
#
# 2. To show as a diff to the user when the test indeed failed. We apply
#    the same `normaliser` function to the outputs, to make the diff as
#    small as possible (only showing the actual problem). But we don't
#    apply the `whitespace_normaliser` here, because it might completely
#    squash all whitespace, making the diff unreadable. Instead we rely
#    on the `diff` program to ignore whitespace changes as much as
#    possible (#10152).

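# For example, check_prof_ok above passes normalise_prof as the
# `normaliser` (used in both steps) and normalise_whitespace as the
# `whitespace_normaliser` (used only in step 1), so a .prof file that
# differs from the sample only in column alignment still passes, while
# a real mismatch is shown to the user with its original layout.
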
def normalise_whitespace( str ):
    # Merge contiguous whitespace characters into a single space.
    return ' '.join(str.split())

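# For example:
#
#   >>> normalise_whitespace(' a\t b\n\nc ')
#   'a b c'
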
callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')

def normalise_callstacks(s):
    opts = getTestOpts()
    def repl(matches):
        location = matches.group(1)
        location = normalise_slashes_(location)
        return ', called at {0}:<line>:<column> in <package-id>:'.format(location)
    # Ignore line number differences in call stacks (#10834).
    s = re.sub(callSite_re, repl, s)
    # Ignore the change in how we identify implicit call-stacks.
    s = s.replace('from ImplicitParams', 'from HasCallStack')
    if not opts.keep_prof_callstacks:
        # Don't output prof callstacks. Test output should be
        # independent from the WAY we run the test.
        s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
    return s

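# For example (illustrative input), normalise_callstacks rewrites
#
#   undefined, called at Main.hs:12:9 in main:Main
#
# to
#
#   undefined, called at Main.hs:<line>:<column> in <package-id>:Main
#
# so tests are insensitive to source locations and package ids.
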
tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)

def normalise_type_reps(str):
    """ Normalise out fingerprints from Typeable TyCon representations """
    return re.sub(tyCon_re, 'TyCon FINGERPRINT FINGERPRINT ', str)

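# For example:
#
#   >>> normalise_type_reps('TyCon 123## 456## "base" ...')
#   'TyCon FINGERPRINT FINGERPRINT "base" ...'
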
def normalise_errmsg( str ):
    """Normalise error-messages emitted via stderr"""
    # IBM AIX's `ld` is a bit chatty
    if opsys('aix'):
        str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
    # remove " error:" and lower-case " Warning:" to make the patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    str = normalise_callstacks(str)
    str = normalise_type_reps(str)

    # If a file name ends in ".exe" or ".exe:", zap ".exe" (for Windows).
    # The colon is there because it appears in error messages; this
    # hacky solution is used in place of more sophisticated filename
    # mangling.
    str = re.sub('([^\\s])\\.exe', '\\1', str)

    # normalise slashes to minimise Windows/Unix filename differences
    str = re.sub('\\\\', '/', str)

    # The inplace ghc's are called ghc-stage[123] to avoid filename
    # collisions, so we need to normalise that to just "ghc"
    str = re.sub('ghc-stage[123]', 'ghc', str)

    # Error messages sometimes contain the name of the integer
    # implementation package
    str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)

    # Error messages sometimes contain this blurb, which can vary
    # spuriously depending upon build configuration (e.g. based on the
    # integer backend)
    str = re.sub('...plus ([a-z]+|[0-9]+) instances involving out-of-scope types',
                 '...plus N instances involving out-of-scope types', str)

    # Also filter out bullet characters. This is because bullets are used to
    # separate error sections, and tests shouldn't be sensitive to how the
    # division happens.
    bullet = '•'.encode('utf8') if isinstance(str, bytes) else '•'
    str = str.replace(bullet, '')

    # Windows only: this is a bug in hsc2hs, but it is preventing stable
    # output for the testsuite. See Trac #9775. For now we filter out this
    # warning message to get clean output.
    if config.msys:
        str = re.sub(r'Failed to remove file (.*); error= (.*)$', '', str)
        str = re.sub(r'DeleteFile "(.+)": permission denied \(Access is denied\.\)(.*)$', '', str)

    return str

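# For example (illustrative), on Windows a message such as
#
#   ghc-stage2.exe: T123.hs:1:1: error: parse error
#
# normalises to
#
#   ghc: T123.hs:1:1: parse error
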
# normalise a .prof file, so that we can reasonably compare it against
# a sample. This doesn't compare any of the actual profiling data,
# only the shape of the profile and the number of entries.
def normalise_prof (str):
    # strip everything up to the line beginning "COST CENTRE"
    str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)

    # strip results for CAFs, these tend to change unpredictably
    str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)

    # XXX Ignore Main.main. Sometimes this appears under CAF, and
    # sometimes under MAIN.
    str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)

    # We have something like this:
    #
    # MAIN      MAIN <built-in>                 53  0  0.0   0.2  0.0 100.0
    # CAF       Main <entire-module>           105  0  0.0   0.3  0.0  62.5
    # readPrec  Main Main_1.hs:7:13-16         109  1  0.0   0.6  0.0   0.6
    # readPrec  Main Main_1.hs:4:13-16         107  1  0.0   0.6  0.0   0.6
    # main      Main Main_1.hs:(10,1)-(20,20)  106  1  0.0  20.2  0.0  61.0
    # ==        Main Main_1.hs:7:25-26         114  1  0.0   0.0  0.0   0.0
    # ==        Main Main_1.hs:4:25-26         113  1  0.0   0.0  0.0   0.0
    # showsPrec Main Main_1.hs:7:19-22         112  2  0.0   1.2  0.0   1.2
    # showsPrec Main Main_1.hs:4:19-22         111  2  0.0   0.9  0.0   0.9
    # readPrec  Main Main_1.hs:7:13-16         110  0  0.0  18.8  0.0  18.8
    # readPrec  Main Main_1.hs:4:13-16         108  0  0.0  19.9  0.0  19.9
    #
    # then we remove all the specific profiling data, leaving only the cost
    # centre name, module, src, and entries, to end up with this (modulo
    # whitespace between columns):
    #
    # MAIN      MAIN <built-in>         0
    # readPrec  Main Main_1.hs:7:13-16  1
    # readPrec  Main Main_1.hs:4:13-16  1
    # ==        Main Main_1.hs:7:25-26  1
    # ==        Main Main_1.hs:4:25-26  1
    # showsPrec Main Main_1.hs:7:19-22  2
    # showsPrec Main Main_1.hs:4:19-22  2
    # readPrec  Main Main_1.hs:7:13-16  0
    # readPrec  Main Main_1.hs:4:13-16  0

    # Split into 9 whitespace-separated groups; keep columns 1 (cost-centre),
    # 2 (module), 3 (src), and 5 (entries). SCC names can't have whitespace,
    # so this works fine.
    str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
                 '\\1 \\2 \\3 \\5\n', str)
    return str

def normalise_slashes_( str ):
    str = re.sub('\\\\', '/', str)
    str = re.sub('//', '/', str)
    return str

def normalise_exe_( str ):
    str = re.sub(r'\.exe', '', str)
    return str

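# For example:
#
#   >>> normalise_slashes_('foo\\bar//baz')
#   'foo/bar/baz'
#   >>> normalise_exe_('ghc.exe')
#   'ghc'
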
def normalise_output( str ):
    # remove " error:" and lower-case " Warning:" to make the patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    # Remove a .exe extension (for Windows).
    # This can occur in error messages generated by the program.
    str = re.sub('([^\\s])\\.exe', '\\1', str)
    str = normalise_callstacks(str)
    str = normalise_type_reps(str)
    return str

def normalise_asm( str ):
    lines = str.split('\n')
    # Only keep instructions and labels not starting with a dot.
    metadata = re.compile('^[ \t]*\\..*$')
    out = []
    for line in lines:
        # Drop metadata directives (e.g. ".type")
        if not metadata.match(line):
            line = re.sub('@plt', '', line)
            instr = line.lstrip().split()
            # Drop empty lines.
            if not instr:
                continue
            # Drop operands, except for call instructions.
            elif instr[0] == 'call':
                out.append(instr[0] + ' ' + instr[1])
            else:
                out.append(instr[0])
    out = '\n'.join(out)
    return out

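# For example (illustrative x86 input), the lines
#
#       .text
#   foo_entry:
#       movq %rdi, %rax
#       call malloc@plt
#
# normalise to
#
#   foo_entry:
#   movq
#   call malloc
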
def if_verbose( n, s ):
    if config.verbose >= n:
        print(s)

def dump_file(f):
    try:
        with io.open(f) as file:
            print(file.read())
    except Exception:
        print('')

def runCmd(cmd, stdin=None, stdout=None, stderr=None, timeout_multiplier=1.0, print_output=False):
    timeout_prog = strip_quotes(config.timeout_prog)
    timeout = str(int(ceil(config.timeout * timeout_multiplier)))

    # Format cmd using config. Example: cmd='{hpc} report A.tix'
    cmd = cmd.format(**config.__dict__)
    if_verbose(3, cmd + ('< ' + os.path.basename(stdin) if stdin else ''))

    stdin_file = io.open(stdin, 'rb') if stdin else None
    stdout_buffer = b''
    stderr_buffer = b''

    hStdErr = subprocess.PIPE
    if stderr is subprocess.STDOUT:
        hStdErr = subprocess.STDOUT

    try:
        # cmd is a complex command in Bourne-shell syntax,
        # e.g. (cd . && 'C:/users/simonpj/HEAD/inplace/bin/ghc-stage2' ...etc).
        # Hence it must ultimately be run by a Bourne shell. It's timeout's
        # job to invoke the Bourne shell.

        r = subprocess.Popen([timeout_prog, timeout, cmd],
                             stdin=stdin_file,
                             stdout=subprocess.PIPE,
                             stderr=hStdErr,
                             env=ghc_env)

        stdout_buffer, stderr_buffer = r.communicate()
    finally:
        if stdin_file:
            stdin_file.close()
        if config.verbose >= 1 and print_output:
            if stdout_buffer:
                sys.stdout.buffer.write(stdout_buffer)
            if stderr_buffer:
                sys.stderr.buffer.write(stderr_buffer)

    if stdout:
        with io.open(stdout, 'wb') as f:
            f.write(stdout_buffer)
    if stderr:
        if stderr is not subprocess.STDOUT:
            with io.open(stderr, 'wb') as f:
                f.write(stderr_buffer)

    if r.returncode == 98:
        # The python timeout program uses 98 to signal that ^C was pressed.
        stopNow()
    if r.returncode == 99 and getTestOpts().exit_code != 99:
        # Only print a message when timeout killed the process unexpectedly.
        if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
    return r.returncode

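# Example usage (following the '{hpc} report A.tix' comment above):
# placeholders in braces are filled in from `config`, so
#
#   runCmd('{hpc} report A.tix', timeout_multiplier=2.0)
#
# runs `<hpc> report A.tix` under the timeout program with twice the
# usual time budget and returns its exit code. When the command string
# is itself built with .format(), the config placeholder must be
# doubled, as in check_hp_ok's '{{hp2ps}}'.
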
# -----------------------------------------------------------------------------
# checking if ghostscript is available for checking the output of hp2ps

def genGSCmd(psfile):
    return '{{gs}} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "{0}"'.format(psfile)

def gsNotWorking():
    global gs_working
    print("GhostScript not available for hp2ps tests")

global gs_working
gs_working = False
if config.have_profiling:
    if config.gs != '':
        resultGood = runCmd(genGSCmd(config.top + '/config/good.ps'))
        if resultGood == 0:
            resultBad = runCmd(genGSCmd(config.top + '/config/bad.ps') +
                               ' >/dev/null 2>&1')
            if resultBad != 0:
                print("GhostScript available for hp2ps tests")
                gs_working = True
            else:
                gsNotWorking()
        else:
            gsNotWorking()
    else:
        gsNotWorking()

def add_suffix( name, suffix ):
    if suffix == '':
        return name
    else:
        return name + '.' + suffix

def add_hs_lhs_suffix(name):
    if getTestOpts().c_src:
        return add_suffix(name, 'c')
    elif getTestOpts().cmm_src:
        return add_suffix(name, 'cmm')
    elif getTestOpts().objc_src:
        return add_suffix(name, 'm')
    elif getTestOpts().objcpp_src:
        return add_suffix(name, 'mm')
    elif getTestOpts().literate:
        return add_suffix(name, 'lhs')
    else:
        return add_suffix(name, 'hs')

def replace_suffix( name, suffix ):
    base, suf = os.path.splitext(name)
    return base + '.' + suffix

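# For example:
#
#   >>> add_suffix('T123', 'hs')
#   'T123.hs'
#   >>> replace_suffix('T123.hs', 'o')
#   'T123.o'
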
def in_testdir(name, suffix=''):
    return os.path.join(getTestOpts().testdir, add_suffix(name, suffix))

def in_srcdir(name, suffix=''):
    return os.path.join(getTestOpts().srcdir, add_suffix(name, suffix))

# Finding the sample output. The filename is of the form
#
#   <test>.stdout[-ws-<wordsize>][-<platform>|-<os>]
#
def find_expected_file(name, suff):
    basename = add_suffix(name, suff)
    # Override the basename if the user has specified one; this will then be
    # subjected to the same name mangling scheme as normal, to allow platform
    # specific overrides to work.
    basename = getTestOpts().use_specs.get(suff, basename)

    files = [basename + ws + plat
             for plat in ['-' + config.platform, '-' + config.os, '']
             for ws in ['-ws-' + config.wordsize, '']]

    for f in files:
        if os.path.exists(in_srcdir(f)):
            return f

    return basename

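# For example, find_expected_file('T1', 'stdout') on a hypothetical
# 64-bit x86_64-unknown-linux configuration tries the candidates in
# this order:
#
#   T1.stdout-ws-64-x86_64-unknown-linux
#   T1.stdout-x86_64-unknown-linux
#   T1.stdout-ws-64-linux
#   T1.stdout-linux
#   T1.stdout-ws-64
#   T1.stdout
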
if config.msys:
    import stat
    def cleanup():
        testdir = getTestOpts().testdir
        max_attempts = 5
        retries = max_attempts
        def on_error(function, path, excinfo):
            # At least one test (T11489) removes the write bit from a file it
            # produces. Windows refuses to delete read-only files with a
            # permission error. Try setting the write bit and try again.
            os.chmod(path, stat.S_IWRITE)
            function(path)

        # On Windows we have to retry the delete a couple of times.
        # The reason for this is that a FileDelete command just marks a
        # file for deletion. The file is really only removed when the last
        # handle to the file is closed. Unfortunately there are a lot of
        # system services that can have a file temporarily opened using a
        # shared readonly lock, such as the built-in AV and search indexer.
        #
        # We can't really guarantee that these are all off, so if the folder
        # still exists after an rmtree we wait a bit and try again.
        #
        # From what I've seen of the tests on the CI server, this is
        # relatively rare, so overall we won't be retrying a lot. If the
        # folder is still locked after a reasonable amount of time, abort
        # the current test by throwing an exception; that way it won't fail
        # later with an even more cryptic error.
        #
        # See Trac #13162
        exception = None
        while retries > 0 and os.path.exists(testdir):
            time.sleep((max_attempts-retries)*6)
            try:
                shutil.rmtree(testdir, onerror=on_error, ignore_errors=False)
            except Exception as e:
                exception = e
            retries -= 1

        if retries == 0 and os.path.exists(testdir):
            raise Exception("Unable to remove folder '%s': %s\nUnable to start current test."
                            % (testdir, exception))
else:
    def cleanup():
        testdir = getTestOpts().testdir
        if os.path.exists(testdir):
            shutil.rmtree(testdir, ignore_errors=False)


# -----------------------------------------------------------------------------
# Yield all the files ending in '.T' below the given root directories.

def findTFiles(roots):
    for root in roots:
        for path, dirs, files in os.walk(root, topdown=True):
            # Never pick up .T files in uncleaned .run directories.
            dirs[:] = [dir for dir in sorted(dirs)
                       if not dir.endswith(testdir_suffix)]
            for filename in files:
                if filename.endswith('.T'):
                    yield os.path.join(path, filename)

# -----------------------------------------------------------------------------
# Output a test summary to the specified file object

def summary(t, file, short=False, color=False):

    file.write('\n')
    printUnexpectedTests(file,
                         [t.unexpected_passes, t.unexpected_failures,
                          t.unexpected_stat_failures, t.framework_failures])

    if short:
        # Only print the list of unexpected tests above.
        return

    colorize = lambda s: s
    if color:
        if len(t.unexpected_failures) > 0 or \
           len(t.unexpected_stat_failures) > 0 or \
           len(t.unexpected_passes) > 0 or \
           len(t.framework_failures) > 0:
            colorize = str_fail
        else:
            colorize = str_pass

    file.write(colorize('SUMMARY') + ' for test run started at '
               + time.strftime("%c %Z", t.start_time) + '\n'
               + str(datetime.timedelta(seconds=
                     round(time.time() - time.mktime(t.start_time)))).rjust(8)
               + ' spent to go through\n'
               + repr(t.total_tests).rjust(8)
               + ' total tests, which gave rise to\n'
               + repr(t.total_test_cases).rjust(8)
               + ' test cases, of which\n'
               + repr(t.n_tests_skipped).rjust(8)
               + ' were skipped\n'
               + '\n'
               + repr(len(t.missing_libs)).rjust(8)
               + ' had missing libraries\n'
               + repr(t.n_expected_passes).rjust(8)
               + ' expected passes\n'
               + repr(t.n_expected_failures).rjust(8)
               + ' expected failures\n'
               + '\n'
               + repr(len(t.framework_failures)).rjust(8)
               + ' caused framework failures\n'
               + repr(len(t.framework_warnings)).rjust(8)
               + ' caused framework warnings\n'
               + repr(len(t.unexpected_passes)).rjust(8)
               + ' unexpected passes\n'
               + repr(len(t.unexpected_failures)).rjust(8)
               + ' unexpected failures\n'
               + repr(len(t.unexpected_stat_failures)).rjust(8)
               + ' unexpected stat failures\n'
               + '\n')

    if t.unexpected_passes:
        file.write('Unexpected passes:\n')
        printTestInfosSummary(file, t.unexpected_passes)

    if t.unexpected_failures:
        file.write('Unexpected failures:\n')
        printTestInfosSummary(file, t.unexpected_failures)

    if t.unexpected_stat_failures:
        file.write('Unexpected stat failures:\n')
        printTestInfosSummary(file, t.unexpected_stat_failures)

    if t.framework_failures:
        file.write('Framework failures:\n')
        printTestInfosSummary(file, t.framework_failures)

    if t.framework_warnings:
        file.write('Framework warnings:\n')
        printTestInfosSummary(file, t.framework_warnings)

    if stopping():
        file.write('WARNING: Testsuite run was terminated early\n')

def printUnexpectedTests(file, testInfoss):
    unexpected = set(result.testname
                     for testInfos in testInfoss
                     for result in testInfos
                     if not result.testname.endswith('.T'))
    if unexpected:
        file.write('Unexpected results from:\n')
        file.write('TEST="' + ' '.join(sorted(unexpected)) + '"\n')
        file.write('\n')

def printTestInfosSummary(file, testInfos):
    maxDirLen = max(len(tr.directory) for tr in testInfos)
    for result in testInfos:
        directory = result.directory.ljust(maxDirLen)
        file.write(' {directory} {r.testname} [{r.reason}] ({r.way})\n'.format(
            r = result,
            directory = directory))
    file.write('\n')

def modify_lines(s, f):
    s = '\n'.join([f(l) for l in s.splitlines()])
    if s and s[-1] != '\n':
        # Prevent '\ No newline at end of file' warnings when diffing.
        s += '\n'
    return s
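
# For example:
#
#   >>> modify_lines('x error: y\n', lambda l: re.sub(' error:', '', l))
#   'x y\n'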