Exit with exit code 1 when tests unexpectedly pass
[ghc.git] / testsuite / driver / testlib.py
1 # coding=utf8
2 #
3 # (c) Simon Marlow 2002
4 #
5
6 import io
7 import shutil
8 import os
9 import re
10 import traceback
11 import time
12 import datetime
13 import copy
14 import glob
15 import sys
16 from math import ceil, trunc
17 from pathlib import PurePath
18 import collections
19 import subprocess
20
21 from testglobals import config, ghc_env, default_testopts, brokens, t, TestResult
22 from testutil import strip_quotes, lndir, link_or_copy_file, passed, failBecause, failBecauseStderr, str_fail, str_pass
23 from cpu_features import have_cpu_feature
24 import perf_notes as Perf
25 from perf_notes import MetricChange
# Extra source files that individual tests depend on, beyond those that
# are discovered automatically by test-name prefix.
extra_src_files = {'T4198': ['exitminus1.c']} # TODO: See #12223

# Semaphore bounding the number of concurrently running test threads;
# only created when the runner is configured to use threads.
global pool_sema  # NOTE(review): 'global' at module scope is a no-op
if config.use_threads:
    import threading
    pool_sema = threading.BoundedSemaphore(value=config.threads)
# Cooperative-shutdown flag: set once via stopNow() (e.g. on Ctrl-C),
# polled by workers via stopping().
wantToStop = False

def stopNow():
    """Request that the driver stop scheduling further test work."""
    global wantToStop
    wantToStop = True

def stopping():
    """Return True once a stop has been requested via stopNow()."""
    return wantToStop
42
43
# Options valid for the current test only (these get reset to
# testdir_testopts after each test).

global testopts_local
if config.use_threads:
    # One slot per worker thread so parallel tests don't clobber each
    # other's options.
    testopts_local = threading.local()
else:
    # Single-threaded mode: any object with a settable attribute will do.
    class TestOpts_Local:
        pass
    testopts_local = TestOpts_Local()

def getTestOpts():
    # Options of the test currently running on this thread.
    return testopts_local.x

def setLocalTestOpts(opts):
    # Install 'opts' as the current test's options for this thread.
    global testopts_local
    testopts_local.x=opts
61
def isCompilerStatsTest():
    """True when the current test measures compiler performance numbers."""
    return bool(getTestOpts().is_compiler_stats_test)

def isStatsTest():
    """True when the current test collects any performance statistics."""
    return getTestOpts().is_stats_test
69
70
# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
    global thisdir_settings
    # Chain 'f' after the directory's current settings; executeSetups
    # flattens nested lists, so this accumulates setup functions.
    thisdir_settings = [thisdir_settings, f]
76
77 # -----------------------------------------------------------------------------
78 # Canned setup functions for common cases. eg. for a test you might say
79 #
80 # test('test001', normal, compile, [''])
81 #
82 # to run it without any options, but change it to
83 #
84 # test('test001', expect_fail, compile, [''])
85 #
86 # to expect failure for this test.
87 #
88 # type TestOpt = (name :: String, opts :: Object) -> IO ()
89
def normal( name, opts ):
    """Default setup function: leave the test options untouched."""
    return

def skip( name, opts ):
    """Skip this test entirely."""
    opts.skip = True

def expect_fail( name, opts ):
    # The compiler, testdriver, OS or platform is missing a certain
    # feature, and we don't plan to or can't fix it now or in the
    # future.
    opts.expect = 'fail'

def reqlib( lib ):
    """Mark the test as expecting a missing library unless 'lib' is installed."""
    return lambda name, opts, l=lib: _reqlib(name, opts, l)
104
def stage1(name, opts):
    """Deliberately unsupported setup: using it is itself a framework failure."""
    # See Note [Why is there no stage1 setup function?]
    framework_fail(name, 'stage1 setup function does not exist',
                   'add your test to testsuite/tests/stage1 instead')
109
110 # Note [Why is there no stage1 setup function?]
111 #
112 # Presumably a stage1 setup function would signal that the stage1
113 # compiler should be used to compile a test.
114 #
115 # Trouble is, the path to the compiler + the `ghc --info` settings for
116 # that compiler are currently passed in from the `make` part of the
117 # testsuite driver.
118 #
119 # Switching compilers in the Python part would be entirely too late, as
120 # all ghc_with_* settings would be wrong. See config/ghc for possible
121 # consequences (for example, config.run_ways would still be
122 # based on the default compiler, quite likely causing ./validate --slow
123 # to fail).
124 #
125 # It would be possible to let the Python part of the testsuite driver
126 # make the call to `ghc --info`, but doing so would require quite some
127 # work. Care has to be taken to not affect the run_command tests for
128 # example, as they also use the `ghc --info` settings:
129 # quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
130 #
131 # If you want a test to run using the stage1 compiler, add it to the
132 # testsuite/tests/stage1 directory. Validate runs the tests in that
133 # directory with `make stage=1`.
134
# Cache the results of looking to see if we have a library or not.
# This makes quite a difference, especially on Windows.
have_lib_cache = {}

def have_library(lib):
    """ Test whether the given library is available (via ghc-pkg describe) """
    if lib in have_lib_cache:
        got_it = have_lib_cache[lib]
    else:
        cmd = strip_quotes(config.ghc_pkg)
        p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             env=ghc_env)
        # read from stdout and stderr to avoid blocking due to
        # buffers filling
        p.communicate()
        # communicate() waits for process termination, so returncode is
        # already set; the previous extra p.wait() call was redundant.
        got_it = p.returncode == 0
        have_lib_cache[lib] = got_it

    return got_it
157
def _reqlib( name, opts, lib ):
    # Report 'missing-lib' (a distinct expectation class) when absent.
    if not have_library(lib):
        opts.expect = 'missing-lib'

def req_haddock( name, opts ):
    # Requires a ghc built with haddock support.
    if not config.haddock:
        opts.expect = 'missing-lib'

def req_profiling( name, opts ):
    '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
    if not config.have_profiling:
        opts.expect = 'fail'

def req_shared_libs( name, opts ):
    # Requires shared library support.
    if not config.have_shared_libs:
        opts.expect = 'fail'

def req_interp( name, opts ):
    # Requires the GHC interpreter.
    if not config.have_interp:
        opts.expect = 'fail'

def req_smp( name, opts ):
    # Requires a threaded (SMP-capable) runtime.
    if not config.have_smp:
        opts.expect = 'fail'
182
def ignore_stdout(name, opts):
    """Don't compare the program's stdout against the expected output."""
    opts.ignore_stdout = True

def ignore_stderr(name, opts):
    """Don't compare the program's stderr against the expected output."""
    opts.ignore_stderr = True

def combined_output( name, opts ):
    """Merge stderr into stdout and check them as a single stream."""
    opts.combined_output = True
191
def use_specs( specs ):
    """
    use_specs allows one to override files based on suffixes. e.g. 'stdout',
    'stderr', 'asm', 'prof.sample', etc.

    Example use_specs({'stdout' : 'prof002.stdout'}) to make the test re-use
    prof002.stdout.

    Full Example:
    test('T5889', [only_ways(['normal']), req_profiling,
                   extra_files(['T5889/A.hs', 'T5889/B.hs']),
                   use_specs({'stdout' : 'prof002.stdout'})],
         multimod_compile,
         ['A B', '-O -prof -fno-prof-count-entries -v0'])

    """
    return lambda name, opts, s=specs: _use_specs(name, opts, s)

def _use_specs( name, opts, specs ):
    # The override files themselves must be copied into the test dir.
    for f in specs.values():
        opts.extra_files.append(f)
    opts.use_specs = specs
213
# -----

def expect_fail_for( ways ):
    """Expect the test to fail, but only in the given ways."""
    return lambda name, opts, w=ways: _expect_fail_for(name, opts, w)

def _expect_fail_for( name, opts, ways ):
    opts.expect_fail_for = ways

def expect_broken( bug ):
    # This test is expected not to work due to the indicated trac bug
    # number. (fixed typo: was "a expected")
    return lambda name, opts, b=bug: _expect_broken(name, opts, b)

def _expect_broken( name, opts, bug ):
    record_broken(name, opts, bug)
    opts.expect = 'fail'

def expect_broken_for( bug, ways ):
    """Like expect_broken, but only for the given ways."""
    return lambda name, opts, b=bug, w=ways: _expect_broken_for(name, opts, b, w)

def _expect_broken_for( name, opts, bug, ways ):
    record_broken(name, opts, bug)
    opts.expect_fail_for = ways

def record_broken(name, opts, bug):
    # Remember (ticket, directory, test) so --list-broken can report it.
    me = (bug, opts.testdir, name)
    if me not in brokens:  # idiomatic membership test (was 'not me in brokens')
        brokens.append(me)

def _expect_pass(way):
    # Helper function. Not intended for use in .T files.
    opts = getTestOpts()
    return opts.expect == 'pass' and way not in opts.expect_fail_for
247
248 # -----
249
def omit_ways( ways ):
    """Never run this test in any of the given ways."""
    return lambda name, opts, w=ways: _omit_ways(name, opts, w)

def _omit_ways( name, opts, ways ):
    opts.omit_ways = ways

# -----

def only_ways( ways ):
    """Run this test only in the given ways."""
    return lambda name, opts, w=ways: _only_ways(name, opts, w)

def _only_ways( name, opts, ways ):
    opts.only_ways = ways

# -----

def extra_ways( ways ):
    """Run this test in the given ways in addition to the default ones."""
    return lambda name, opts, w=ways: _extra_ways(name, opts, w)

def _extra_ways( name, opts, ways ):
    opts.extra_ways = ways
271
272 # -----
273
def set_stdin( file ):
    """Feed the named file to the test program's stdin."""
    return lambda name, opts, f=file: _set_stdin(name, opts, f)

def _set_stdin( name, opts, f ):
    opts.stdin = f

# -----

def exit_code( val ):
    """Expect the test program to exit with the given code."""
    return lambda name, opts, v=val: _exit_code(name, opts, v)

def _exit_code( name, opts, v ):
    opts.exit_code = v

def signal_exit_code( val ):
    """Expect the program to be killed by the given signal number."""
    if opsys('solaris2'):
        return exit_code( val )
    # When application running on Linux receives fatal error
    # signal, then its exit code is encoded as 128 + signal
    # value. See http://www.tldp.org/LDP/abs/html/exitcodes.html
    # I assume that Mac OS X behaves in the same way at least Mac
    # OS X builder behavior suggests this.
    return exit_code( val+128 )
298
299 # -----
300
def compile_timeout_multiplier( val ):
    """Scale this test's compile-time timeout by the given factor."""
    return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)

def _compile_timeout_multiplier( name, opts, v ):
    opts.compile_timeout_multiplier = v

def run_timeout_multiplier( val ):
    """Scale this test's run-time timeout by the given factor."""
    return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)

def _run_timeout_multiplier( name, opts, v ):
    opts.run_timeout_multiplier = v
312
313 # -----
314
def extra_run_opts( val ):
    """Pass extra command-line arguments to the compiled test program."""
    return lambda name, opts, v=val: _extra_run_opts(name, opts, v)

def _extra_run_opts( name, opts, v ):
    opts.extra_run_opts = v

# -----

def extra_hc_opts( val ):
    """Pass extra flags to the compiler when building this test."""
    return lambda name, opts, v=val: _extra_hc_opts(name, opts, v)

def _extra_hc_opts( name, opts, v ):
    opts.extra_hc_opts = v
328
329 # -----
330
def extra_clean( files ):
    # TODO. Remove all calls to extra_clean.
    return lambda _name, _opts: None

def extra_files(files):
    """Declare additional files from the source dir that this test needs."""
    return lambda name, opts: _extra_files(name, opts, files)

def _extra_files(name, opts, files):
    opts.extra_files += files
340
341 # -----
342
343 # Defaults to "test everything, and only break on extreme cases"
344 #
345 # The inputs to this function are slightly interesting:
346 # metric can be either:
347 # - 'all', in which case all 3 possible metrics are collected and compared.
348 # - The specific metric one wants to use in the test.
349 # - A list of the metrics one wants to use in the test.
350 #
351 # Deviation defaults to 20% because the goal is correctness over performance.
352 # The testsuite should avoid breaking when there is not an actual error.
353 # Instead, the testsuite should notify of regressions in a non-breaking manner.
354 #
355 # collect_compiler_stats is used when the metrics collected are about the compiler.
356 # collect_stats is used in the majority case when the metrics to be collected
357 # are about the performance of the runtime code generated by the compiler.
def collect_compiler_stats(metric='all',deviation=20):
    """Collect performance metrics about the compiler itself."""
    return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d, True)

def collect_stats(metric='all', deviation=20):
    """Collect performance metrics about the generated/run code."""
    return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d)

def testing_metrics():
    """The metrics gathered when 'all' is requested."""
    return ['bytes allocated', 'peak_megabytes_allocated', 'max_bytes_used']
366
367 # This is an internal function that is used only in the implementation.
368 # 'is_compiler_stats_test' is somewhat of an unfortunate name.
369 # If the boolean is set to true, it indicates that this test is one that
370 # measures the performance numbers of the compiler.
371 # As this is a fairly rare case in the testsuite, it defaults to false to
372 # indicate that it is a 'normal' performance test.
def _collect_stats(name, opts, metrics, deviation, is_compiler_stats_test=False):
    """Register the perf metrics this test should collect and compare.

    metrics may be 'all', a single metric name, or a list of names;
    deviation is the allowed percentage change versus the baseline.
    """
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        # BUG FIX: failBecause() merely *returns* a failure value; the old
        # code discarded it, so invalid names slipped through silently.
        # Report a framework failure instead, as test() does.
        framework_fail(name, 'bad_name', 'This test has an invalid name.')

    # Normalize metrics to a list of strings.
    if isinstance(metrics, str):
        if metrics == 'all':
            metrics = testing_metrics()
        else:
            metrics = [metrics]

    opts.is_stats_test = True
    if is_compiler_stats_test:
        opts.is_compiler_stats_test = True

    # Compiler performance numbers change when debugging is on, making the results
    # useless and confusing. Therefore, skip if debugging is on.
    if config.compiler_debugged and is_compiler_stats_test:
        opts.skip = True  # was '1'; True for consistency with skip()

    for metric in metrics:
        # Bind 'metric' per iteration (default arg) to avoid late binding.
        def baselineByWay(way, target_commit, metric=metric):
            return Perf.baseline_metric( \
                target_commit, name, config.test_env, metric, way)

        opts.stats_range_fields[metric] = (baselineByWay, deviation)
399
400 # -----
401
def when(b, f):
    """Apply setup function 'f' only when condition 'b' holds."""
    # When list_brokens is on, we want to see all expect_broken calls,
    # so we always do f
    return f if (b or config.list_broken) else normal

def unless(b, f):
    """Apply setup function 'f' only when condition 'b' does NOT hold."""
    return when(not b, f)
412
# Predicates over the global test configuration, for use with when()/unless().

def doing_ghci():
    # Is the 'ghci' way enabled for this run?
    return 'ghci' in config.run_ways

def ghc_dynamic():
    return config.ghc_dynamic

def fast():
    # Highest speed setting (fewest ways run).
    return config.speed == 2

def platform( plat ):
    return config.platform == plat

def opsys( os ):
    # NOTE: the parameter shadows the 'os' module inside this function.
    return config.os == os

def arch( arch ):
    return config.arch == arch

def wordsize( ws ):
    return config.wordsize == str(ws)

def msys( ):
    return config.msys

def cygwin( ):
    return config.cygwin

def have_vanilla( ):
    return config.have_vanilla

def have_ncg( ):
    return config.have_ncg

def have_dynamic( ):
    return config.have_dynamic

def have_profiling( ):
    return config.have_profiling

def in_tree_compiler( ):
    return config.in_tree_compiler

def unregisterised( ):
    return config.unregisterised

def compiler_profiled( ):
    return config.compiler_profiled

def compiler_debugged( ):
    return config.compiler_debugged

def have_gdb( ):
    return config.have_gdb

def have_readelf( ):
    return config.have_readelf

def integer_gmp( ):
    return have_library("integer-gmp")

def integer_simple( ):
    return have_library("integer-simple")

def llvm_build ( ):
    # Was the compiler itself built by LLVM?
    return config.ghc_built_by_llvm
478
479 # ---
480
def high_memory_usage(name, opts):
    """Run this test with the machine to itself (high memory footprint)."""
    opts.alone = True

# If a test is for a multi-CPU race, then running the test alone
# increases the chance that we'll actually see it.
def multi_cpu_race(name, opts):
    opts.alone = True

# ---
def literate( name, opts ):
    """The test source is a literate (.lhs) file."""
    opts.literate = True

def c_src( name, opts ):
    """The test source is C."""
    opts.c_src = True

def objc_src( name, opts ):
    """The test source is Objective-C."""
    opts.objc_src = True

def objcpp_src( name, opts ):
    """The test source is Objective-C++."""
    opts.objcpp_src = True

def cmm_src( name, opts ):
    """The test source is C--."""
    opts.cmm_src = True
504
def outputdir( odir ):
    """Direct compiler output into the given directory."""
    return lambda name, opts, d=odir: _outputdir(name, opts, d)

def _outputdir( name, opts, odir ):
    opts.outputdir = odir
510
511 # ----
512
def pre_cmd( cmd ):
    """Run the given shell command in the test directory before the test."""
    # Use the bound default 'c' (the lambda previously declared c=cmd but
    # then closed over the outer 'cmd' instead), consistent with the other
    # setup functions such as set_stdin/exit_code.
    return lambda name, opts, c=cmd: _pre_cmd(name, opts, c)

def _pre_cmd( name, opts, cmd ):
    opts.pre_cmd = cmd
518
519 # ----
520
def cmd_prefix( prefix ):
    """Prepend 'prefix' to the command used to run the test."""
    # Use the bound default 'p' (previously the lambda declared p=prefix
    # but closed over the outer 'prefix'), consistent with other setups.
    return lambda name, opts, p=prefix: _cmd_prefix(name, opts, p)

def _cmd_prefix( name, opts, prefix ):
    opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd

# ----

def cmd_wrapper( fun ):
    """Transform the run command with 'fun' before executing it."""
    return lambda name, opts, f=fun: _cmd_wrapper(name, opts, f)

def _cmd_wrapper( name, opts, fun ):
    opts.cmd_wrapper = fun

# ----

def compile_cmd_prefix( prefix ):
    """Prepend 'prefix' to the compile command."""
    return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, p)

def _compile_cmd_prefix( name, opts, prefix ):
    opts.compile_cmd_prefix = prefix
542
543 # ----
544
def check_stdout( f ):
    """Install a custom stdout checker for this test.

    'f' presumably receives the test output for validation — confirm
    against the call site in the harness.
    """
    return lambda name, opts, f=f: _check_stdout(name, opts, f)

def _check_stdout( name, opts, f ):
    opts.check_stdout = f

def no_check_hp(name, opts):
    """Don't check the heap profile output."""
    opts.check_hp = False
553
554 # ----
555
def filter_stdout_lines( regex ):
    """ Filter lines of stdout with the given regular expression """
    def f( name, opts ):
        _normalise_fun(name, opts, lambda s: '\n'.join(re.findall(regex, s)))
    return f

def normalise_slashes( name, opts ):
    """Normalise path separators in the output before comparing."""
    _normalise_fun(name, opts, normalise_slashes_)

def normalise_exe( name, opts ):
    """Normalise executable-file suffixes in the output before comparing."""
    _normalise_fun(name, opts, normalise_exe_)

def normalise_fun( *fs ):
    """Add extra normalisers for the program's output."""
    return lambda name, opts: _normalise_fun(name, opts, fs)

def _normalise_fun( name, opts, *fs ):
    # Compose the new normalisers after any already registered.
    opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)

def normalise_errmsg_fun( *fs ):
    """Add extra normalisers for compiler error messages."""
    return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)

def _normalise_errmsg_fun( name, opts, *fs ):
    opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)
579
def check_errmsg(needle):
    """Replace the error output with a note saying whether 'needle' occurred."""
    def norm(s):
        if needle in s:
            return "%s contained in -ddump-simpl\n" % needle
        return "%s not contained in -ddump-simpl\n" % needle
    return normalise_errmsg_fun(norm)

def grep_errmsg(needle):
    """Keep only the error-output lines matching the regex 'needle'."""
    def norm(s):
        matching = [l for l in s.splitlines(True) if re.search(needle, l)]
        return "".join(matching)
    return normalise_errmsg_fun(norm)
592
def normalise_whitespace_fun(f):
    """Install 'f' as the whitespace normaliser for this test's output."""
    return lambda name, opts: _normalise_whitespace_fun(name, opts, f)

def _normalise_whitespace_fun(name, opts, f):
    opts.whitespace_normaliser = f
598
def normalise_version_( *pkgs ):
    """Build a normaliser replacing '<pkg>-1.2.3' with '<pkg>-<VERSION>'."""
    pattern = '(' + '|'.join(re.escape(p) for p in pkgs) + ')-[0-9.]+'
    def norm(text):
        return re.sub(pattern, '\\1-<VERSION>', text)
    return norm

def normalise_version( *pkgs ):
    """Normalise version numbers of the given packages in stdout and stderr."""
    def setup( name, opts ):
        _normalise_fun(name, opts, normalise_version_(*pkgs))
        _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
    return setup
610
def normalise_drive_letter(name, opts):
    # Windows only. Change D:\\ to C:\\.
    _normalise_fun(name, opts, lambda s: re.sub(r'[A-Z]:\\', r'C:\\', s))

def keep_prof_callstacks(name, opts):
    """Keep profiling callstacks.

    Use together with `only_ways(prof_ways)`.
    """
    opts.keep_prof_callstacks = True
621
def join_normalisers(*a):
    """
    Compose functions, flattening sequences.

    join_normalisers(f1,[f2,f3],f4)

    is the same as

    lambda x: f1(f2(f3(f4(x))))
    """

    def flatten(l):
        """
        Taken from http://stackoverflow.com/a/2158532/946226
        """
        for el in l:
            # BUG FIX: collections.Iterable was deprecated in 3.3 and
            # removed in Python 3.10; the ABC lives in collections.abc.
            if (isinstance(el, collections.abc.Iterable)
                and not isinstance(el, (bytes, str))):
                for sub in flatten(el):
                    yield sub
            else:
                yield el

    a = flatten(a)

    fn = lambda x:x # identity function
    for f in a:
        assert callable(f)
        # Bind f and fn as defaults so each layer captures the current values.
        fn = lambda x,f=f,fn=fn: fn(f(x))
    return fn
652
653 # ----
654 # Function for composing two opt-fns together
655
def executeSetups(fs, name, opts):
    """Apply a setup function, or an arbitrarily nested list of them, to opts."""
    if isinstance(fs, list):  # isinstance over 'type(fs) is list' (idiom)
        # If we have a list of setups, then execute each one
        for f in fs:
            executeSetups(f, name, opts)
    else:
        # fs is a single function, so just apply it
        fs(name, opts)
664
# -----------------------------------------------------------------------------
# The current directory of tests

def newTestDir(tempdir, dir):
    """Enter a new test directory, resetting the per-directory settings."""
    global thisdir_settings
    # reset the options for this test directory
    thisdir_settings = lambda name, opts, t=tempdir, d=dir: _newTestDir(name, opts, t, d)

# Should be equal to entry in toplevel .gitignore.
testdir_suffix = '.run'

def _newTestDir(name, opts, tempdir, dir):
    # Drop any leading '..' components so the testdir stays under tempdir.
    parts = (p for p in PurePath(dir).parts if p != '..')
    testdir = os.path.join('', *parts)
    opts.srcdir = os.path.join(os.getcwd(), dir)
    opts.testdir = os.path.join(tempdir, testdir, name + testdir_suffix)
    opts.compiler_always_flags = config.compiler_always_flags
683 opts.compiler_always_flags = config.compiler_always_flags
684
# -----------------------------------------------------------------------------
# Actually doing tests

parallelTests = []      # tests that may run concurrently
aloneTests = []         # tests that need the machine to themselves
allTestNames = set([])  # every test name seen, for duplicate detection

def runTest(watcher, opts, name, func, args):
    # Run one test: on its own thread (bounded by pool_sema) when
    # threading is enabled, otherwise synchronously.
    if config.use_threads:
        pool_sema.acquire()
        t = threading.Thread(target=test_common_thread,
                             name=name,
                             args=(watcher, name, opts, func, args))
        t.daemon = False
        t.start()
    else:
        test_common_work(watcher, name, opts, func, args)
702
# name :: String
# setup :: [TestOpt] -> IO ()
def test(name, setup, func, args):
    # Register one test: validate its name, build its options from the
    # directory defaults plus 'setup', and queue it for execution.
    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    if config.run_only_some_tests:
        if name not in config.only:
            return
        else:
            # Note [Mutating config.only]
            # config.only is initially the set of tests requested by
            # the user (via 'make TEST='). We then remove all tests that
            # we've already seen (in .T files), so that we can later
            # report on any tests we couldn't find and error out.
            config.only.remove(name)

    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
    myTestOpts = copy.deepcopy(default_testopts)

    executeSetups([thisdir_settings, setup], name, myTestOpts)

    thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
    else:
        parallelTests.append(thisTest)
    allTestNames.add(name)
739
if config.use_threads:
    def test_common_thread(watcher, name, opts, func, args):
        # Thread entry point: always release the worker-pool slot, even
        # when the test body raises.
        try:
            test_common_work(watcher, name, opts, func, args)
        finally:
            pool_sema.release()
746
def get_package_cache_timestamp():
    """Return the mtime of the package-db cache file, or 0.0 if unavailable."""
    if config.package_conf_cache_file == '':
        return 0.0
    else:
        try:
            return os.stat(config.package_conf_cache_file).st_mtime
        except OSError:
            # Narrowed from a bare 'except:': only stat failures (missing
            # or inaccessible file) should fall back to 0.0 — a bare
            # except would also swallow e.g. KeyboardInterrupt.
            return 0.0
755
# File extensions never copied into the test directory (build products).
do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o', '.out') # 12112

def test_common_work(watcher, name, opts, func, args):
    # Run test 'name' in every applicable way; always notifies 'watcher'
    # on exit so the scheduler can make progress.
    try:
        t.total_tests += 1
        setLocalTestOpts(opts)

        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()

        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']

        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]

        t.total_test_cases += len(all_ways)

        # A way runs unless the test is skipped, the way is filtered out by
        # only_ways/omit_ways/cmdline, or the perf-test filters exclude it.
        ok_way = lambda way: \
            not getTestOpts().skip \
            and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and (not (config.only_perf_tests and not isStatsTest())) \
            and way not in getTestOpts().omit_ways

        # Which ways we are asked to skip
        do_ways = list(filter (ok_way,all_ways))

        # Only run all ways in slow mode.
        # See Note [validate and testsuite speed] in toplevel Makefile.
        if config.accept:
            # Only ever run one way
            do_ways = do_ways[:1]
        elif config.speed > 0:
            # However, if we EXPLICITLY asked for a way (with extra_ways)
            # please test it!
            explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
            other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
            do_ways = other_ways[:1] + explicit_ways

        # Find all files in the source directory that this test
        # depends on. Do this only once for all ways.
        # Generously add all filenames that start with the name of
        # the test to this set, as a convenience to test authors.
        # They will have to use the `extra_files` setup function to
        # specify all other files that their test depends on (but
        # this seems to be necessary for only about 10% of all
        # tests).
        files = set(f for f in os.listdir(opts.srcdir)
                    if f.startswith(name) and not f == name and
                       not f.endswith(testdir_suffix) and
                       not os.path.splitext(f)[1] in do_not_copy)
        for filename in (opts.extra_files + extra_src_files.get(name, [])):
            if filename.startswith('/'):
                framework_fail(name, 'whole-test',
                    'no absolute paths in extra_files please: ' + filename)

            elif '*' in filename:
                # Don't use wildcards in extra_files too much, as
                # globbing is slow.
                files.update((os.path.relpath(f, opts.srcdir)
                            for f in glob.iglob(in_srcdir(filename))))

            elif filename:
                files.add(filename)

            else:
                framework_fail(name, 'whole-test', 'extra_file is empty string')

        # Run the required tests...
        for way in do_ways:
            if stopping():
                break
            try:
                do_test(name, way, func, args, files)
            except KeyboardInterrupt:
                stopNow()
            except Exception as e:
                framework_fail(name, way, str(e))
                traceback.print_exc()

        t.n_tests_skipped += len(set(all_ways) - set(do_ways))

        if config.cleanup and do_ways:
            try:
                cleanup()
            except Exception as e:
                framework_fail(name, 'runTest', 'Unhandled exception during cleanup: ' + str(e))

        package_conf_cache_file_end_timestamp = get_package_cache_timestamp();

        # A changed timestamp means some test mutated the package db cache,
        # which would invalidate other tests' results.
        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))

    except Exception as e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
    finally:
        watcher.notify()
864
def do_test(name, way, func, args, files):
    # Prepare a clean test directory, copy/link dependencies into it, run
    # 'func', and record the outcome in the global TestRun 't'.
    opts = getTestOpts()

    full_name = name + '(' + way + ')'

    if_verbose(2, "=====> {0} {1} of {2} {3}".format(
        full_name, t.total_tests, len(allTestNames),
        [len(t.unexpected_passes),
         len(t.unexpected_failures),
         len(t.framework_failures)]))

    # Clean up prior to the test, so that we can't spuriously conclude
    # that it passed on the basis of old run outputs.
    cleanup()
    os.makedirs(opts.testdir)

    # Link all source files for this test into a new directory in
    # /tmp, and run the test in that directory. This makes it
    # possible to run tests in parallel, without modification, that
    # would otherwise (accidentally) write to the same output file.
    # It also makes it easier to keep the testsuite clean.

    for extra_file in files:
        src = in_srcdir(extra_file)
        dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
        if os.path.isfile(src):
            link_or_copy_file(src, dst)
        elif os.path.isdir(src):
            if os.path.exists(dst):
                shutil.rmtree(dst)
            os.mkdir(dst)
            lndir(src, dst)
        else:
            if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
                # When using a ghc built without haddock support, .t
                # files are rightfully missing. Don't
                # framework_fail. Test will be skipped later.
                pass
            else:
                framework_fail(name, way,
                    'extra_file does not exist: ' + extra_file)

    if func.__name__ == 'run_command' or func.__name__ == 'makefile_test' or opts.pre_cmd:
        # When running 'MAKE' make sure 'TOP' still points to the
        # root of the testsuite.
        src_makefile = in_srcdir('Makefile')
        dst_makefile = in_testdir('Makefile')
        if os.path.exists(src_makefile):
            with io.open(src_makefile, 'r', encoding='utf8') as src:
                makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
            with io.open(dst_makefile, 'w', encoding='utf8') as dst:
                dst.write(makefile)

    if opts.pre_cmd:
        exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, override_options(opts.pre_cmd)),
                           stderr = subprocess.STDOUT,
                           print_output = config.verbose >= 3)

        # If user used expect_broken then don't record failures of pre_cmd
        if exit_code != 0 and opts.expect not in ['fail']:
            framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
            if_verbose(1, '** pre_cmd was "{0}".'.format(override_options(opts.pre_cmd)))

    # Run the actual test function for the chosen way.
    result = func(*[name,way] + args)

    if opts.expect not in ['pass', 'fail', 'missing-lib']:
        framework_fail(name, way, 'bad expected ' + opts.expect)

    # 'result' should be a dict with at least a 'passFail' key.
    try:
        passFail = result['passFail']
    except (KeyError, TypeError):
        passFail = 'No passFail found'

    directory = re.sub('^\\.[/\\\\]', '', opts.testdir)

    # Classify the outcome against the declared expectation.
    if passFail == 'pass':
        if _expect_pass(way):
            t.expected_passes.append(TestResult(directory, name, "", way))
            t.n_expected_passes += 1
        else:
            if_verbose(1, '*** unexpected pass for %s' % full_name)
            t.unexpected_passes.append(TestResult(directory, name, 'unexpected', way))
    elif passFail == 'fail':
        if _expect_pass(way):
            reason = result['reason']
            tag = result.get('tag')
            if tag == 'stat':
                if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
                t.unexpected_stat_failures.append(TestResult(directory, name, reason, way))
            else:
                if_verbose(1, '*** unexpected failure for %s' % full_name)
                result = TestResult(directory, name, reason, way, stderr=result.get('stderr'))
                t.unexpected_failures.append(result)
        else:
            if opts.expect == 'missing-lib':
                t.missing_libs.append(TestResult(directory, name, 'missing-lib', way))
            else:
                t.n_expected_failures += 1
    else:
        framework_fail(name, way, 'bad result ' + passFail)
965
# Make is often invoked with -s, which means if it fails, we get
# no feedback at all. This is annoying. So let's remove the option
# if found and instead have the testsuite decide on what to do
# with the output.
def override_options(pre_cmd):
    """At high verbosity, strip make's silencing flags from 'pre_cmd'."""
    # Raw string for the regex: '\$' in a non-raw literal is an invalid
    # escape sequence (DeprecationWarning today, an error in future
    # Python versions).
    if config.verbose >= 5 and bool(re.match(r'\$make', pre_cmd, re.I)):
        return pre_cmd.replace('-s' , '') \
                      .replace('--silent', '') \
                      .replace('--quiet' , '')

    return pre_cmd
977
def framework_fail(name, way, reason):
    """Record a failure of the test framework itself (not of the test)."""
    directory = re.sub('^\\.[/\\\\]', '', getTestOpts().testdir)
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
    t.framework_failures.append(TestResult(directory, name, reason, way))

def framework_warn(name, way, reason):
    """Record a non-fatal warning from the test framework."""
    directory = re.sub('^\\.[/\\\\]', '', getTestOpts().testdir)
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework warning for %s %s ' % (full_name, reason))
    t.framework_warnings.append(TestResult(directory, name, reason, way))
991
def badResult(result):
    """A result is bad unless it is a dict whose 'passFail' is 'pass'."""
    try:
        return result['passFail'] != 'pass'
    except (KeyError, TypeError):
        # Missing key, or not a dict at all: treat as bad.
        return True
999
# -----------------------------------------------------------------------------
# Generic command tests

# A generic command test is expected to run and exit successfully.
#
# The expected exit code can be changed via exit_code() as normal, and
# the expected stdout/stderr are stored in <testname>.stdout and
# <testname>.stderr. The output of the command can be ignored
# altogether by using the setup function ignore_stdout instead of
# run_command.

def run_command( name, way, cmd ):
    """Run an arbitrary shell command as the whole test."""
    return simple_run( name, '', override_options(cmd), '' )

def makefile_test( name, way, target=None ):
    """Run 'make <target>' (defaulting to the test's own name) as the test."""
    cmd = '$MAKE -s --no-print-directory {target}'.format(target=name if target is None else target)
    return run_command(name, way, cmd)
1020
1021 # -----------------------------------------------------------------------------
1022 # GHCi tests
1023
def ghci_script( name, way, script):
    """Run *script* through GHCi and compare its output."""
    opt_str = ' '.join(get_compiler_flags())
    way_str = ' '.join(config.way_flags[way])

    # HC and HC_OPTS are exported as environment variables so the script
    # can invoke the correct compiler itself via ':! $HC $HC_OPTS'.
    # NB: way_flags come before flags so that flags from all.T can
    # override the way-specific ones.
    cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {way_flags} {flags}'
          ).format(flags=opt_str, way_flags=way_str)

    getTestOpts().stdin = script
    return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1036
1037 # -----------------------------------------------------------------------------
1038 # Compile-only tests
1039
# Thin wrappers around do_compile / compile_and_run__ selecting:
#   - whether compilation must succeed (0) or fail (1),
#   - the top module for --make (or '' for single-module tests),
#   - any extra modules to build first, and
#   - whether to compile in Backpack mode.

# Compile a single module; compilation must succeed.
def compile( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], extra_hc_opts )

# Compile a single module; compilation must fail.
def compile_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], extra_hc_opts )

# Typecheck a Backpack (.bkp) file without generating code; must succeed.
def backpack_typecheck( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )

# Typecheck a Backpack file without generating code; must fail.
def backpack_typecheck_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )

# Compile a Backpack file; must succeed.
def backpack_compile( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], extra_hc_opts, backpack=True )

# Compile a Backpack file; must fail.
def backpack_compile_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], extra_hc_opts, backpack=True )

# Compile and run a Backpack program.
def backpack_run( name, way, extra_hc_opts ):
    return compile_and_run__( name, way, '', [], extra_hc_opts, backpack=True )

# Compile a multi-module program with --make; must succeed.
def multimod_compile( name, way, top_mod, extra_hc_opts ):
    return do_compile( name, way, 0, top_mod, [], extra_hc_opts )

# Compile a multi-module program with --make; must fail.
def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
    return do_compile( name, way, 1, top_mod, [], extra_hc_opts )

# Compile a multi-module program with separately-built extras; must succeed.
def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
    return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)

# Compile a multi-module program with separately-built extras; must fail.
def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
    return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
1072
def do_compile(name, way, should_fail, top_mod, extra_mods, extra_hc_opts, **kwargs):
    """Compile a test and check its stderr against the expected output.

    should_fail: non-zero if compilation is expected to fail.
    top_mod:     top module for --make, or '' for a single-module test.
    extra_mods:  [(module, hc_opts)] auxiliary modules, built first.
    kwargs:      passed through to simple_build (e.g. backpack=True).
    Returns a pass/fail result dict.
    """
    # Build auxiliary modules first; extras_build extends the options
    # with the object files of any non-Haskell extras.
    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    result = simple_build(name, way, extra_hc_opts, should_fail, top_mod, 0, 1, **kwargs)

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    expected_stderr_file = find_expected_file(name, 'stderr')
    actual_stderr_file = add_suffix(name, 'comp.stderr')
    diff_file_name = in_testdir(add_suffix(name, 'comp.diff'))

    if not compare_outputs(way, 'stderr',
                           join_normalisers(getTestOpts().extra_errmsg_normaliser,
                                            normalise_errmsg),
                           expected_stderr_file, actual_stderr_file,
                           diff_file=diff_file_name,
                           whitespace_normaliser=getattr(getTestOpts(),
                                                         "whitespace_normaliser",
                                                         normalise_whitespace)):
        # On mismatch, return the diff produced by compare_outputs as the
        # failure reason and clean up the temporary diff file.
        stderr = open(diff_file_name, 'rb').read()
        os.remove(diff_file_name)
        return failBecauseStderr('stderr mismatch', stderr=stderr )


    # no problems found, this test passed
    return passed()
1109
def compile_cmp_asm( name, way, extra_hc_opts ):
    """Compile a .cmm file with -keep-s-files and compare the generated
    assembly against the expected <name>.asm file."""
    print('Compile only, extra args = ', extra_hc_opts)
    build_result = simple_build(name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)
    if badResult(build_result):
        return build_result

    # Compare the normalised assembly against the expected sample.
    expected_asm_file = find_expected_file(name, 'asm')
    actual_asm_file = add_suffix(name, 's')

    if compare_outputs(way, 'asm',
                       join_normalisers(normalise_errmsg, normalise_asm),
                       expected_asm_file, actual_asm_file):
        # no problems found, this test passed
        return passed()
    return failBecause('asm mismatch')
1131
1132 # -----------------------------------------------------------------------------
1133 # Compile-and-run tests
1134
def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts, backpack=0 ):
    """Compile a program (or load it into GHCi) and then run it."""
    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    # GHCi ways interpret the program instead of compiling it.
    if way.startswith('ghci'):
        return interpreter_run(name, way, extra_hc_opts, top_mod)

    result = simple_build(name, way, extra_hc_opts, 0, top_mod, 1, 1, backpack = backpack)
    if badResult(result):
        return result

    # we don't check the compiler's stderr for a compile-and-run test
    return simple_run( name, way, './' + name, getTestOpts().extra_run_opts )
1154
# Compile and run a single-module program.
def compile_and_run( name, way, extra_hc_opts ):
    return compile_and_run__( name, way, '', [], extra_hc_opts)

# Compile (with --make) and run a multi-module program.
def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
    return compile_and_run__( name, way, top_mod, [], extra_hc_opts)

# Compile and run a multi-module program with separately-built extra modules.
def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
    return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
1163
def stats( name, way, stats_file ):
    """Check the RTS stats in *stats_file* against this test's expected
    value ranges."""
    return check_stats(name, way, stats_file, getTestOpts().stats_range_fields)
1167
def metric_dict(name, way, metric, value):
    """Wrap a single measured metric value in a Perf.PerfStat record,
    tagged with the current test environment."""
    return Perf.PerfStat(test_env = config.test_env,
                         test     = name,
                         way      = way,
                         metric   = metric,
                         value    = value)
1175
1176 # -----------------------------------------------------------------------------
1177 # Check test stats. This prints the results for the user.
1178 # name: name of the test.
1179 # way: the way.
1180 # stats_file: the path of the stats_file containing the stats for the test.
1181 # range_fields: see TestOptions.stats_range_fields
1182 # Returns a pass/fail object. Passes if the stats are withing the expected value ranges.
1183 # This prints the results for the user.
def check_stats(name, way, stats_file, range_fields):
    """Check the stats in *stats_file* against the expected value ranges.

    name:         name of the test.
    way:          the way.
    stats_file:   path of the file containing the stats for the test.
    range_fields: see TestOptions.stats_range_fields
    Returns a pass/fail object; passes iff every metric is within range.
    Each metric is also recorded (in t.metrics) so it can later be stored
    in a git note, and the results are printed for the user.
    """
    head_commit = Perf.commit_hash('HEAD') if Perf.inside_git_repo() else None
    result = passed()
    if range_fields:
        # Context manager instead of bare open/close: the file is closed
        # even if read() raises.
        try:
            with open(in_testdir(stats_file)) as f:
                stats_file_contents = f.read()
        except IOError as e:
            return failBecause(str(e))

        for (metric, baseline_and_dev) in range_fields.items():
            # Metrics appear in the file as ("<metric>", "<value>") pairs.
            # Raw strings: '\(' is an invalid escape in a plain literal.
            field_match = re.search(r'\("' + metric + r'", "([0-9]+)"\)', stats_file_contents)
            if field_match is None:
                print('Failed to find metric: ', metric)
                metric_result = failBecause('no such stats metric')
            else:
                actual_val = int(field_match.group(1))

                # Store the metric so it can later be stored in a git note.
                perf_stat = metric_dict(name, way, metric, actual_val)
                change = None

                # If this is the first time running the benchmark, then pass.
                baseline = baseline_and_dev[0](way, head_commit) \
                    if Perf.inside_git_repo() else None
                if baseline is None:
                    metric_result = passed()
                    change = MetricChange.NewMetric
                else:
                    tolerance_dev = baseline_and_dev[1]
                    (change, metric_result) = Perf.check_stats_change(
                        perf_stat,
                        baseline,
                        tolerance_dev,
                        config.allowed_perf_changes,
                        config.verbose >= 4)
                t.metrics.append((change, perf_stat))

            # If any metric fails then the test fails.
            # Note, the remaining metrics are still run so that
            # a complete list of changes can be presented to the user.
            if metric_result['passFail'] == 'fail':
                result = metric_result

    return result
1230
1231 # -----------------------------------------------------------------------------
1232 # Build a single-module program
1233
def extras_build( way, extra_mods, extra_hc_opts ):
    """Build the auxiliary modules of a test one by one.  Object files of
    non-Haskell extras (e.g. .c files) are appended to the compiler
    options.  Returns a pass/fail dict that also carries the extended
    options under 'hc_opts'."""
    for src, src_opts in extra_mods:
        build_result = simple_build(src, way, src_opts + ' ' + extra_hc_opts, 0, '', 0, 0)
        # Non-Haskell sources are linked in via their object file.
        if not src.endswith(('.hs', '.lhs')):
            extra_hc_opts += ' ' + replace_suffix(src, 'o')
        if badResult(build_result):
            return build_result

    return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1243
def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, backpack = False):
    """Invoke the compiler once on a test and check the outcome.

    should_fail: non-zero if the compilation is expected to fail.
    top_mod:     top module for --make, or '' for single-source builds.
    link:        non-zero to produce an executable named after the test.
    addsuf:      non-zero to append a .hs/.lhs (or .bkp) suffix to name.
    backpack:    compile with --backpack instead of the normal pipeline.
    Returns a pass/fail result dict (including stats checks for
    compiler-stats tests).
    """
    opts = getTestOpts()

    # Redirect stdout and stderr to the same file
    stdout = in_testdir(name, 'comp.stderr')
    stderr = subprocess.STDOUT

    # Work out the source file name to hand to the compiler.
    if top_mod != '':
        srcname = top_mod
    elif addsuf:
        if backpack:
            srcname = add_suffix(name, 'bkp')
        else:
            srcname = add_hs_lhs_suffix(name)
    else:
        srcname = name

    # Work out the compilation mode flags (--make / --backpack / -o / -c).
    if top_mod != '':
        to_do = '--make '
        if link:
            to_do = to_do + '-o ' + name
    elif backpack:
        if link:
            to_do = '-o ' + name + ' '
        else:
            to_do = ''
        to_do = to_do + '--backpack '
    elif link:
        to_do = '-o ' + name
    else:
        to_do = '-c' # just compile

    # For compiler-stats tests, ask the compiler's RTS to emit
    # machine-readable stats into <name>.comp.stats.
    stats_file = name + '.comp.stats'
    if isCompilerStatsTest():
        extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    if backpack:
        extra_hc_opts += ' -outputdir ' + name + '.out'

    # Required by GHC 7.3+, harmless for earlier versions:
    if (getTestOpts().c_src or
        getTestOpts().objc_src or
        getTestOpts().objcpp_src or
        getTestOpts().cmm_src):
        extra_hc_opts += ' -no-hs-main '

    if getTestOpts().compile_cmd_prefix == '':
        cmd_prefix = ''
    else:
        cmd_prefix = getTestOpts().compile_cmd_prefix + ' '

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    # '{compiler}' is left doubly-braced here; runCmd substitutes it.
    cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
           '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())

    exit_code = runCmd(cmd, None, stdout, stderr, opts.compile_timeout_multiplier)

    actual_stderr_path = in_testdir(name, 'comp.stderr')

    # Show the compiler's errors when an expected-to-pass compile failed.
    if exit_code != 0 and not should_fail:
        if config.verbose >= 1 and _expect_pass(way):
            print('Compile failed (exit code {0}) errors were:'.format(exit_code))
            dump_file(actual_stderr_path)

    # ToDo: if the sub-shell was killed by ^C, then exit

    if isCompilerStatsTest():
        statsResult = check_stats(name, way, stats_file, opts.stats_range_fields)
        if badResult(statsResult):
            return statsResult

    # The exit code must agree with should_fail; on mismatch the captured
    # compiler stderr is attached to the failure.
    if should_fail:
        if exit_code == 0:
            stderr_contents = open(actual_stderr_path, 'rb').read()
            return failBecauseStderr('exit code 0', stderr_contents)
    else:
        if exit_code != 0:
            stderr_contents = open(actual_stderr_path, 'rb').read()
            return failBecauseStderr('exit code non-0', stderr_contents)

    return passed()
1326
1327 # -----------------------------------------------------------------------------
1328 # Run a program and check its output
1329 #
1330 # If testname.stdin exists, route input from that, else
1331 # from /dev/null. Route output to testname.run.stdout and
1332 # testname.run.stderr. Returns the exit code of the run.
1333
def simple_run(name, way, prog, extra_run_opts):
    """Run *prog* and check its exit code, stdout and stderr.

    Input is routed from <name>.stdin (or opts.stdin) if present, else
    none; output goes to <name>.run.stdout / <name>.run.stderr.  Also
    checks heap/time profiles and RTS stats where the way requires it.
    Returns a pass/fail result dict.
    """
    opts = getTestOpts()

    # figure out what to use for stdin
    if opts.stdin:
        stdin = in_testdir(opts.stdin)
    elif os.path.exists(in_testdir(name, 'stdin')):
        stdin = in_testdir(name, 'stdin')
    else:
        stdin = None

    stdout = in_testdir(name, 'run.stdout')
    if opts.combined_output:
        stderr = subprocess.STDOUT
    else:
        stderr = in_testdir(name, 'run.stderr')

    my_rts_flags = rts_flags(way)

    # Collect stats if necessary:
    # isStatsTest and not isCompilerStatsTest():
    #   assume we are running a ghc compiled program. Collect stats.
    # isStatsTest and way == 'ghci':
    #   assume we are running a program via ghci. Collect stats
    stats_file = name + '.stats'
    if isStatsTest() and (not isCompilerStatsTest() or way == 'ghci'):
        stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    else:
        stats_args = ''

    # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
    cmd = prog + stats_args + ' ' + my_rts_flags + ' ' + extra_run_opts

    if opts.cmd_wrapper != None:
        cmd = opts.cmd_wrapper(cmd)

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    # run the command
    exit_code = runCmd(cmd, stdin, stdout, stderr, opts.run_timeout_multiplier)

    # check the exit code
    if exit_code != opts.exit_code:
        if config.verbose >= 1 and _expect_pass(way):
            print('Wrong exit code for ' + name + '(' + way + ')' + '(expected', opts.exit_code, ', actual', exit_code, ')')
            dump_stdout(name)
            dump_stderr(name)
        return failBecause('bad exit code')

    # Output checks can be disabled per-test via ignore_stderr/ignore_stdout.
    if not (opts.ignore_stderr or stderr_ok(name, way) or opts.combined_output):
        return failBecause('bad stderr')
    if not (opts.ignore_stdout or stdout_ok(name, way)):
        return failBecause('bad stdout')

    check_hp = '-h' in my_rts_flags and opts.check_hp
    check_prof = '-p' in my_rts_flags

    # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
    if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
        return failBecause('bad heap profile')
    if check_prof and not check_prof_ok(name, way):
        return failBecause('bad profile')

    return check_stats(name, way, stats_file, opts.stats_range_fields)
1398
def rts_flags(way):
    """Render the way's RTS options as a '+RTS ... -RTS' argument string,
    or '' when the way has none."""
    way_args = config.way_rts_flags.get(way, [])
    if not way_args:
        return ''
    return '+RTS {0} -RTS'.format(' '.join(way_args))
1402
1403 # -----------------------------------------------------------------------------
1404 # Run a program in the interpreter and check its output
1405
def interpreter_run(name, way, extra_hc_opts, top_mod):
    """Run a compile-and-run test via GHCi instead of compiling it.

    A GHCi script is generated that loads the program, prints marker
    lines to stdout/stderr, and runs Main.main; the captured output is
    then split into compiler output and program output and checked as in
    simple_run.  Returns a pass/fail result dict.
    """
    opts = getTestOpts()

    stdout = in_testdir(name, 'interp.stdout')
    stderr = in_testdir(name, 'interp.stderr')
    script = in_testdir(name, 'genscript')

    if opts.combined_output:
        framework_fail(name, 'unsupported',
                       'WAY=ghci and combined_output together is not supported')

    if (top_mod == ''):
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = top_mod

    delimiter = '===== program output begins here\n'

    with io.open(script, 'w', encoding='utf8') as f:
        # set the prog name and command-line args to match the compiled
        # environment.
        f.write(':set prog ' + name + '\n')
        f.write(':set args ' + opts.extra_run_opts + '\n')
        # Add marker lines to the stdout and stderr output files, so we
        # can separate GHCi's output from the program's.
        f.write(':! echo ' + delimiter)
        f.write(':! echo 1>&2 ' + delimiter)
        # Set stdout to be line-buffered to match the compiled environment.
        f.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
        # wrapping in GHC.TopHandler.runIO ensures we get the same output
        # in the event of an exception as for the compiled program.
        f.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')

    # Append the test's stdin (if any) to the script so the program can
    # read it from GHCi's input stream.
    stdin = in_testdir(opts.stdin if opts.stdin else add_suffix(name, 'stdin'))
    if os.path.exists(stdin):
        os.system('cat "{0}" >> "{1}"'.format(stdin, script))

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())

    if getTestOpts().cmd_wrapper != None:
        cmd = opts.cmd_wrapper(cmd);

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    exit_code = runCmd(cmd, script, stdout, stderr, opts.run_timeout_multiplier)

    # split the stdout into compilation/program output
    split_file(stdout, delimiter,
               in_testdir(name, 'comp.stdout'),
               in_testdir(name, 'run.stdout'))
    split_file(stderr, delimiter,
               in_testdir(name, 'comp.stderr'),
               in_testdir(name, 'run.stderr'))

    # check the exit code
    if exit_code != getTestOpts().exit_code:
        print('Wrong exit code for ' + name + '(' + way + ') (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
        dump_stdout(name)
        dump_stderr(name)
        return failBecause('bad exit code')

    # ToDo: if the sub-shell was killed by ^C, then exit

    if not (opts.ignore_stderr or stderr_ok(name, way)):
        return failBecause('bad stderr')
    elif not (opts.ignore_stdout or stdout_ok(name, way)):
        return failBecause('bad stdout')
    else:
        return passed()
1478
def split_file(in_fn, delimiter, out1_fn, out2_fn):
    """Split *in_fn* at the first line equal to *delimiter* (ignoring the
    line's leading whitespace): lines before it go to *out1_fn*, lines
    after it go to *out2_fn*.  The delimiter line itself is dropped.
    """
    # See Note [Universal newlines].
    with io.open(in_fn, 'r', encoding='utf8', errors='replace', newline=None) as infile:
        with io.open(out1_fn, 'w', encoding='utf8', newline='') as out1:
            with io.open(out2_fn, 'w', encoding='utf8', newline='') as out2:
                line = infile.readline()
                # Raw string: '\s' in a plain literal is an invalid
                # escape sequence (DeprecationWarning since Python 3.6).
                while re.sub(r'^\s*', '', line) != delimiter and line != '':
                    out1.write(line)
                    line = infile.readline()

                line = infile.readline()
                while line != '':
                    out2.write(line)
                    line = infile.readline()
1493
1494 # -----------------------------------------------------------------------------
1495 # Utils
def get_compiler_flags():
    """Return the list of compiler flags for the current test: the
    always-flags, then the test's extra options, then '-outputdir <dir>'
    when an output directory is configured."""
    test_opts = getTestOpts()

    result = copy.copy(test_opts.compiler_always_flags)
    result.append(test_opts.extra_hc_opts)

    if test_opts.outputdir != None:
        result.extend(["-outputdir", test_opts.outputdir])

    return result
1507
def stdout_ok(name, way):
    """Compare the program's stdout against the expected output file,
    honouring any per-test check_stdout hook."""
    actual_file = add_suffix(name, 'run.stdout')
    expected_file = find_expected_file(name, 'stdout')
    normaliser = join_normalisers(normalise_output, getTestOpts().extra_normaliser)

    # A test may install its own stdout check; use it if present.
    custom_check = getTestOpts().check_stdout
    if custom_check:
        return custom_check(in_testdir(actual_file), normaliser)

    return compare_outputs(way, 'stdout', normaliser,
                           expected_file, actual_file)
1521
def dump_stdout( name ):
    """Print the test's captured stdout, if non-empty."""
    with open(in_testdir(name, 'run.stdout'), encoding='utf8') as f:
        contents = f.read().strip()
    if contents:
        print("Stdout (", name, "):")
        print(contents)
1528
def stderr_ok(name, way):
    """Compare the program's stderr against the expected output file."""
    actual_file = add_suffix(name, 'run.stderr')
    expected_file = find_expected_file(name, 'stderr')
    normaliser = join_normalisers(normalise_errmsg, getTestOpts().extra_errmsg_normaliser)

    return compare_outputs(way, 'stderr', normaliser,
                           expected_file, actual_file,
                           whitespace_normaliser=normalise_whitespace)
1537
def dump_stderr( name ):
    """Print the test's captured stderr, if non-empty."""
    with open(in_testdir(name, 'run.stderr'), encoding='utf8') as f:
        contents = f.read().strip()
    if contents:
        print("Stderr (", name, "):")
        print(contents)
1544
def read_no_crs(file):
    """Read *file* as UTF-8 text with universal newlines ('\\r\\n' becomes
    '\\n').  Returns '' when the file cannot be read at all."""
    try:
        # See Note [Universal newlines].
        with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
            return h.read()
    except Exception:
        # On Windows, if the program fails very early, it seems the
        # files stdout/stderr are redirected to may not get created
        return ''
1556
def write_file(file, str):
    """Write *str* to *file* as UTF-8 with no newline translation, so no
    Windows-style line endings are ever produced."""
    # See Note [Universal newlines].
    with io.open(file, 'w', encoding='utf8', newline='') as out:
        out.write(str)
1561
1562 # Note [Universal newlines]
1563 #
1564 # We don't want to write any Windows style line endings ever, because
1565 # it would mean that `make accept` would touch every line of the file
1566 # when switching between Linux and Windows.
1567 #
1568 # Furthermore, when reading a file, it is convenient to translate all
1569 # Windows style endings to '\n', as it simplifies searching or massaging
1570 # the content.
1571 #
1572 # Solution: use `io.open` instead of `open`
1573 # * when reading: use newline=None to translate '\r\n' to '\n'
1574 # * when writing: use newline='' to not translate '\n' to '\r\n'
1575 #
1576 # See https://docs.python.org/2/library/io.html#io.open.
1577 #
1578 # This should work with both python2 and python3, and with both mingw*
1579 # as msys2 style Python.
1580 #
1581 # Do note that io.open returns unicode strings. So we have to specify
1582 # the expected encoding. But there is at least one file which is not
1583 # valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
1584 # Another solution would be to open files in binary mode always, and
1585 # operate on bytes.
1586
def check_hp_ok(name):
    """Run hp2ps on the test's heap profile and, when GhostScript is
    available, validate the generated PostScript.  Returns a truthy value
    on success and prints a diagnostic otherwise."""
    opts = getTestOpts()

    # do not qualify for hp2ps because we should be in the right directory
    hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())

    hp2psResult = runCmd(hp2psCmd)

    actual_ps_path = in_testdir(name, 'ps')

    if hp2psResult != 0:
        print("hp2ps error when processing heap profile for " + name)
        return False

    if not os.path.exists(actual_ps_path):
        print("hp2ps did not generate PostScript for " + name)
        return False

    if not gs_working:
        # assume postscript is valid without ghostscript
        return True

    gsResult = runCmd(genGSCmd(actual_ps_path))
    if gsResult == 0:
        return True

    # Fixed message: previously read "...<name>is not valid PostScript"
    # (missing space before "is").
    print("hp2ps output for " + name + " is not valid PostScript")
    return False
1612
def check_prof_ok(name, way):
    """Compare the test's .prof output against <name>.prof.sample, when a
    sample exists.  Returns True when there is nothing to compare."""
    expected_file = find_expected_file(name, 'prof.sample')

    # Check actual prof file only if we have an expected prof file to
    # compare it with.
    if not os.path.exists(in_testdir(expected_file)):
        return True

    actual_file = add_suffix(name, 'prof')
    actual_path = in_testdir(actual_file)

    if not os.path.exists(actual_path):
        print(actual_path + " does not exist")
        return False

    if os.path.getsize(actual_path) == 0:
        print(actual_path + " is empty")
        return False

    return compare_outputs(way, 'prof', normalise_prof,
                           expected_file, actual_file,
                           whitespace_normaliser=normalise_whitespace)
1636
1637 # Compare expected output to actual output, and optionally accept the
1638 # new output. Returns true if output matched or was accepted, false
1639 # otherwise. See Note [Output comparison] for the meaning of the
1640 # normaliser and whitespace_normaliser parameters.
def compare_outputs(way, kind, normaliser, expected_file, actual_file, diff_file=None,
                    whitespace_normaliser=lambda x:x):
    """Compare expected output to actual output, and optionally accept
    the new output (config.accept).  Returns True if the output matched
    or was accepted, False otherwise.  See Note [Output comparison] for
    the meaning of the normaliser and whitespace_normaliser parameters.
    """
    expected_path = in_srcdir(expected_file)
    actual_path = in_testdir(actual_file)

    if os.path.exists(expected_path):
        expected_str = normaliser(read_no_crs(expected_path))
        # Create the .normalised file in the testdir, not in the srcdir.
        expected_normalised_file = add_suffix(expected_file, 'normalised')
        expected_normalised_path = in_testdir(expected_normalised_file)
    else:
        # No expected file: compare against the empty string.
        expected_str = ''
        expected_normalised_path = '/dev/null'

    actual_raw = read_no_crs(actual_path)
    actual_str = normaliser(actual_raw)

    # See Note [Output comparison].
    if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
        return True
    else:
        if config.verbose >= 1 and _expect_pass(way):
            print('Actual ' + kind + ' output differs from expected:')

        if expected_normalised_path != '/dev/null':
            write_file(expected_normalised_path, expected_str)

        actual_normalised_path = add_suffix(actual_path, 'normalised')
        write_file(actual_normalised_path, actual_str)

        if config.verbose >= 1 and _expect_pass(way):
            # See Note [Output comparison].
            r = runCmd('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
                                                        actual_normalised_path),
                        stdout=diff_file,
                        print_output=True)

            # If for some reason there were no non-whitespace differences,
            # then do a full diff
            if r == 0:
                r = runCmd('diff -u "{0}" "{1}"'.format(expected_normalised_path,
                                                           actual_normalised_path),
                           stdout=diff_file,
                           print_output=True)
        elif diff_file: open(diff_file, 'ab').close() # Make sure the file exists still as
                                                      # we will try to read it later

        # In accept mode, overwrite the expected file with the actual
        # output — unless the test was expected to fail anyway.
        if config.accept and (getTestOpts().expect == 'fail' or
                              way in getTestOpts().expect_fail_for):
            if_verbose(1, 'Test is expected to fail. Not accepting new output.')
            return False
        elif config.accept and actual_raw:
            # Accept into a platform-/os-specific expected file if requested.
            if config.accept_platform:
                if_verbose(1, 'Accepting new output for platform "'
                              + config.platform + '".')
                expected_path += '-' + config.platform
            elif config.accept_os:
                if_verbose(1, 'Accepting new output for os "'
                              + config.os + '".')
                expected_path += '-' + config.os
            else:
                if_verbose(1, 'Accepting new output.')

            write_file(expected_path, actual_raw)
            return True
        elif config.accept:
            # Actual output is empty: the expected file is no longer needed.
            if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
            os.remove(expected_path)
            return True
        else:
            return False
1713
1714 # Note [Output comparison]
1715 #
1716 # We do two types of output comparison:
1717 #
1718 # 1. To decide whether a test has failed. We apply a `normaliser` and an
1719 # optional `whitespace_normaliser` to the expected and the actual
1720 # output, before comparing the two.
1721 #
1722 # 2. To show as a diff to the user when the test indeed failed. We apply
1723 # the same `normaliser` function to the outputs, to make the diff as
1724 # small as possible (only showing the actual problem). But we don't
1725 # apply the `whitespace_normaliser` here, because it might completely
1726 # squash all whitespace, making the diff unreadable. Instead we rely
1727 # on the `diff` program to ignore whitespace changes as much as
1728 # possible (#10152).
1729
def normalise_whitespace( str ):
    """Collapse each run of whitespace to a single space and trim the ends."""
    words = str.split()
    return ' '.join(words)
1733
# Matches call-site annotations like ", called at Foo.hs:12:3 in pkg-1.0:".
callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')

def normalise_callstacks(s):
    """Normalise GHC call-stack output: blank out line/column numbers and
    package ids, and (unless keep_prof_callstacks) drop -prof call stacks
    entirely so output is independent of the way."""
    opts = getTestOpts()
    def repl(matches):
        location = matches.group(1)
        location = normalise_slashes_(location)
        return ', called at {0}:<line>:<column> in <package-id>:'.format(location)
    # Ignore line number differences in call stacks (#10834).
    s = re.sub(callSite_re, repl, s)
    # Ignore the change in how we identify implicit call-stacks
    s = s.replace('from ImplicitParams', 'from HasCallStack')
    if not opts.keep_prof_callstacks:
        # Don't output prof callstacks. Test output should be
        # independent from the WAY we run the test.
        s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
    return s
1751
# Matches the two fingerprint words of a Typeable TyCon representation.
tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)

def normalise_type_reps(str):
    """ Normalise out fingerprints from Typeable TyCon representations """
    return tyCon_re.sub('TyCon FINGERPRINT FINGERPRINT ', str)
1757
def normalise_errmsg( str ):
    """Normalise error-messages emitted via stderr so they compare stably
    across platforms, ways and build configurations."""
    # IBM AIX's `ld` is a bit chatty
    if opsys('aix'):
        str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    str = normalise_callstacks(str)
    str = normalise_type_reps(str)

    # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
    # the colon is there because it appears in error messages; this
    # hacky solution is used in place of more sophisticated filename
    # mangling
    str = re.sub('([^\\s])\\.exe', '\\1', str)

    # normalise slashes, minimise Windows/Unix filename differences
    str = re.sub('\\\\', '/', str)

    # The inplace ghc's are called ghc-stage[123] to avoid filename
    # collisions, so we need to normalise that to just "ghc"
    str = re.sub('ghc-stage[123]', 'ghc', str)

    # Error messages sometimes contain integer implementation package
    str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)

    # Error messages sometimes contain this blurb which can vary
    # spuriously depending upon build configuration (e.g. based on integer
    # backend)
    str = re.sub('...plus ([a-z]+|[0-9]+) instances involving out-of-scope types',
                 '...plus N instances involving out-of-scope types', str)

    # Also filter out bullet characters. This is because bullets are used to
    # separate error sections, and tests shouldn't be sensitive to how the
    # the division happens.
    # (The input may be str or bytes; pick the matching bullet literal.)
    bullet = '•'.encode('utf8') if isinstance(str, bytes) else '•'
    str = str.replace(bullet, '')

    # Windows only, this is a bug in hsc2hs but it is preventing
    # stable output for the testsuite. See Trac #9775. For now we filter out this
    # warning message to get clean output.
    if config.msys:
        str = re.sub('Failed to remove file (.*); error= (.*)$', '', str)
        str = re.sub('DeleteFile "(.+)": permission denied \(Access is denied\.\)(.*)$', '', str)

    return str
1806
1807 # normalise a .prof file, so that we can reasonably compare it against
1808 # a sample. This doesn't compare any of the actual profiling data,
1809 # only the shape of the profile and the number of entries.
def normalise_prof (str):
    """Normalise a .prof file so it can be compared against a sample.
    None of the actual profiling data survives — only the shape of the
    profile (cost centre, module, src) and the number of entries."""

    # Drop everything up to and including the "COST CENTRE" header line.
    str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)

    # Drop CAF/IDLE rows: these tend to change unpredictably.
    str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)

    # XXX Ignore Main.main. Sometimes this appears under CAF, and
    # sometimes under MAIN.
    str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)

    # A remaining profile row has 9 whitespace-separated columns, e.g.
    #
    #   readPrec Main Main_1.hs:7:13-16 109 1 0.0 0.6 0.0 0.6
    #
    # Keep only columns 1 (cost centre), 2 (module), 3 (src) and
    # 5 (entries), turning it into:
    #
    #   readPrec Main Main_1.hs:7:13-16 1
    #
    # SCC names can't contain whitespace, so splitting this way is safe.
    str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
            '\\1 \\2 \\3 \\5\n', str)
    return str
1855
def normalise_slashes_( str ):
    """Canonicalise path separators: turn backslashes into forward
    slashes, then collapse doubled slashes."""
    for pattern in ('\\\\', '//'):
        str = re.sub(pattern, '/', str)
    return str
1860
def normalise_exe_( str ):
    """Strip '.exe' suffixes (Windows) so output matches across platforms."""
    # Fixed: use a raw string literal.  '\.' in a plain string is an
    # invalid escape sequence (DeprecationWarning since Python 3.6); the
    # resulting regex is unchanged.
    str = re.sub(r'\.exe', '', str)
    return str
1864
def normalise_output( str ):
    """Apply the standard output normalisations.

    Drops " error:" markers and lower-cases " Warning:" (keeps the patch
    for Trac issue #10021 small), removes ".exe" suffixes that the
    program's own error messages may contain on Windows, and normalises
    call stacks and type reps.
    """
    str = modify_lines(str, lambda line: re.sub(' error:', '', line))
    str = modify_lines(str, lambda line: re.sub(' Warning:', ' warning:', line))
    # Only strip '.exe' when preceded by a non-space character.
    str = re.sub('([^\\s])\\.exe', '\\1', str)
    return normalise_type_reps(normalise_callstacks(str))
1876
def normalise_asm( str ):
    """Reduce assembly output to its instruction/label skeleton.

    Assembler metadata directives (lines whose first token starts with a
    dot, e.g. ".type") and blank lines are dropped.  Operands are dropped
    too, except for 'call' instructions, which keep their target (minus
    any '@plt' suffix).
    """
    metadata = re.compile('^[ \t]*\\..*$')
    kept = []
    for line in str.split('\n'):
        if metadata.match(line):
            continue
        tokens = re.sub('@plt', '', line).lstrip().split()
        if not tokens:
            # Blank line.
            continue
        if tokens[0] == 'call':
            kept.append(tokens[0] + ' ' + tokens[1])
        else:
            kept.append(tokens[0])
    return '\n'.join(kept)
1897
def if_verbose( n, s ):
    """Print s only when the configured verbosity is at least n."""
    if config.verbose < n:
        return
    print(s)
1901
def dump_file(f):
    """Print the contents of file f to stdout; print a blank line if the
    file cannot be read (best-effort diagnostic helper)."""
    try:
        with io.open(f) as handle:
            contents = handle.read()
    except Exception:
        contents = ''
    print(contents)
1908
def runCmd(cmd, stdin=None, stdout=None, stderr=None, timeout_multiplier=1.0, print_output=False):
    """Run cmd under the testsuite's timeout wrapper; return its exit code.

    cmd is a Bourne-shell command string whose '{...}' placeholders are
    filled in from the global config.  stdin/stdout/stderr, when given,
    name files to feed input from / capture output to; stderr may instead
    be subprocess.STDOUT to merge it into stdout.  The timeout is
    config.timeout scaled by timeout_multiplier.  Exit code 98 from the
    timeout program signals ^C (the whole run is stopped); 99 signals
    that the timeout fired.
    """
    timeout_prog = strip_quotes(config.timeout_prog)
    timeout = str(int(ceil(config.timeout * timeout_multiplier)))

    # Format cmd using config. Example: cmd='{hpc} report A.tix'
    cmd = cmd.format(**config.__dict__)
    if_verbose(3, cmd + ('< ' + os.path.basename(stdin) if stdin else ''))

    stdin_file = io.open(stdin, 'rb') if stdin else None
    stdout_buffer = b''
    stderr_buffer = b''

    hStdErr = subprocess.PIPE
    if stderr is subprocess.STDOUT:
        hStdErr = subprocess.STDOUT

    try:
        # cmd is a complex command in Bourne-shell syntax
        # e.g (cd . && 'C:/users/simonpj/HEAD/inplace/bin/ghc-stage2' ...etc)
        # Hence it must ultimately be run by a Bourne shell. It's timeout's job
        # to invoke the Bourne shell
        r = subprocess.Popen([timeout_prog, timeout, cmd],
                             stdin=stdin_file,
                             stdout=subprocess.PIPE,
                             stderr=hStdErr,
                             env=ghc_env)

        stdout_buffer, stderr_buffer = r.communicate()
    finally:
        if stdin_file:
            stdin_file.close()
        # The echoing and capture-file writes below run even if
        # Popen/communicate raised, using whatever was captured so far
        # (initially the empty byte strings above).
        if config.verbose >= 1 and print_output:
            if stdout_buffer:
                sys.stdout.buffer.write(stdout_buffer)
            if stderr_buffer:
                sys.stderr.buffer.write(stderr_buffer)

        if stdout:
            with io.open(stdout, 'wb') as f:
                f.write(stdout_buffer)
        if stderr:
            if stderr is not subprocess.STDOUT:
                # With STDOUT merging, stderr data already lives in
                # stdout_buffer, so there is nothing separate to write.
                with io.open(stderr, 'wb') as f:
                    f.write(stderr_buffer)

    if r.returncode == 98:
        # The python timeout program uses 98 to signal that ^C was pressed
        stopNow()
    if r.returncode == 99 and getTestOpts().exit_code != 99:
        # Only print a message when timeout killed the process unexpectedly.
        if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
    return r.returncode
1962
1963 # -----------------------------------------------------------------------------
1964 # checking if ghostscript is available for checking the output of hp2ps
1965
def genGSCmd(psfile):
    """Shell command that checks psfile with GhostScript.

    The '{gs}' placeholder is deliberately left in the result; runCmd
    fills it in from the global config later.
    """
    template = '{{gs}} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "{0}"'
    return template.format(psfile)
1968
def gsNotWorking():
    """Report that GhostScript-based hp2ps output checks will be skipped."""
    global gs_working
    print("GhostScript not available for hp2ps tests")

# Probe GhostScript once at startup: it must accept a known-good .ps file
# and reject a known-bad one before we trust it to validate hp2ps output.
global gs_working
gs_working = False
if config.have_profiling and config.gs != '':
    goodResult = runCmd(genGSCmd(config.top + '/config/good.ps'))
    # Short-circuit: bad.ps is only checked when good.ps passed.
    if goodResult == 0 and \
       runCmd(genGSCmd(config.top + '/config/bad.ps') + ' >/dev/null 2>&1') != 0:
        print("GhostScript available for hp2ps tests")
        gs_working = True
    else:
        gsNotWorking()
else:
    gsNotWorking()
1990
def add_suffix( name, suffix ):
    """Return name + '.' + suffix, or name unchanged when suffix is empty."""
    return name if suffix == '' else name + '.' + suffix
1996
def add_hs_lhs_suffix(name):
    """Attach the source-file extension implied by the test options.

    Checked in priority order; plain Haskell ('hs') is the fallback.
    """
    opts = getTestOpts()
    for flag, ext in [('c_src',      'c'),
                      ('cmm_src',    'cmm'),
                      ('objc_src',   'm'),
                      ('objcpp_src', 'mm'),
                      ('literate',   'lhs')]:
        if getattr(opts, flag):
            return add_suffix(name, ext)
    return add_suffix(name, 'hs')
2010
def replace_suffix( name, suffix ):
    """Replace name's extension (if any) with suffix."""
    # Fixed: the old extension returned by os.path.splitext was bound to
    # an unused local; index the tuple directly instead.
    return os.path.splitext(name)[0] + '.' + suffix
2014
def in_testdir(name, suffix=''):
    """Path of name (with optional suffix) inside the test's working directory."""
    filename = add_suffix(name, suffix)
    return os.path.join(getTestOpts().testdir, filename)
2017
def in_srcdir(name, suffix=''):
    """Path of name (with optional suffix) inside the test's source directory."""
    filename = add_suffix(name, suffix)
    return os.path.join(getTestOpts().srcdir, filename)
2020
2021 # Finding the sample output. The filename is of the form
2022 #
2023 # <test>.stdout[-ws-<wordsize>][-<platform>|-<os>]
2024 #
2025 def find_expected_file(name, suff):
2026 basename = add_suffix(name, suff)
2027 # Override the basename if the user has specified one, this will then be
2028 # subjected to the same name mangling scheme as normal to allow platform
2029 # specific overrides to work.
2030 basename = getTestOpts().use_specs.get (suff, basename)
2031
2032 files = [basename + ws + plat
2033 for plat in ['-' + config.platform, '-' + config.os, '']
2034 for ws in ['-ws-' + config.wordsize, '']]
2035
2036 for f in files:
2037 if os.path.exists(in_srcdir(f)):
2038 return f
2039
2040 return basename
2041
if config.msys:
    import stat
    def cleanup():
        """Remove the test's working directory, retrying on Windows.

        Raises an Exception if the directory still exists after all
        attempts, so the current test aborts with a clear message instead
        of a cryptic later failure.  See Trac #13162.
        """
        testdir = getTestOpts().testdir
        max_attempts = 5
        retries = max_attempts
        def on_error(function, path, excinfo):
            # At least one test (T11489) removes the write bit from a file it
            # produces. Windows refuses to delete read-only files with a
            # permission error. Try setting the write bit and try again.
            os.chmod(path, stat.S_IWRITE)
            function(path)

        # On Windows we have to retry the delete a couple of times.
        # The reason for this is that a FileDelete command just marks a
        # file for deletion. The file is really only removed when the last
        # handle to the file is closed. Unfortunately there are a lot of
        # system services that can have a file temporarily opened using a shared
        # readonly lock, such as the built in AV and search indexer.
        #
        # We can't really guarantee that these are all off, so what we can do is
        # whenever after a rmtree the folder still exists to try again and wait a bit.
        #
        # Based on what I've seen from the tests on CI server, is that this is relatively rare.
        # So overall we won't be retrying a lot. If after a reasonable amount of time the folder is
        # still locked then abort the current test by throwing an exception, this so it won't fail
        # with an even more cryptic error.
        #
        # See Trac #13162
        exception = None
        while retries > 0 and os.path.exists(testdir):
            # First attempt runs immediately (sleep of 0); later attempts
            # back off by 6 seconds per attempt already made.
            time.sleep((max_attempts-retries)*6)
            try:
                shutil.rmtree(testdir, onerror=on_error, ignore_errors=False)
            except Exception as e:
                # Remember the last failure for the error message below.
                exception = e
            retries -= 1

        if retries == 0 and os.path.exists(testdir):
            raise Exception("Unable to remove folder '%s': %s\nUnable to start current test."
                            % (testdir, exception))
else:
    def cleanup():
        """Remove the test's working directory, if it exists."""
        testdir = getTestOpts().testdir
        if os.path.exists(testdir):
            shutil.rmtree(testdir, ignore_errors=False)
2088
2089
2090 # -----------------------------------------------------------------------------
2091 # Return a list of all the files ending in '.T' below directories roots.
2092
def findTFiles(roots):
    """Yield the path of every file ending in '.T' below the given roots."""
    for root in roots:
        for path, dirs, files in os.walk(root, topdown=True):
            # Prune uncleaned .run directories in place so os.walk never
            # descends into them (stale .T copies must not be picked up);
            # sorting keeps the traversal order deterministic.
            dirs[:] = sorted(d for d in dirs if not d.endswith(testdir_suffix))
            for filename in files:
                if filename.endswith('.T'):
                    yield os.path.join(path, filename)
2102
2103 # -----------------------------------------------------------------------------
2104 # Output a test summary to the specified file object
2105
def summary(t, file, short=False, color=False):
    """Write a human-readable summary of the test-run results t to file.

    short=True prints only the list of unexpectedly-behaving tests;
    color=True renders the SUMMARY banner via str_fail/str_pass depending
    on whether anything went wrong.
    """

    file.write('\n')
    printUnexpectedTests(file,
        [t.unexpected_passes, t.unexpected_failures,
         t.unexpected_stat_failures, t.framework_failures])

    if short:
        # Only print the list of unexpected tests above.
        return

    # Colour the banner red (str_fail) if anything unexpected happened,
    # green (str_pass) otherwise; identity function when color is off.
    colorize = lambda s: s
    if color:
        if len(t.unexpected_failures) > 0 or \
            len(t.unexpected_stat_failures) > 0 or \
            len(t.unexpected_passes) > 0 or \
            len(t.framework_failures) > 0:
            colorize = str_fail
        else:
            colorize = str_pass

    file.write(colorize('SUMMARY') + ' for test run started at '
               + time.strftime("%c %Z", t.start_time) + '\n'
               + str(datetime.timedelta(seconds=
                    round(time.time() - time.mktime(t.start_time)))).rjust(8)
               + ' spent to go through\n'
               + repr(t.total_tests).rjust(8)
               + ' total tests, which gave rise to\n'
               + repr(t.total_test_cases).rjust(8)
               + ' test cases, of which\n'
               + repr(t.n_tests_skipped).rjust(8)
               + ' were skipped\n'
               + '\n'
               + repr(len(t.missing_libs)).rjust(8)
               + ' had missing libraries\n'
               + repr(t.n_expected_passes).rjust(8)
               + ' expected passes\n'
               + repr(t.n_expected_failures).rjust(8)
               + ' expected failures\n'
               + '\n'
               + repr(len(t.framework_failures)).rjust(8)
               + ' caused framework failures\n'
               + repr(len(t.framework_warnings)).rjust(8)
               + ' caused framework warnings\n'
               + repr(len(t.unexpected_passes)).rjust(8)
               + ' unexpected passes\n'
               + repr(len(t.unexpected_failures)).rjust(8)
               + ' unexpected failures\n'
               + repr(len(t.unexpected_stat_failures)).rjust(8)
               + ' unexpected stat failures\n'
               + '\n')

    # Per-category detail listings, each only when non-empty.
    if t.unexpected_passes:
        file.write('Unexpected passes:\n')
        printTestInfosSummary(file, t.unexpected_passes)

    if t.unexpected_failures:
        file.write('Unexpected failures:\n')
        printTestInfosSummary(file, t.unexpected_failures)

    if t.unexpected_stat_failures:
        file.write('Unexpected stat failures:\n')
        printTestInfosSummary(file, t.unexpected_stat_failures)

    if t.framework_failures:
        file.write('Framework failures:\n')
        printTestInfosSummary(file, t.framework_failures)

    if t.framework_warnings:
        file.write('Framework warnings:\n')
        printTestInfosSummary(file, t.framework_warnings)

    if stopping():
        file.write('WARNING: Testsuite run was terminated early\n')
2180
def printUnexpectedTests(file, testInfoss):
    """Print a TEST="..." line naming every test with unexpected results.

    Results whose name ends in '.T' are skipped (those are whole test
    scripts, not individual tests); nothing is printed when no test
    qualifies.
    """
    names = sorted({result.testname
                    for testInfos in testInfoss
                    for result in testInfos
                    if not result.testname.endswith('.T')})
    if names:
        file.write('Unexpected results from:\n')
        file.write('TEST="%s"\n' % ' '.join(names))
        file.write('\n')
2190
def printTestInfosSummary(file, testInfos):
    """Print one aligned line per result: directory, test, reason, way."""
    # Pad directories to the widest one so the test names line up.
    width = max(len(info.directory) for info in testInfos)
    for info in testInfos:
        file.write('   %s %s [%s] (%s)\n'
                   % (info.directory.ljust(width), info.testname,
                      info.reason, info.way))
    file.write('\n')
2199
def modify_lines(s, f):
    """Apply f to every line of s and rejoin the result.

    A trailing newline is appended to any non-empty result so diffs do
    not produce '\\ No newline at end of file' warnings.
    """
    transformed = '\n'.join(map(f, s.splitlines()))
    if transformed and not transformed.endswith('\n'):
        transformed += '\n'
    return transformed