# coding=utf8
#
# (c) Simon Marlow 2002
#

import io
import shutil
import os
import re
import traceback
import time
import datetime
import copy
import glob
import sys
from math import ceil, trunc
from pathlib import PurePath
import collections.abc
import subprocess

from testglobals import config, ghc_env, default_testopts, brokens, t, TestResult
from testutil import strip_quotes, lndir, link_or_copy_file, passed, failBecause, failBecauseStderr, str_fail, str_pass
from cpu_features import have_cpu_feature
import perf_notes as Perf
from perf_notes import MetricChange

extra_src_files = {'T4198': ['exitminus1.c']} # TODO: See #12223

global pool_sema
if config.use_threads:
    import threading
    pool_sema = threading.BoundedSemaphore(value=config.threads)

global wantToStop
wantToStop = False

def stopNow():
    global wantToStop
    wantToStop = True

def stopping():
    return wantToStop

# Options valid for the current test only (these get reset to
# testdir_testopts after each test).

global testopts_local
if config.use_threads:
    testopts_local = threading.local()
else:
    class TestOpts_Local:
        pass
    testopts_local = TestOpts_Local()

def getTestOpts():
    return testopts_local.x

def setLocalTestOpts(opts):
    global testopts_local
    testopts_local.x = opts

def isCompilerStatsTest():
    opts = getTestOpts()
    return bool(opts.is_compiler_stats_test)

def isStatsTest():
    opts = getTestOpts()
    return opts.is_stats_test

# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
    global thisdir_settings
    thisdir_settings = [thisdir_settings, f]
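
# For example, a hypothetical all.T file could begin with
#
#     setTestOpts(only_ways(['normal']))
#
# after which every test declared in that file defaults to running only in
# the 'normal' way (unless a test overrides it).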

# -----------------------------------------------------------------------------
# Canned setup functions for common cases. eg. for a test you might say
#
#       test('test001', normal, compile, [''])
#
# to run it without any options, but change it to
#
#       test('test001', expect_fail, compile, [''])
#
# to expect failure for this test.
#
# type TestOpt = (name :: String, opts :: Object) -> IO ()

def normal( name, opts ):
    return

def skip( name, opts ):
    opts.skip = True

def expect_fail( name, opts ):
    # The compiler, testdriver, OS or platform is missing a certain
    # feature, and we don't plan to or can't fix it now or in the
    # future.
    opts.expect = 'fail'

def reqlib( lib ):
    return lambda name, opts, l=lib: _reqlib (name, opts, l )
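
# For example, a hypothetical test that needs the mtl package could be
# declared as
#
#     test('T1234', reqlib('mtl'), compile, [''])
#
# If mtl is not installed, the test is reported as 'missing-lib' rather
# than failed.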

def stage1(name, opts):
    # See Note [Why is there no stage1 setup function?]
    framework_fail(name, 'stage1 setup function does not exist',
                   'add your test to testsuite/tests/stage1 instead')

# Note [Why is there no stage1 setup function?]
#
# Presumably a stage1 setup function would signal that the stage1
# compiler should be used to compile a test.
#
# Trouble is, the path to the compiler + the `ghc --info` settings for
# that compiler are currently passed in from the `make` part of the
# testsuite driver.
#
# Switching compilers in the Python part would be entirely too late, as
# all ghc_with_* settings would be wrong. See config/ghc for possible
# consequences (for example, config.run_ways would still be
# based on the default compiler, quite likely causing ./validate --slow
# to fail).
#
# It would be possible to let the Python part of the testsuite driver
# make the call to `ghc --info`, but doing so would require quite some
# work. Care has to be taken to not affect the run_command tests for
# example, as they also use the `ghc --info` settings:
# quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
#
# If you want a test to run using the stage1 compiler, add it to the
# testsuite/tests/stage1 directory. Validate runs the tests in that
# directory with `make stage=1`.

# Cache the results of looking to see if we have a library or not.
# This makes quite a difference, especially on Windows.
have_lib_cache = {}

def have_library(lib):
    """ Test whether the given library is available """
    if lib in have_lib_cache:
        got_it = have_lib_cache[lib]
    else:
        cmd = strip_quotes(config.ghc_pkg)
        p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             env=ghc_env)
        # read from stdout and stderr to avoid blocking due to
        # buffers filling
        p.communicate()
        r = p.wait()
        got_it = r == 0
        have_lib_cache[lib] = got_it

    return got_it

def _reqlib( name, opts, lib ):
    if not have_library(lib):
        opts.expect = 'missing-lib'

def req_haddock( name, opts ):
    if not config.haddock:
        opts.expect = 'missing-lib'

def req_profiling( name, opts ):
    '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
    if not config.have_profiling:
        opts.expect = 'fail'

def req_shared_libs( name, opts ):
    if not config.have_shared_libs:
        opts.expect = 'fail'

def req_interp( name, opts ):
    if not config.have_interp:
        opts.expect = 'fail'

def req_smp( name, opts ):
    if not config.have_smp:
        opts.expect = 'fail'

def ignore_stdout(name, opts):
    opts.ignore_stdout = True

def ignore_stderr(name, opts):
    opts.ignore_stderr = True

def combined_output( name, opts ):
    opts.combined_output = True

def use_specs( specs ):
    """
    use_specs allows one to override files based on suffixes. e.g. 'stdout',
    'stderr', 'asm', 'prof.sample', etc.

    Example: use_specs({'stdout' : 'prof002.stdout'}) makes the test re-use
    prof002.stdout.

    Full example:
        test('T5889', [only_ways(['normal']), req_profiling,
                       extra_files(['T5889/A.hs', 'T5889/B.hs']),
                       use_specs({'stdout' : 'prof002.stdout'})],
             multimod_compile,
             ['A B', '-O -prof -fno-prof-count-entries -v0'])
    """
    return lambda name, opts, s=specs: _use_specs( name, opts, s )

def _use_specs( name, opts, specs ):
    opts.extra_files.extend(specs.values())
    opts.use_specs = specs

# -----

def expect_fail_for( ways ):
    return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )

def _expect_fail_for( name, opts, ways ):
    opts.expect_fail_for = ways

def expect_broken( bug ):
    # This test is expected not to work due to the indicated trac bug
    # number.
    return lambda name, opts, b=bug: _expect_broken (name, opts, b )

def _expect_broken( name, opts, bug ):
    record_broken(name, opts, bug)
    opts.expect = 'fail'

def expect_broken_for( bug, ways ):
    return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )

def _expect_broken_for( name, opts, bug, ways ):
    record_broken(name, opts, bug)
    opts.expect_fail_for = ways

def record_broken(name, opts, bug):
    me = (bug, opts.testdir, name)
    if me not in brokens:
        brokens.append(me)

def _expect_pass(way):
    # Helper function. Not intended for use in .T files.
    opts = getTestOpts()
    return opts.expect == 'pass' and way not in opts.expect_fail_for

# -----

def omit_ways( ways ):
    return lambda name, opts, w=ways: _omit_ways( name, opts, w )

def _omit_ways( name, opts, ways ):
    opts.omit_ways = ways

# -----

def only_ways( ways ):
    return lambda name, opts, w=ways: _only_ways( name, opts, w )

def _only_ways( name, opts, ways ):
    opts.only_ways = ways

# -----

def extra_ways( ways ):
    return lambda name, opts, w=ways: _extra_ways( name, opts, w )

def _extra_ways( name, opts, ways ):
    opts.extra_ways = ways

# -----

def set_stdin( file ):
    return lambda name, opts, f=file: _set_stdin(name, opts, f)

def _set_stdin( name, opts, f ):
    opts.stdin = f

# -----

def exit_code( val ):
    return lambda name, opts, v=val: _exit_code(name, opts, v)

def _exit_code( name, opts, v ):
    opts.exit_code = v

def signal_exit_code( val ):
    if opsys('solaris2'):
        return exit_code( val )
    else:
        # When an application running on Linux receives a fatal error
        # signal, its exit code is encoded as 128 + the signal value. See
        # http://www.tldp.org/LDP/abs/html/exitcodes.html
        # I assume that Mac OS X behaves the same way; at least the Mac
        # OS X builder's behavior suggests this.
        return exit_code( val+128 )
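
# For example, a test expected to be killed by SIGABRT (signal 6) would use
# signal_exit_code(6); on Linux the observed exit code is then 128 + 6 = 134.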

# -----

def compile_timeout_multiplier( val ):
    return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)

def _compile_timeout_multiplier( name, opts, v ):
    opts.compile_timeout_multiplier = v

def run_timeout_multiplier( val ):
    return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)

def _run_timeout_multiplier( name, opts, v ):
    opts.run_timeout_multiplier = v

# -----

def extra_run_opts( val ):
    return lambda name, opts, v=val: _extra_run_opts(name, opts, v)

def _extra_run_opts( name, opts, v ):
    opts.extra_run_opts = v

# -----

def extra_hc_opts( val ):
    return lambda name, opts, v=val: _extra_hc_opts(name, opts, v)

def _extra_hc_opts( name, opts, v ):
    opts.extra_hc_opts = v

# -----

def extra_clean( files ):
    # TODO. Remove all calls to extra_clean.
    return lambda _name, _opts: None

def extra_files(files):
    return lambda name, opts: _extra_files(name, opts, files)

def _extra_files(name, opts, files):
    opts.extra_files.extend(files)

# -----
# Defaults to "test everything, and only break on extreme cases"
#
# The inputs to this function are slightly interesting:
# metric can be either:
#     - 'all', in which case all 3 possible metrics are collected and compared.
#     - The specific metric one wants to use in the test.
#     - A list of the metrics one wants to use in the test.
#
# Deviation defaults to 20% because the goal is correctness over performance.
# The testsuite should avoid breaking when there is not an actual error.
# Instead, the testsuite should notify of regressions in a non-breaking manner.
#
# collect_compiler_stats is used when the metrics collected are about the compiler.
# collect_stats is used in the majority case when the metrics to be collected
# are about the performance of the runtime code generated by the compiler.
def collect_compiler_stats(metric='all', deviation=20):
    return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d, True)

def collect_stats(metric='all', deviation=20):
    return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d)

def testing_metrics():
    return ['bytes allocated', 'peak_megabytes_allocated', 'max_bytes_used']

# This is an internal function that is used only in the implementation.
# 'is_compiler_stats_test' is somewhat of an unfortunate name.
# If the boolean is set to true, it indicates that this test is one that
# measures the performance numbers of the compiler.
# As this is a fairly rare case in the testsuite, it defaults to false to
# indicate that it is a 'normal' performance test.
def _collect_stats(name, opts, metrics, deviation, is_compiler_stats_test=False):
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    # Normalize metrics to a list of strings.
    if isinstance(metrics, str):
        if metrics == 'all':
            metrics = testing_metrics()
        else:
            metrics = [metrics]

    opts.is_stats_test = True
    if is_compiler_stats_test:
        opts.is_compiler_stats_test = True

    # Compiler performance numbers change when debugging is on, making the results
    # useless and confusing. Therefore, skip if debugging is on.
    if config.compiler_debugged and is_compiler_stats_test:
        opts.skip = True

    for metric in metrics:
        def baselineByWay(way, target_commit, metric=metric):
            return Perf.baseline_metric( \
                      target_commit, name, config.test_env, metric, way)

        opts.stats_range_fields[metric] = (baselineByWay, deviation)
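
# For example, a hypothetical performance test that tracks only runtime
# allocations, with a tighter 5% tolerance, could be declared as
#
#     test('T1234', collect_stats('bytes allocated', 5), compile_and_run, ['-O'])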

# -----

def when(b, f):
    # When config.list_broken is on, we want to see all expect_broken calls,
    # so we always do f
    if b or config.list_broken:
        return f
    else:
        return normal

def unless(b, f):
    return when(not b, f)
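
# Typical usage (hypothetical test name): skip on Windows, and only expect
# the test to work when GHC itself is dynamically linked:
#
#     test('T1234', [when(opsys('mingw32'), skip),
#                    unless(ghc_dynamic(), expect_broken(1234))],
#          compile, [''])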

def doing_ghci():
    return 'ghci' in config.run_ways

def ghc_dynamic():
    return config.ghc_dynamic

def fast():
    return config.speed == 2

def platform( plat ):
    return config.platform == plat

def opsys( os ):
    return config.os == os

def arch( arch ):
    return config.arch == arch

def wordsize( ws ):
    return config.wordsize == str(ws)

def msys( ):
    return config.msys

def cygwin( ):
    return config.cygwin

def have_vanilla( ):
    return config.have_vanilla

def have_ncg( ):
    return config.have_ncg

def have_dynamic( ):
    return config.have_dynamic

def have_profiling( ):
    return config.have_profiling

def in_tree_compiler( ):
    return config.in_tree_compiler

def unregisterised( ):
    return config.unregisterised

def compiler_profiled( ):
    return config.compiler_profiled

def compiler_debugged( ):
    return config.compiler_debugged

def have_gdb( ):
    return config.have_gdb

def have_readelf( ):
    return config.have_readelf

def integer_gmp( ):
    return have_library("integer-gmp")

def integer_simple( ):
    return have_library("integer-simple")

def llvm_build ( ):
    return config.ghc_built_by_llvm

# ---

def high_memory_usage(name, opts):
    opts.alone = True

# If a test is for a multi-CPU race, then running the test alone
# increases the chance that we'll actually see it.
def multi_cpu_race(name, opts):
    opts.alone = True

# ---
def literate( name, opts ):
    opts.literate = True

def c_src( name, opts ):
    opts.c_src = True

def objc_src( name, opts ):
    opts.objc_src = True

def objcpp_src( name, opts ):
    opts.objcpp_src = True

def cmm_src( name, opts ):
    opts.cmm_src = True

def outputdir( odir ):
    return lambda name, opts, d=odir: _outputdir(name, opts, d)

def _outputdir( name, opts, odir ):
    opts.outputdir = odir

# ----

def pre_cmd( cmd ):
    return lambda name, opts, c=cmd: _pre_cmd(name, opts, c)

def _pre_cmd( name, opts, cmd ):
    opts.pre_cmd = cmd

# ----

def cmd_prefix( prefix ):
    return lambda name, opts, p=prefix: _cmd_prefix(name, opts, p)

def _cmd_prefix( name, opts, prefix ):
    opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd

# ----

def cmd_wrapper( fun ):
    return lambda name, opts, f=fun: _cmd_wrapper(name, opts, f)

def _cmd_wrapper( name, opts, fun ):
    opts.cmd_wrapper = fun

# ----

def compile_cmd_prefix( prefix ):
    return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, p)

def _compile_cmd_prefix( name, opts, prefix ):
    opts.compile_cmd_prefix = prefix

# ----

def check_stdout( f ):
    return lambda name, opts, f=f: _check_stdout(name, opts, f)

def _check_stdout( name, opts, f ):
    opts.check_stdout = f

def no_check_hp(name, opts):
    opts.check_hp = False

# ----

def filter_stdout_lines( regex ):
    """ Filter lines of stdout with the given regular expression """
    def f( name, opts ):
        _normalise_fun(name, opts, lambda s: '\n'.join(re.findall(regex, s)))
    return f

def normalise_slashes( name, opts ):
    _normalise_fun(name, opts, normalise_slashes_)

def normalise_exe( name, opts ):
    _normalise_fun(name, opts, normalise_exe_)

def normalise_fun( *fs ):
    return lambda name, opts: _normalise_fun(name, opts, fs)

def _normalise_fun( name, opts, *fs ):
    opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)

def normalise_errmsg_fun( *fs ):
    return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)

def _normalise_errmsg_fun( name, opts, *fs ):
    opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)

def check_errmsg(needle):
    def norm(str):
        if needle in str:
            return "%s contained in -ddump-simpl\n" % needle
        else:
            return "%s not contained in -ddump-simpl\n" % needle
    return normalise_errmsg_fun(norm)

def grep_errmsg(needle):
    def norm(str):
        return "".join(filter(lambda l: re.search(needle, l), str.splitlines(True)))
    return normalise_errmsg_fun(norm)

def normalise_whitespace_fun(f):
    return lambda name, opts: _normalise_whitespace_fun(name, opts, f)

def _normalise_whitespace_fun(name, opts, f):
    opts.whitespace_normaliser = f

def normalise_version_( *pkgs ):
    def normalise_version__( str ):
        return re.sub('(' + '|'.join(map(re.escape, pkgs)) + ')-[0-9.]+',
                      '\\1-<VERSION>', str)
    return normalise_version__

def normalise_version( *pkgs ):
    def normalise_version__( name, opts ):
        _normalise_fun(name, opts, normalise_version_(*pkgs))
        _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
    return normalise_version__
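
# For example, normalise_version_('base') rewrites 'base-4.12.0.0' to
# 'base-<VERSION>', so a hypothetical test whose output mentions the base
# version stays stable across releases:
#
#     test('T1234', normalise_version('base'), compile_fail, [''])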

def normalise_drive_letter(name, opts):
    # Windows only. Change D:\\ to C:\\.
    _normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))

def keep_prof_callstacks(name, opts):
    """Keep profiling callstacks.

    Use together with `only_ways(prof_ways)`.
    """
    opts.keep_prof_callstacks = True

def join_normalisers(*a):
    """
    Compose functions, flattening sequences.

       join_normalisers(f1,[f2,f3],f4)

    is the same as

       lambda x: f1(f2(f3(f4(x))))
    """

    def flatten(l):
        """
        Taken from http://stackoverflow.com/a/2158532/946226
        """
        for el in l:
            if (isinstance(el, collections.abc.Iterable)
                    and not isinstance(el, (bytes, str))):
                for sub in flatten(el):
                    yield sub
            else:
                yield el

    a = flatten(a)

    fn = lambda x: x  # identity function
    for f in a:
        assert callable(f)
        fn = lambda x, f=f, fn=fn: fn(f(x))
    return fn
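
# A quick illustration of the composition order (a sketch, not part of the
# driver): the normaliser listed last runs first.
#
#     n = join_normalisers(lambda s: s + '!', [str.upper])
#     n('ok')  # == 'OK!': str.upper runs first, then '!' is appended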

# ----
# Function for composing two opt-fns together

def executeSetups(fs, name, opts):
    if type(fs) is list:
        # If we have a list of setups, then execute each one
        for f in fs:
            executeSetups(f, name, opts)
    else:
        # fs is a single function, so just apply it
        fs(name, opts)

# -----------------------------------------------------------------------------
# The current directory of tests

def newTestDir(tempdir, dir):

    global thisdir_settings
    # reset the options for this test directory
    def settings(name, opts, tempdir=tempdir, dir=dir):
        return _newTestDir(name, opts, tempdir, dir)
    thisdir_settings = settings

# Should be equal to entry in toplevel .gitignore.
testdir_suffix = '.run'

def _newTestDir(name, opts, tempdir, dir):
    testdir = os.path.join('', *(p for p in PurePath(dir).parts if p != '..'))
    opts.srcdir = os.path.join(os.getcwd(), dir)
    opts.testdir = os.path.join(tempdir, testdir, name + testdir_suffix)
    opts.compiler_always_flags = config.compiler_always_flags

# -----------------------------------------------------------------------------
# Actually doing tests

parallelTests = []
aloneTests = []
allTestNames = set([])

def runTest(watcher, opts, name, func, args):
    if config.use_threads:
        pool_sema.acquire()
        t = threading.Thread(target=test_common_thread,
                             name=name,
                             args=(watcher, name, opts, func, args))
        t.daemon = False
        t.start()
    else:
        test_common_work(watcher, name, opts, func, args)

# name  :: String
# setup :: [TestOpt] -> IO ()
def test(name, setup, func, args):
    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    if config.run_only_some_tests:
        if name not in config.only:
            return
    else:
        # Note [Mutating config.only]
        # config.only is initially the set of tests requested by
        # the user (via 'make TEST='). We then remove all tests that
        # we've already seen (in .T files), so that we can later
        # report on any tests we couldn't find and error out.
        config.only.remove(name)

    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
    myTestOpts = copy.deepcopy(default_testopts)

    executeSetups([thisdir_settings, setup], name, myTestOpts)

    thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
    else:
        parallelTests.append(thisTest)
    allTestNames.add(name)

if config.use_threads:
    def test_common_thread(watcher, name, opts, func, args):
        try:
            test_common_work(watcher, name, opts, func, args)
        finally:
            pool_sema.release()

def get_package_cache_timestamp():
    if config.package_conf_cache_file == '':
        return 0.0
    else:
        try:
            return os.stat(config.package_conf_cache_file).st_mtime
        except:
            return 0.0

do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o', '.out') # 12112

def test_common_work(watcher, name, opts, func, args):
    try:
        t.total_tests += 1
        setLocalTestOpts(opts)

        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()

        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']

        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]

        t.total_test_cases += len(all_ways)

        ok_way = lambda way: \
            not getTestOpts().skip \
            and (getTestOpts().only_ways is None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and (not (config.only_perf_tests and not isStatsTest())) \
            and way not in getTestOpts().omit_ways

        # The ways we are not asked to skip
        do_ways = list(filter(ok_way, all_ways))

        # Only run all ways in slow mode.
        # See Note [validate and testsuite speed] in toplevel Makefile.
        if config.accept:
            # Only ever run one way
            do_ways = do_ways[:1]
        elif config.speed > 0:
            # However, if we EXPLICITLY asked for a way (with extra_ways)
            # please test it!
            explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
            other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
            do_ways = other_ways[:1] + explicit_ways

        # Find all files in the source directory that this test
        # depends on. Do this only once for all ways.
        # Generously add all filenames that start with the name of
        # the test to this set, as a convenience to test authors.
        # They will have to use the `extra_files` setup function to
        # specify all other files that their test depends on (but
        # this seems to be necessary for only about 10% of all
        # tests).
        files = set(f for f in os.listdir(opts.srcdir)
                       if f.startswith(name) and not f == name and
                          not f.endswith(testdir_suffix) and
                          not os.path.splitext(f)[1] in do_not_copy)
        for filename in (opts.extra_files + extra_src_files.get(name, [])):
            if filename.startswith('/'):
                framework_fail(name, 'whole-test',
                               'no absolute paths in extra_files please: ' + filename)

            elif '*' in filename:
                # Don't use wildcards in extra_files too much, as
                # globbing is slow.
                files.update((os.path.relpath(f, opts.srcdir)
                              for f in glob.iglob(in_srcdir(filename))))

            elif filename:
                files.add(filename)

            else:
                framework_fail(name, 'whole-test', 'extra_file is empty string')

        # Run the required tests...
        for way in do_ways:
            if stopping():
                break
            try:
                do_test(name, way, func, args, files)
            except KeyboardInterrupt:
                stopNow()
            except Exception as e:
                framework_fail(name, way, str(e))
                traceback.print_exc()

        t.n_tests_skipped += len(set(all_ways) - set(do_ways))

        if config.cleanup and do_ways:
            try:
                cleanup()
            except Exception as e:
                framework_fail(name, 'runTest', 'Unhandled exception during cleanup: ' + str(e))

        package_conf_cache_file_end_timestamp = get_package_cache_timestamp()

        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' +
                           str(package_conf_cache_file_start_timestamp) + ' ' +
                           str(package_conf_cache_file_end_timestamp))

    except Exception as e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
    finally:
        watcher.notify()

def do_test(name, way, func, args, files):
    opts = getTestOpts()

    full_name = name + '(' + way + ')'

    if_verbose(2, "=====> {0} {1} of {2} {3}".format(
        full_name, t.total_tests, len(allTestNames),
        [len(t.unexpected_passes),
         len(t.unexpected_failures),
         len(t.framework_failures)]))

    # Clean up prior to the test, so that we can't spuriously conclude
    # that it passed on the basis of old run outputs.
    cleanup()
    os.makedirs(opts.testdir)

    # Link all source files for this test into a new directory in
    # /tmp, and run the test in that directory. This makes it
    # possible to run tests in parallel, without modification, that
    # would otherwise (accidentally) write to the same output file.
    # It also makes it easier to keep the testsuite clean.

    for extra_file in files:
        src = in_srcdir(extra_file)
        dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
        if os.path.isfile(src):
            link_or_copy_file(src, dst)
        elif os.path.isdir(src):
            if os.path.exists(dst):
                shutil.rmtree(dst)
            os.mkdir(dst)
            lndir(src, dst)
        else:
            if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
                # When using a ghc built without haddock support, .t
                # files are rightfully missing. Don't
                # framework_fail. Test will be skipped later.
                pass
            else:
                framework_fail(name, way,
                               'extra_file does not exist: ' + extra_file)

    if func.__name__ == 'run_command' or func.__name__ == 'makefile_test' or opts.pre_cmd:
        # When running 'MAKE' make sure 'TOP' still points to the
        # root of the testsuite.
        src_makefile = in_srcdir('Makefile')
        dst_makefile = in_testdir('Makefile')
        if os.path.exists(src_makefile):
            with io.open(src_makefile, 'r', encoding='utf8') as src:
                makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
            with io.open(dst_makefile, 'w', encoding='utf8') as dst:
                dst.write(makefile)

    if opts.pre_cmd:
        exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, override_options(opts.pre_cmd)),
                           stderr = subprocess.STDOUT,
                           print_output = config.verbose >= 3)

        # If the user used expect_broken then don't record failures of pre_cmd
        if exit_code != 0 and opts.expect not in ['fail']:
            framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
            if_verbose(1, '** pre_cmd was "{0}".'.format(override_options(opts.pre_cmd)))

    result = func(*[name, way] + args)

    if opts.expect not in ['pass', 'fail', 'missing-lib']:
        framework_fail(name, way, 'bad expected ' + opts.expect)

    try:
        passFail = result['passFail']
    except (KeyError, TypeError):
        passFail = 'No passFail found'

    directory = re.sub(r'^\.[/\\]', '', opts.testdir)

    if passFail == 'pass':
        if _expect_pass(way):
            t.expected_passes.append(TestResult(directory, name, "", way))
            t.n_expected_passes += 1
        else:
            if_verbose(1, '*** unexpected pass for %s' % full_name)
            t.unexpected_passes.append(TestResult(directory, name, 'unexpected', way))
    elif passFail == 'fail':
        if _expect_pass(way):
            reason = result['reason']
            tag = result.get('tag')
            if tag == 'stat':
                if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
                t.unexpected_stat_failures.append(TestResult(directory, name, reason, way))
            else:
                if_verbose(1, '*** unexpected failure for %s' % full_name)
                result = TestResult(directory, name, reason, way, stderr=result.get('stderr'))
                t.unexpected_failures.append(result)
        else:
            if opts.expect == 'missing-lib':
                t.missing_libs.append(TestResult(directory, name, 'missing-lib', way))
            else:
                t.n_expected_failures += 1
    else:
        framework_fail(name, way, 'bad result ' + passFail)

# Make is often invoked with -s, which means if it fails, we get
# no feedback at all. This is annoying. So let's remove the option
# if found and instead have the testsuite decide on what to do
# with the output.
def override_options(pre_cmd):
    if config.verbose >= 5 and bool(re.match(r'\$make', pre_cmd, re.I)):
        return pre_cmd.replace('-s'      , '') \
                      .replace('--silent', '') \
                      .replace('--quiet' , '')

    return pre_cmd

def framework_fail(name, way, reason):
    opts = getTestOpts()
    directory = re.sub(r'^\.[/\\]', '', opts.testdir)
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
    t.framework_failures.append(TestResult(directory, name, reason, way))

def framework_warn(name, way, reason):
    opts = getTestOpts()
    directory = re.sub(r'^\.[/\\]', '', opts.testdir)
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework warning for %s %s ' % (full_name, reason))
    t.framework_warnings.append(TestResult(directory, name, reason, way))

def badResult(result):
    try:
        if result['passFail'] == 'pass':
            return False
        return True
    except (KeyError, TypeError):
        return True

# -----------------------------------------------------------------------------
# Generic command tests

# A generic command test is expected to run and exit successfully.
#
# The expected exit code can be changed via exit_code() as normal, and
# the expected stdout/stderr are stored in <testname>.stdout and
# <testname>.stderr. The output of the command can be ignored altogether
# by using the setup functions ignore_stdout and ignore_stderr.

def run_command( name, way, cmd ):
    return simple_run( name, '', override_options(cmd), '' )

def makefile_test( name, way, target=None ):
    if target is None:
        target = name

    cmd = '$MAKE -s --no-print-directory {target}'.format(target=target)
    return run_command(name, way, cmd)
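
# For example, a Makefile-driven test (hypothetical name) whose make target
# matches the test name can be declared simply as
#
#     test('T1234', normal, makefile_test, [])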

# -----------------------------------------------------------------------------
# GHCi tests

def ghci_script( name, way, script):
    flags = ' '.join(get_compiler_flags())
    way_flags = ' '.join(config.way_flags[way])

    # We pass HC and HC_OPTS as environment variables, so that the
    # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
    cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {way_flags} {flags}'
          ).format(flags=flags, way_flags=way_flags)
    # NB: put way_flags before flags so that flags in all.T can override others

    getTestOpts().stdin = script
    return simple_run( name, way, cmd, getTestOpts().extra_run_opts )

# -----------------------------------------------------------------------------
# Compile-only tests

def compile( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], extra_hc_opts )

def compile_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], extra_hc_opts )

def backpack_typecheck( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )

def backpack_typecheck_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )

def backpack_compile( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], extra_hc_opts, backpack=True )

def backpack_compile_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], extra_hc_opts, backpack=True )

def backpack_run( name, way, extra_hc_opts ):
    return compile_and_run__( name, way, '', [], extra_hc_opts, backpack=True )

def multimod_compile( name, way, top_mod, extra_hc_opts ):
    return do_compile( name, way, 0, top_mod, [], extra_hc_opts )

def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
    return do_compile( name, way, 1, top_mod, [], extra_hc_opts )

def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
    return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)

def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
    return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)

def do_compile(name, way, should_fail, top_mod, extra_mods, extra_hc_opts, **kwargs):
    # print 'Compile only, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    result = simple_build(name, way, extra_hc_opts, should_fail, top_mod, 0, 1, **kwargs)

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    expected_stderr_file = find_expected_file(name, 'stderr')
    actual_stderr_file = add_suffix(name, 'comp.stderr')
    diff_file_name = in_testdir(add_suffix(name, 'comp.diff'))

    if not compare_outputs(way, 'stderr',
                           join_normalisers(getTestOpts().extra_errmsg_normaliser,
                                            normalise_errmsg),
                           expected_stderr_file, actual_stderr_file,
                           diff_file=diff_file_name,
                           whitespace_normaliser=getattr(getTestOpts(),
                                                         "whitespace_normaliser",
                                                         normalise_whitespace)):
        stderr = open(diff_file_name, 'rb').read()
        os.remove(diff_file_name)
        return failBecauseStderr('stderr mismatch', stderr=stderr)

    # no problems found, this test passed
    return passed()

def compile_cmp_asm( name, way, extra_hc_opts ):
    print('Compile only, extra args = ', extra_hc_opts)
    result = simple_build(name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)

    if badResult(result):
        return result

    # the actual assembly should always match the expected assembly,
    # regardless of whether we expected the compilation to fail or not
    # (successful compilations may generate warnings).

    expected_asm_file = find_expected_file(name, 'asm')
    actual_asm_file = add_suffix(name, 's')

    if not compare_outputs(way, 'asm',
                           join_normalisers(normalise_errmsg, normalise_asm),
                           expected_asm_file, actual_asm_file):
        return failBecause('asm mismatch')

    # no problems found, this test passed
    return passed()

# -----------------------------------------------------------------------------
# Compile-and-run tests

def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts, backpack=0 ):
    # print 'Compile and run, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    if way.startswith('ghci'): # interpreted...
        return interpreter_run(name, way, extra_hc_opts, top_mod)
    else: # compiled...
        result = simple_build(name, way, extra_hc_opts, 0, top_mod, 1, 1, backpack = backpack)
        if badResult(result):
            return result

        cmd = './' + name

        # we don't check the compiler's stderr for a compile-and-run test
        return simple_run( name, way, cmd, getTestOpts().extra_run_opts )

def compile_and_run( name, way, extra_hc_opts ):
    return compile_and_run__( name, way, '', [], extra_hc_opts)

def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
    return compile_and_run__( name, way, top_mod, [], extra_hc_opts)

def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
    return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)

def stats( name, way, stats_file ):
    opts = getTestOpts()
    return check_stats(name, way, stats_file, opts.stats_range_fields)

def metric_dict(name, way, metric, value):
    return Perf.PerfStat(
        test_env = config.test_env,
        test     = name,
        way      = way,
        metric   = metric,
        value    = value)

# -----------------------------------------------------------------------------
# Check test stats. This prints the results for the user.
# name: name of the test.
# way: the way.
# stats_file: the path of the stats_file containing the stats for the test.
# range_fields: see TestOptions.stats_range_fields
# Returns a pass/fail object. Passes if the stats are within the expected
# value ranges.
def check_stats(name, way, stats_file, range_fields):
    head_commit = Perf.commit_hash('HEAD')
    result = passed()
    if range_fields:
        try:
            f = open(in_testdir(stats_file))
        except IOError as e:
            return failBecause(str(e))
        stats_file_contents = f.read()
        f.close()

        for (metric, baseline_and_dev) in range_fields.items():
            field_match = re.search(r'\("' + metric + r'", "([0-9]+)"\)', stats_file_contents)
            if field_match is None:
                print('Failed to find metric: ', metric)
                metric_result = failBecause('no such stats metric')
            else:
                actual_val = int(field_match.group(1))

                # Store the metric so it can later be stored in a git note.
                perf_stat = metric_dict(name, way, metric, actual_val)
                change = None

                # If this is the first time running the benchmark, then pass.
                baseline = baseline_and_dev[0](way, head_commit)
                if baseline is None:
                    metric_result = passed()
                    change = MetricChange.NewMetric
                else:
                    tolerance_dev = baseline_and_dev[1]
                    (change, metric_result) = Perf.check_stats_change(
                        perf_stat,
                        baseline,
                        tolerance_dev,
                        config.allowed_perf_changes,
                        config.verbose >= 4)
                t.metrics.append((change, perf_stat))

            # If any metric fails then the test fails.
            # Note, the remaining metrics are still run so that
            # a complete list of changes can be presented to the user.
            if metric_result['passFail'] == 'fail':
                result = metric_result

    return result

# -----------------------------------------------------------------------------
# Build a single-module program

def extras_build( way, extra_mods, extra_hc_opts ):
    for mod, opts in extra_mods:
        result = simple_build(mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0)
        if not (mod.endswith('.hs') or mod.endswith('.lhs')):
            extra_hc_opts += ' ' + replace_suffix(mod, 'o')
        if badResult(result):
            return result

    return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}

def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, backpack = False):
    opts = getTestOpts()

    # Redirect stdout and stderr to the same file
    stdout = in_testdir(name, 'comp.stderr')
    stderr = subprocess.STDOUT

    if top_mod != '':
        srcname = top_mod
    elif addsuf:
        if backpack:
            srcname = add_suffix(name, 'bkp')
        else:
            srcname = add_hs_lhs_suffix(name)
    else:
        srcname = name

    if top_mod != '':
        to_do = '--make '
        if link:
            to_do = to_do + '-o ' + name
    elif backpack:
        if link:
            to_do = '-o ' + name + ' '
        else:
            to_do = ''
        to_do = to_do + '--backpack '
    elif link:
        to_do = '-o ' + name
    else:
        to_do = '-c' # just compile

    stats_file = name + '.comp.stats'
    if isCompilerStatsTest():
        extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    if backpack:
        extra_hc_opts += ' -outputdir ' + name + '.out'

    # Required by GHC 7.3+, harmless for earlier versions:
    if (getTestOpts().c_src or
        getTestOpts().objc_src or
        getTestOpts().objcpp_src or
        getTestOpts().cmm_src):
        extra_hc_opts += ' -no-hs-main '

    if getTestOpts().compile_cmd_prefix == '':
        cmd_prefix = ''
    else:
        cmd_prefix = getTestOpts().compile_cmd_prefix + ' '

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
           '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())

    exit_code = runCmd(cmd, None, stdout, stderr, opts.compile_timeout_multiplier)

    actual_stderr_path = in_testdir(name, 'comp.stderr')

    if exit_code != 0 and not should_fail:
        if config.verbose >= 1 and _expect_pass(way):
            print('Compile failed (exit code {0}), errors were:'.format(exit_code))
            dump_file(actual_stderr_path)

    # ToDo: if the sub-shell was killed by ^C, then exit

    if isCompilerStatsTest():
        statsResult = check_stats(name, way, stats_file, opts.stats_range_fields)
        if badResult(statsResult):
            return statsResult

    if should_fail:
        if exit_code == 0:
            stderr_contents = open(actual_stderr_path, 'rb').read()
            return failBecauseStderr('exit code 0', stderr_contents)
    else:
        if exit_code != 0:
            stderr_contents = open(actual_stderr_path, 'rb').read()
            return failBecauseStderr('exit code non-0', stderr_contents)

    return passed()

# -----------------------------------------------------------------------------
# Run a program and check its output
#
# If testname.stdin exists, route input from that, else
# from /dev/null. Route output to testname.run.stdout and
# testname.run.stderr. Returns the exit code of the run.

def simple_run(name, way, prog, extra_run_opts):
    opts = getTestOpts()

    # figure out what to use for stdin
    if opts.stdin:
        stdin = in_testdir(opts.stdin)
    elif os.path.exists(in_testdir(name, 'stdin')):
        stdin = in_testdir(name, 'stdin')
    else:
        stdin = None

    stdout = in_testdir(name, 'run.stdout')
    if opts.combined_output:
        stderr = subprocess.STDOUT
    else:
        stderr = in_testdir(name, 'run.stderr')

    my_rts_flags = rts_flags(way)

    # Collect stats if necessary:
    # isStatsTest and not isCompilerStatsTest():
    #   assume we are running a ghc compiled program. Collect stats.
    # isStatsTest and way == 'ghci':
    #   assume we are running a program via ghci. Collect stats.
    stats_file = name + '.stats'
    if isStatsTest() and (not isCompilerStatsTest() or way == 'ghci'):
        stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    else:
        stats_args = ''

    # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
    cmd = prog + stats_args + ' ' + my_rts_flags + ' ' + extra_run_opts

    if opts.cmd_wrapper is not None:
        cmd = opts.cmd_wrapper(cmd)

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    # run the command
    exit_code = runCmd(cmd, stdin, stdout, stderr, opts.run_timeout_multiplier)

    # check the exit code
    if exit_code != opts.exit_code:
        if config.verbose >= 1 and _expect_pass(way):
            print('Wrong exit code for ' + name + '(' + way + ') (expected', opts.exit_code, ', actual', exit_code, ')')
            dump_stdout(name)
            dump_stderr(name)
        return failBecause('bad exit code')

    if not (opts.ignore_stderr or stderr_ok(name, way) or opts.combined_output):
        return failBecause('bad stderr')
    if not (opts.ignore_stdout or stdout_ok(name, way)):
        return failBecause('bad stdout')

    check_hp = '-h' in my_rts_flags and opts.check_hp
    check_prof = '-p' in my_rts_flags

    # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
    if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
        return failBecause('bad heap profile')
    if check_prof and not check_prof_ok(name, way):
        return failBecause('bad profile')

    return check_stats(name, way, stats_file, opts.stats_range_fields)

def rts_flags(way):
    args = config.way_rts_flags.get(way, [])
    return '+RTS {0} -RTS'.format(' '.join(args)) if args else ''
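
# For example, if config.way_rts_flags were to map 'threaded2' to
# ['-N2', '-ls'] (an assumed mapping, for illustration only), then
# rts_flags('threaded2') would return '+RTS -N2 -ls -RTS'; a way with no
# RTS flags yields the empty string.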

# -----------------------------------------------------------------------------
# Run a program in the interpreter and check its output

def interpreter_run(name, way, extra_hc_opts, top_mod):
    opts = getTestOpts()

    stdout = in_testdir(name, 'interp.stdout')
    stderr = in_testdir(name, 'interp.stderr')
    script = in_testdir(name, 'genscript')

    if opts.combined_output:
        framework_fail(name, 'unsupported',
                       'WAY=ghci and combined_output together is not supported')

    if top_mod == '':
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = top_mod

    delimiter = '===== program output begins here\n'

    with io.open(script, 'w', encoding='utf8') as f:
        # set the prog name and command-line args to match the compiled
        # environment.
        f.write(':set prog ' + name + '\n')
        f.write(':set args ' + opts.extra_run_opts + '\n')
        # Add marker lines to the stdout and stderr output files, so we
        # can separate GHCi's output from the program's.
        f.write(':! echo ' + delimiter)
        f.write(':! echo 1>&2 ' + delimiter)
        # Set stdout to be line-buffered to match the compiled environment.
        f.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
        # wrapping in GHC.TopHandler.runIO ensures we get the same output
        # in the event of an exception as for the compiled program.
        f.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
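
    # For a hypothetical test 'T1234' run with empty extra_run_opts, the
    # script generated above would read:
    #
    #     :set prog T1234
    #     :set args
    #     :! echo ===== program output begins here
    #     :! echo 1>&2 ===== program output begins here
    #     System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering
    #     GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()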

    stdin = in_testdir(opts.stdin if opts.stdin else add_suffix(name, 'stdin'))
    if os.path.exists(stdin):
        os.system('cat "{0}" >> "{1}"'.format(stdin, script))

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())

    if getTestOpts().cmd_wrapper is not None:
        cmd = opts.cmd_wrapper(cmd)

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    exit_code = runCmd(cmd, script, stdout, stderr, opts.run_timeout_multiplier)

    # split the stdout into compilation/program output
    split_file(stdout, delimiter,
               in_testdir(name, 'comp.stdout'),
               in_testdir(name, 'run.stdout'))
    split_file(stderr, delimiter,
               in_testdir(name, 'comp.stderr'),
               in_testdir(name, 'run.stderr'))

    # check the exit code
    if exit_code != getTestOpts().exit_code:
        print('Wrong exit code for ' + name + '(' + way + ') (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
        dump_stdout(name)
        dump_stderr(name)
        return failBecause('bad exit code')

    # ToDo: if the sub-shell was killed by ^C, then exit

    if not (opts.ignore_stderr or stderr_ok(name, way)):
        return failBecause('bad stderr')
    elif not (opts.ignore_stdout or stdout_ok(name, way)):
        return failBecause('bad stdout')
    else:
        return passed()

def split_file(in_fn, delimiter, out1_fn, out2_fn):
    # See Note [Universal newlines].
    with io.open(in_fn, 'r', encoding='utf8', errors='replace', newline=None) as infile:
        with io.open(out1_fn, 'w', encoding='utf8', newline='') as out1:
            with io.open(out2_fn, 'w', encoding='utf8', newline='') as out2:
                line = infile.readline()
                while re.sub(r'^\s*', '', line) != delimiter and line != '':
                    out1.write(line)
                    line = infile.readline()

                line = infile.readline()
                while line != '':
                    out2.write(line)
                    line = infile.readline()

# -----------------------------------------------------------------------------
# Utils

def get_compiler_flags():
    opts = getTestOpts()

    flags = copy.copy(opts.compiler_always_flags)

    flags.append(opts.extra_hc_opts)

    if opts.outputdir is not None:
        flags.extend(["-outputdir", opts.outputdir])

    return flags

def stdout_ok(name, way):
    actual_stdout_file = add_suffix(name, 'run.stdout')
    expected_stdout_file = find_expected_file(name, 'stdout')

    extra_norm = join_normalisers(normalise_output, getTestOpts().extra_normaliser)

    check_stdout = getTestOpts().check_stdout
    if check_stdout:
        actual_stdout_path = in_testdir(actual_stdout_file)
        return check_stdout(actual_stdout_path, extra_norm)

    return compare_outputs(way, 'stdout', extra_norm,
                           expected_stdout_file, actual_stdout_file)

def dump_stdout( name ):
    with open(in_testdir(name, 'run.stdout'), encoding='utf8') as f:
        str = f.read().strip()
        if str:
            print("Stdout (", name, "):")
            print(str)

def stderr_ok(name, way):
    actual_stderr_file = add_suffix(name, 'run.stderr')
    expected_stderr_file = find_expected_file(name, 'stderr')

    return compare_outputs(way, 'stderr',
                           join_normalisers(normalise_errmsg, getTestOpts().extra_errmsg_normaliser),
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=normalise_whitespace)

def dump_stderr( name ):
    with open(in_testdir(name, 'run.stderr'), encoding='utf8') as f:
        str = f.read().strip()
        if str:
            print("Stderr (", name, "):")
            print(str)

def read_no_crs(file):
    str = ''
    try:
        # See Note [Universal newlines].
        with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
            str = h.read()
    except Exception:
        # On Windows, if the program fails very early, it seems the
        # files stdout/stderr are redirected to may not get created
        pass
    return str

def write_file(file, str):
    # See Note [Universal newlines].
    with io.open(file, 'w', encoding='utf8', newline='') as h:
        h.write(str)

# Note [Universal newlines]
#
# We don't want to write any Windows style line endings ever, because
# it would mean that `make accept` would touch every line of the file
# when switching between Linux and Windows.
#
# Furthermore, when reading a file, it is convenient to translate all
# Windows style endings to '\n', as it simplifies searching or massaging
# the content.
#
# Solution: use `io.open` instead of `open`
#  * when reading: use newline=None to translate '\r\n' to '\n'
#  * when writing: use newline='' to not translate '\n' to '\r\n'
#
# See https://docs.python.org/2/library/io.html#io.open.
#
# This should work with both python2 and python3, and with both mingw*
# and msys2 style Python.
#
# Do note that io.open returns unicode strings. So we have to specify
# the expected encoding. But there is at least one file which is not
# valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
# Another solution would be to open files in binary mode always, and
# operate on bytes.

def check_hp_ok(name):
    opts = getTestOpts()

    # do not qualify for hp2ps because we should be in the right directory
    hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())

    hp2psResult = runCmd(hp2psCmd)

    actual_ps_path = in_testdir(name, 'ps')

    if hp2psResult == 0:
        if os.path.exists(actual_ps_path):
            if gs_working:
                gsResult = runCmd(genGSCmd(actual_ps_path))
                if gsResult == 0:
                    return True
                else:
                    print("hp2ps output for " + name + " is not valid PostScript")
            else:
                # assume postscript is valid without ghostscript
                return True
        else:
            print("hp2ps did not generate PostScript for " + name)
            return False
    else:
        print("hp2ps error when processing heap profile for " + name)
        return False

def check_prof_ok(name, way):
    expected_prof_file = find_expected_file(name, 'prof.sample')
    expected_prof_path = in_testdir(expected_prof_file)

    # Check actual prof file only if we have an expected prof file to
    # compare it with.
    if not os.path.exists(expected_prof_path):
        return True

    actual_prof_file = add_suffix(name, 'prof')
    actual_prof_path = in_testdir(actual_prof_file)

    if not os.path.exists(actual_prof_path):
        print(actual_prof_path + " does not exist")
        return False

    if os.path.getsize(actual_prof_path) == 0:
        print(actual_prof_path + " is empty")
        return False

    return compare_outputs(way, 'prof', normalise_prof,
                           expected_prof_file, actual_prof_file,
                           whitespace_normaliser=normalise_whitespace)

# Compare expected output to actual output, and optionally accept the
# new output. Returns true if output matched or was accepted, false
# otherwise. See Note [Output comparison] for the meaning of the
# normaliser and whitespace_normaliser parameters.
def compare_outputs(way, kind, normaliser, expected_file, actual_file, diff_file=None,
                    whitespace_normaliser=lambda x: x):

    expected_path = in_srcdir(expected_file)
    actual_path = in_testdir(actual_file)

    if os.path.exists(expected_path):
        expected_str = normaliser(read_no_crs(expected_path))
        # Create the .normalised file in the testdir, not in the srcdir.
        expected_normalised_file = add_suffix(expected_file, 'normalised')
        expected_normalised_path = in_testdir(expected_normalised_file)
    else:
        expected_str = ''
        expected_normalised_path = '/dev/null'

    actual_raw = read_no_crs(actual_path)
    actual_str = normaliser(actual_raw)

    # See Note [Output comparison].
    if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
        return True
    else:
        if config.verbose >= 1 and _expect_pass(way):
            print('Actual ' + kind + ' output differs from expected:')

        if expected_normalised_path != '/dev/null':
            write_file(expected_normalised_path, expected_str)

        actual_normalised_path = add_suffix(actual_path, 'normalised')
        write_file(actual_normalised_path, actual_str)

        if config.verbose >= 1 and _expect_pass(way):
            # See Note [Output comparison].
            r = runCmd('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
                                                     actual_normalised_path),
                       stdout=diff_file,
                       print_output=True)

            # If for some reason there were no non-whitespace differences,
            # then do a full diff
            if r == 0:
                r = runCmd('diff -u "{0}" "{1}"'.format(expected_normalised_path,
                                                        actual_normalised_path),
                           stdout=diff_file,
                           print_output=True)
        elif diff_file:
            # Make sure the file still exists, as we will try to read it later.
            open(diff_file, 'ab').close()

        if config.accept and (getTestOpts().expect == 'fail' or
                              way in getTestOpts().expect_fail_for):
            if_verbose(1, 'Test is expected to fail. Not accepting new output.')
            return False
        elif config.accept and actual_raw:
            if config.accept_platform:
                if_verbose(1, 'Accepting new output for platform "'
                              + config.platform + '".')
                expected_path += '-' + config.platform
            elif config.accept_os:
                if_verbose(1, 'Accepting new output for os "'
                              + config.os + '".')
                expected_path += '-' + config.os
            else:
                if_verbose(1, 'Accepting new output.')

            write_file(expected_path, actual_raw)
            return True
        elif config.accept:
            if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
            os.remove(expected_path)
            return True
        else:
            return False
1712
1713 # Note [Output comparison]
1714 #
1715 # We do two types of output comparison:
1716 #
1717 # 1. To decide whether a test has failed. We apply a `normaliser` and an
1718 # optional `whitespace_normaliser` to the expected and the actual
1719 # output, before comparing the two.
1720 #
1721 # 2. To show as a diff to the user when the test indeed failed. We apply
1722 # the same `normaliser` function to the outputs, to make the diff as
1723 # small as possible (only showing the actual problem). But we don't
1724 # apply the `whitespace_normaliser` here, because it might completely
1725 # squash all whitespace, making the diff unreadable. Instead we rely
1726 # on the `diff` program to ignore whitespace changes as much as
1727 # possible (#10152).
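#
# For instance, with whitespace_normaliser=normalise_whitespace (as used for
# .prof files above), an expected 'x  =  1' and an actual 'x = 1' compare
# equal in step 1, so no failure is reported. When the comparison does fail,
# step 2 diffs the merely `normaliser`-ed files with 'diff -uw', so the user
# sees the original line structure rather than squashed whitespace.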

def normalise_whitespace( str ):
    # Merge contiguous whitespace characters into a single space.
    return ' '.join(str.split())

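# A quick illustration (editor's example, not used by the driver):
#
#   >>> normalise_whitespace('  foo\t\tbar\n  baz ')
#   'foo bar baz'
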
callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')

def normalise_callstacks(s):
    opts = getTestOpts()
    def repl(matches):
        location = matches.group(1)
        location = normalise_slashes_(location)
        return ', called at {0}:<line>:<column> in <package-id>:'.format(location)
    # Ignore line number differences in call stacks (#10834).
    s = re.sub(callSite_re, repl, s)
    # Ignore the change in how we identify implicit call-stacks.
    s = s.replace('from ImplicitParams', 'from HasCallStack')
    if not opts.keep_prof_callstacks:
        # Don't output prof callstacks. Test output should be
        # independent of the WAY we run the test.
        s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
    return s

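# For example (editor's sketch; in the driver proper this runs with the
# current test's options in scope):
#
#   'undefined, called at Main.hs:5:10 in main:Main'
#     ==> 'undefined, called at Main.hs:<line>:<column> in <package-id>:Main'
#
# Only the source location and package identifier are scrubbed; the rest of
# the line is left alone.
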
tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)

def normalise_type_reps(str):
    """ Normalise out fingerprints from Typeable TyCon representations """
    return re.sub(tyCon_re, 'TyCon FINGERPRINT FINGERPRINT ', str)

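# The two \d+L?## fields are the fingerprint words of a TyCon, which vary
# from build to build. An illustrative rewrite (editor's example):
#
#   'TyCon 123## 456## base GHC.Types Int'
#     ==> 'TyCon FINGERPRINT FINGERPRINT base GHC.Types Int'
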
def normalise_errmsg( str ):
    """Normalise error-messages emitted via stderr"""
    # IBM AIX's `ld` is a bit chatty
    if opsys('aix'):
        str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
    # remove " error:" and lower-case " Warning:" to make the patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    str = normalise_callstacks(str)
    str = normalise_type_reps(str)

    # If some file ends in ".exe" or ".exe:", zap ".exe" (for Windows);
    # the colon is there because it appears in error messages. This
    # hacky solution is used in place of more sophisticated filename
    # mangling.
    str = re.sub('([^\\s])\\.exe', '\\1', str)

    # normalise slashes to minimise Windows/Unix filename differences
    str = re.sub('\\\\', '/', str)

    # The inplace ghc's are called ghc-stage[123] to avoid filename
    # collisions, so we need to normalise that to just "ghc"
    str = re.sub('ghc-stage[123]', 'ghc', str)

    # Error messages sometimes contain the integer implementation package
    str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)

    # Error messages sometimes contain this blurb, which can vary
    # spuriously depending upon the build configuration (e.g. based on the
    # integer backend)
    str = re.sub('...plus ([a-z]+|[0-9]+) instances involving out-of-scope types',
                 '...plus N instances involving out-of-scope types', str)

    # Also filter out bullet characters. This is because bullets are used to
    # separate error sections, and tests shouldn't be sensitive to how the
    # division happens.
    bullet = '•'.encode('utf8') if isinstance(str, bytes) else '•'
    str = str.replace(bullet, '')

    # Windows only: this is a bug in hsc2hs, but it is preventing stable
    # output for the testsuite. See Trac #9775. For now we filter out this
    # warning message to get clean output.
    if config.msys:
        str = re.sub('Failed to remove file (.*); error= (.*)$', '', str)
        str = re.sub(r'DeleteFile "(.+)": permission denied \(Access is denied\.\)(.*)$', '', str)

    return str

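# Taken together these rewrites turn, for example (editor's sketch, modulo
# the trailing newline that modify_lines adds):
#
#   C:\foo\ghc-stage2.exe: Bar.hs:3:7: error: Variable not in scope
#
# into
#
#   C:/foo/ghc: Bar.hs:3:7: Variable not in scope
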
# normalise a .prof file, so that we can reasonably compare it against
# a sample. This doesn't compare any of the actual profiling data,
# only the shape of the profile and the number of entries.
def normalise_prof (str):
    # strip everything up to the line beginning "COST CENTRE"
    str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n', '', str)

    # strip results for CAFs, these tend to change unpredictably
    str = re.sub('[ \t]*(CAF|IDLE).*\n', '', str)

    # XXX Ignore Main.main. Sometimes this appears under CAF, and
    # sometimes under MAIN.
    str = re.sub('[ \t]*main[ \t]+Main.*\n', '', str)

    # We have something like this:
    #
    # MAIN       MAIN  <built-in>                 53  0  0.0   0.2  0.0  100.0
    # CAF        Main  <entire-module>           105  0  0.0   0.3  0.0   62.5
    # readPrec   Main  Main_1.hs:7:13-16         109  1  0.0   0.6  0.0    0.6
    # readPrec   Main  Main_1.hs:4:13-16         107  1  0.0   0.6  0.0    0.6
    # main       Main  Main_1.hs:(10,1)-(20,20)  106  1  0.0  20.2  0.0   61.0
    # ==         Main  Main_1.hs:7:25-26         114  1  0.0   0.0  0.0    0.0
    # ==         Main  Main_1.hs:4:25-26         113  1  0.0   0.0  0.0    0.0
    # showsPrec  Main  Main_1.hs:7:19-22         112  2  0.0   1.2  0.0    1.2
    # showsPrec  Main  Main_1.hs:4:19-22         111  2  0.0   0.9  0.0    0.9
    # readPrec   Main  Main_1.hs:7:13-16         110  0  0.0  18.8  0.0   18.8
    # readPrec   Main  Main_1.hs:4:13-16         108  0  0.0  19.9  0.0   19.9
    #
    # then we remove all the specific profiling data, leaving only the cost
    # centre name, module, src, and entries, to end up with this (modulo
    # whitespace between columns):
    #
    # MAIN       MAIN  <built-in>         0
    # readPrec   Main  Main_1.hs:7:13-16  1
    # readPrec   Main  Main_1.hs:4:13-16  1
    # ==         Main  Main_1.hs:7:25-26  1
    # ==         Main  Main_1.hs:4:25-26  1
    # showsPrec  Main  Main_1.hs:7:19-22  2
    # showsPrec  Main  Main_1.hs:4:19-22  2
    # readPrec   Main  Main_1.hs:7:13-16  0
    # readPrec   Main  Main_1.hs:4:13-16  0

    # Split into 9 whitespace-separated groups and keep columns 1 (cost
    # centre), 2 (module), 3 (src), and 5 (entries). SCC names can't contain
    # whitespace, so this works fine.
    str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
                 '\\1 \\2 \\3 \\5\n', str)
    return str

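# One line through the regex above (editor's example):
#
#   'readPrec Main Main_1.hs:7:13-16 109 1 0.0 0.6 0.0 0.6'
#     ==> 'readPrec Main Main_1.hs:7:13-16 1\n'
#
# i.e. group 4 (the cost-centre number) and the four percentage columns are
# dropped, and group 5 (entries) is kept.
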
def normalise_slashes_( str ):
    str = re.sub('\\\\', '/', str)
    str = re.sub('//', '/', str)
    return str

def normalise_exe_( str ):
    str = re.sub(r'\.exe', '', str)
    return str

def normalise_output( str ):
    # remove " error:" and lower-case " Warning:" to make the patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    # Remove a .exe extension (for Windows).
    # This can occur in error messages generated by the program.
    str = re.sub('([^\\s])\\.exe', '\\1', str)
    str = normalise_callstacks(str)
    str = normalise_type_reps(str)
    return str

def normalise_asm( str ):
    lines = str.split('\n')
    # Only keep instructions and labels not starting with a dot.
    metadata = re.compile('^[ \t]*\\..*$')
    out = []
    for line in lines:
        # Drop metadata directives (e.g. ".type")
        if not metadata.match(line):
            line = re.sub('@plt', '', line)
            instr = line.lstrip().split()
            # Drop empty lines.
            if not instr:
                continue
            # Drop operands, except for call instructions.
            elif instr[0] == 'call':
                out.append(instr[0] + ' ' + instr[1])
            else:
                out.append(instr[0])
    out = '\n'.join(out)
    return out

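# For instance (editor's example), the fragment
#
#       .text
#       movq %rsp, %rbp
#       call malloc@plt
#
# normalises to 'movq\ncall malloc': the ".text" directive is dropped, the
# operands of movq are discarded, and the @plt suffix is stripped from the
# call target, which is kept.
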
def if_verbose( n, s ):
    if config.verbose >= n:
        print(s)

def dump_file(f):
    try:
        with io.open(f) as file:
            print(file.read())
    except Exception:
        print('')

def runCmd(cmd, stdin=None, stdout=None, stderr=None, timeout_multiplier=1.0, print_output=False):
    timeout_prog = strip_quotes(config.timeout_prog)
    timeout = str(int(ceil(config.timeout * timeout_multiplier)))

    # Format cmd using config. Example: cmd='{hpc} report A.tix'
    cmd = cmd.format(**config.__dict__)
    if_verbose(3, cmd + (' < ' + os.path.basename(stdin) if stdin else ''))

    stdin_file = io.open(stdin, 'rb') if stdin else None
    stdout_buffer = b''
    stderr_buffer = b''

    hStdErr = subprocess.PIPE
    if stderr is subprocess.STDOUT:
        hStdErr = subprocess.STDOUT

    try:
        # cmd is a complex command in Bourne-shell syntax,
        # e.g. (cd . && 'C:/users/simonpj/HEAD/inplace/bin/ghc-stage2' ...etc),
        # so it must ultimately be run by a Bourne shell. It's the timeout
        # program's job to invoke the Bourne shell.

        r = subprocess.Popen([timeout_prog, timeout, cmd],
                             stdin=stdin_file,
                             stdout=subprocess.PIPE,
                             stderr=hStdErr,
                             env=ghc_env)

        stdout_buffer, stderr_buffer = r.communicate()
    finally:
        if stdin_file:
            stdin_file.close()
        if config.verbose >= 1 and print_output:
            if stdout_buffer:
                sys.stdout.buffer.write(stdout_buffer)
            if stderr_buffer:
                sys.stderr.buffer.write(stderr_buffer)

        if stdout:
            with io.open(stdout, 'wb') as f:
                f.write(stdout_buffer)
        if stderr:
            if stderr is not subprocess.STDOUT:
                with io.open(stderr, 'wb') as f:
                    f.write(stderr_buffer)

    if r.returncode == 98:
        # The python timeout program uses 98 to signal that ^C was pressed
        stopNow()
    if r.returncode == 99 and getTestOpts().exit_code != 99:
        # Only print a message when timeout killed the process unexpectedly.
        if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
    return r.returncode

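# Typical usage (editor's sketch, reusing the '{hpc}' placeholder from the
# comment above; the output file name is hypothetical):
#
#   exit_code = runCmd('{hpc} version', stdout=in_testdir('hpc.version'),
#                      print_output=True)
#
# The command is run under the configured timeout program, so exit code 98
# means ^C was pressed and 99 means the timeout fired.
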
# -----------------------------------------------------------------------------
# Check whether ghostscript is available for validating the output of hp2ps.

def genGSCmd(psfile):
    return '{{gs}} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "{0}"'.format(psfile)

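# Note the doubled braces: after the .format(...) call above, the command
# still contains the literal placeholder '{gs}', which runCmd expands to
# config.gs when it formats the command. For example:
#
#   genGSCmd('good.ps')
#     == '{gs} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "good.ps"'
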
def gsNotWorking():
    global gs_working
    print("GhostScript not available for hp2ps tests")

global gs_working
gs_working = False
if config.have_profiling:
    if config.gs != '':
        resultGood = runCmd(genGSCmd(config.top + '/config/good.ps'))
        if resultGood == 0:
            resultBad = runCmd(genGSCmd(config.top + '/config/bad.ps') +
                               ' >/dev/null 2>&1')
            if resultBad != 0:
                print("GhostScript available for hp2ps tests")
                gs_working = True
            else:
                gsNotWorking()
        else:
            gsNotWorking()
    else:
        gsNotWorking()

def add_suffix( name, suffix ):
    if suffix == '':
        return name
    else:
        return name + '.' + suffix

def add_hs_lhs_suffix(name):
    if getTestOpts().c_src:
        return add_suffix(name, 'c')
    elif getTestOpts().cmm_src:
        return add_suffix(name, 'cmm')
    elif getTestOpts().objc_src:
        return add_suffix(name, 'm')
    elif getTestOpts().objcpp_src:
        return add_suffix(name, 'mm')
    elif getTestOpts().literate:
        return add_suffix(name, 'lhs')
    else:
        return add_suffix(name, 'hs')

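# So, for a hypothetical test 'T1234', add_hs_lhs_suffix('T1234') yields
# 'T1234.hs' by default, 'T1234.lhs' when literate=True, 'T1234.c' when
# c_src=True, 'T1234.cmm' for cmm_src, and 'T1234.m'/'T1234.mm' for the
# Objective-C variants.
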
def replace_suffix( name, suffix ):
    base, suf = os.path.splitext(name)
    return base + '.' + suffix

def in_testdir(name, suffix=''):
    return os.path.join(getTestOpts().testdir, add_suffix(name, suffix))

def in_srcdir(name, suffix=''):
    return os.path.join(getTestOpts().srcdir, add_suffix(name, suffix))

# Finding the sample output. The filename is of the form
#
#   <test>.stdout[-ws-<wordsize>][-<platform>|-<os>]
#
def find_expected_file(name, suff):
    basename = add_suffix(name, suff)
    # Override the basename if the user has specified one; this will then be
    # subjected to the same name-mangling scheme as normal, to allow
    # platform-specific overrides to work.
    basename = getTestOpts().use_specs.get(suff, basename)

    files = [basename + ws + plat
             for plat in ['-' + config.platform, '-' + config.os, '']
             for ws in ['-ws-' + config.wordsize, '']]

    for f in files:
        if os.path.exists(in_srcdir(f)):
            return f

    return basename

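# The candidates are tried most-specific first. On a hypothetical 64-bit
# Linux configuration (platform 'x86_64-unknown-linux', os 'linux'),
# find_expected_file('T1234', 'stdout') probes, in order:
#
#   T1234.stdout-ws-64-x86_64-unknown-linux
#   T1234.stdout-x86_64-unknown-linux
#   T1234.stdout-ws-64-linux
#   T1234.stdout-linux
#   T1234.stdout-ws-64
#   T1234.stdout
#
# falling back to the plain name if none exists in the source directory.
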
if config.msys:
    import stat
    def cleanup():
        testdir = getTestOpts().testdir
        max_attempts = 5
        retries = max_attempts
        def on_error(function, path, excinfo):
            # At least one test (T11489) removes the write bit from a file it
            # produces. Windows refuses to delete read-only files with a
            # permission error. Try setting the write bit and try again.
            os.chmod(path, stat.S_IWRITE)
            function(path)

        # On Windows we have to retry the delete a couple of times.
        # The reason for this is that a FileDelete command just marks a
        # file for deletion. The file is really only removed when the last
        # handle to the file is closed. Unfortunately there are a lot of
        # system services that can have a file temporarily opened using a
        # shared read-only lock, such as the built-in AV and search indexer.
        #
        # We can't really guarantee that these are all off, so whenever the
        # folder still exists after an rmtree we wait a bit and try again.
        #
        # From what I've seen in the tests on the CI server, this is
        # relatively rare, so overall we won't be retrying a lot. If the
        # folder is still locked after a reasonable amount of time, abort
        # the current test by throwing an exception; that way it won't fail
        # with an even more cryptic error.
        #
        # See Trac #13162
        exception = None
        while retries > 0 and os.path.exists(testdir):
            time.sleep((max_attempts-retries)*6)
            try:
                shutil.rmtree(testdir, onerror=on_error, ignore_errors=False)
            except Exception as e:
                exception = e
            retries -= 1

        if retries == 0 and os.path.exists(testdir):
            raise Exception("Unable to remove folder '%s': %s\nUnable to start current test."
                            % (testdir, exception))
else:
    def cleanup():
        testdir = getTestOpts().testdir
        if os.path.exists(testdir):
            shutil.rmtree(testdir, ignore_errors=False)

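# With max_attempts = 5 the retry loop above sleeps 0, 6, 12, 18 and 24
# seconds before successive attempts, so a stubbornly locked directory is
# given up on after roughly a minute.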

# -----------------------------------------------------------------------------
# Yield all of the files ending in '.T' below the directory roots.

def findTFiles(roots):
    for root in roots:
        for path, dirs, files in os.walk(root, topdown=True):
            # Never pick up .T files in uncleaned .run directories. Assigning
            # to dirs[:] in place prunes the walk, as os.walk permits when
            # topdown=True.
            dirs[:] = [dir for dir in sorted(dirs)
                       if not dir.endswith(testdir_suffix)]
            for filename in files:
                if filename.endswith('.T'):
                    yield os.path.join(path, filename)

# -----------------------------------------------------------------------------
# Output a test summary to the specified file object

def summary(t, file, short=False, color=False):

    file.write('\n')
    printUnexpectedTests(file,
                         [t.unexpected_passes, t.unexpected_failures,
                          t.unexpected_stat_failures, t.framework_failures])

    if short:
        # Only print the list of unexpected tests above.
        return

    colorize = lambda s: s
    if color:
        if len(t.unexpected_failures) > 0 or \
           len(t.unexpected_stat_failures) > 0 or \
           len(t.framework_failures) > 0:
            colorize = str_fail
        else:
            colorize = str_pass

    file.write(colorize('SUMMARY') + ' for test run started at '
               + time.strftime("%c %Z", t.start_time) + '\n'
               + str(datetime.timedelta(seconds=
                     round(time.time() - time.mktime(t.start_time)))).rjust(8)
               + ' spent to go through\n'
               + repr(t.total_tests).rjust(8)
               + ' total tests, which gave rise to\n'
               + repr(t.total_test_cases).rjust(8)
               + ' test cases, of which\n'
               + repr(t.n_tests_skipped).rjust(8)
               + ' were skipped\n'
               + '\n'
               + repr(len(t.missing_libs)).rjust(8)
               + ' had missing libraries\n'
               + repr(t.n_expected_passes).rjust(8)
               + ' expected passes\n'
               + repr(t.n_expected_failures).rjust(8)
               + ' expected failures\n'
               + '\n'
               + repr(len(t.framework_failures)).rjust(8)
               + ' caused framework failures\n'
               + repr(len(t.framework_warnings)).rjust(8)
               + ' caused framework warnings\n'
               + repr(len(t.unexpected_passes)).rjust(8)
               + ' unexpected passes\n'
               + repr(len(t.unexpected_failures)).rjust(8)
               + ' unexpected failures\n'
               + repr(len(t.unexpected_stat_failures)).rjust(8)
               + ' unexpected stat failures\n'
               + '\n')

    if t.unexpected_passes:
        file.write('Unexpected passes:\n')
        printTestInfosSummary(file, t.unexpected_passes)

    if t.unexpected_failures:
        file.write('Unexpected failures:\n')
        printTestInfosSummary(file, t.unexpected_failures)

    if t.unexpected_stat_failures:
        file.write('Unexpected stat failures:\n')
        printTestInfosSummary(file, t.unexpected_stat_failures)

    if t.framework_failures:
        file.write('Framework failures:\n')
        printTestInfosSummary(file, t.framework_failures)

    if t.framework_warnings:
        file.write('Framework warnings:\n')
        printTestInfosSummary(file, t.framework_warnings)

    if stopping():
        file.write('WARNING: Testsuite run was terminated early\n')

def printUnexpectedTests(file, testInfoss):
    unexpected = set(result.testname
                     for testInfos in testInfoss
                     for result in testInfos
                     if not result.testname.endswith('.T'))
    if unexpected:
        file.write('Unexpected results from:\n')
        file.write('TEST="' + ' '.join(sorted(unexpected)) + '"\n')
        file.write('\n')

def printTestInfosSummary(file, testInfos):
    maxDirLen = max(len(tr.directory) for tr in testInfos)
    for result in testInfos:
        directory = result.directory.ljust(maxDirLen)
        file.write(' {directory} {r.testname} [{r.reason}] ({r.way})\n'.format(
            r = result,
            directory = directory))
    file.write('\n')

def modify_lines(s, f):
    s = '\n'.join([f(l) for l in s.splitlines()])
    if s and s[-1] != '\n':
        # Prevent '\ No newline at end of file' warnings when diffing.
        s += '\n'
    return s
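
# A small illustration (editor's example): modify_lines applies f line by
# line and guarantees a trailing newline on non-empty results:
#
#   >>> modify_lines('foo\nbar', str.upper)
#   'FOO\nBAR\n'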