b637b1992d7afc6c8077a2d3d1a357a7b1442819
[ghc.git] / testsuite / driver / testlib.py
1 # coding=utf8
2 #
3 # (c) Simon Marlow 2002
4 #
5
6 import io
7 import shutil
8 import os
9 import re
10 import traceback
11 import time
12 import datetime
13 import copy
14 import glob
15 import sys
16 from math import ceil, trunc
17 from pathlib import PurePath
18 import collections
19 import subprocess
20
21 from testglobals import config, ghc_env, default_testopts, brokens, t
22 from testutil import strip_quotes, lndir, link_or_copy_file, passed, failBecause, str_fail, str_pass
23 from cpu_features import have_cpu_feature
24 import perf_notes as Perf
25 from perf_notes import MetricChange
# Extra source files (beyond those picked up automatically by test-name
# prefix) that individual tests depend on.
extra_src_files = {'T4198': ['exitminus1.c']} # TODO: See #12223
27
global pool_sema
if config.use_threads:
    import threading
    # Bounds how many tests run concurrently (see runTest/test_common_thread).
    pool_sema = threading.BoundedSemaphore(value=config.threads)

global wantToStop
# Set to True (via stopNow) when the driver should stop starting new tests.
wantToStop = False
35
def stopNow():
    """Ask the driver to stop launching tests as soon as possible."""
    global wantToStop
    wantToStop = True

def stopping():
    """Has a stop been requested (e.g. after a KeyboardInterrupt)?"""
    return wantToStop
42
43
44 # Options valid for the current test only (these get reset to
45 # testdir_testopts after each test).
46
global testopts_local
if config.use_threads:
    # Each worker thread carries its own "current test" options.
    testopts_local = threading.local()
else:
    # Single-threaded: a plain object with an `x` attribute suffices.
    class TestOpts_Local:
        pass
    testopts_local = TestOpts_Local()

def getTestOpts():
    # Options of the test currently executing on this thread.
    return testopts_local.x

def setLocalTestOpts(opts):
    # Install `opts` as the current test's options for this thread.
    global testopts_local
    testopts_local.x=opts
61
def isCompilerStatsTest():
    """Does the current test measure performance of the compiler itself?"""
    return bool(getTestOpts().is_compiler_stats_test)

def isStatsTest():
    """Does the current test collect any performance statistics?"""
    return getTestOpts().is_stats_test
69
70
# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
    # Chain f onto the per-directory settings; executeSetups flattens
    # these nested [settings, f] lists when each test is registered.
    global thisdir_settings
    thisdir_settings = [thisdir_settings, f]
76
77 # -----------------------------------------------------------------------------
78 # Canned setup functions for common cases. eg. for a test you might say
79 #
80 # test('test001', normal, compile, [''])
81 #
82 # to run it without any options, but change it to
83 #
84 # test('test001', expect_fail, compile, [''])
85 #
86 # to expect failure for this test.
87 #
88 # type TestOpt = (name :: String, opts :: Object) -> IO ()
89
def normal( name, opts ):
    """Default setup function: run the test with unmodified options."""
    return

def skip( name, opts ):
    """Skip this test entirely."""
    opts.skip = True

def expect_fail( name, opts ):
    """Mark the test as expected to fail.

    Used when the compiler, testdriver, OS or platform is missing a
    certain feature, and we don't plan to or can't fix it now or in
    the future.
    """
    opts.expect = 'fail'
101
def reqlib( lib ):
    """Require the given library; otherwise mark the test 'missing-lib'."""
    return lambda name, opts, l=lib: _reqlib(name, opts, l)

def stage1(name, opts):
    """A stage1 setup function deliberately does not exist; fail loudly.

    See Note [Why is there no stage1 setup function?]
    """
    framework_fail(name, 'stage1 setup function does not exist',
                   'add your test to testsuite/tests/stage1 instead')
109
110 # Note [Why is there no stage1 setup function?]
111 #
112 # Presumably a stage1 setup function would signal that the stage1
113 # compiler should be used to compile a test.
114 #
115 # Trouble is, the path to the compiler + the `ghc --info` settings for
116 # that compiler are currently passed in from the `make` part of the
117 # testsuite driver.
118 #
119 # Switching compilers in the Python part would be entirely too late, as
120 # all ghc_with_* settings would be wrong. See config/ghc for possible
121 # consequences (for example, config.run_ways would still be
122 # based on the default compiler, quite likely causing ./validate --slow
123 # to fail).
124 #
125 # It would be possible to let the Python part of the testsuite driver
126 # make the call to `ghc --info`, but doing so would require quite some
127 # work. Care has to be taken to not affect the run_command tests for
128 # example, as they also use the `ghc --info` settings:
129 # quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
130 #
131 # If you want a test to run using the stage1 compiler, add it to the
132 # testsuite/tests/stage1 directory. Validate runs the tests in that
133 # directory with `make stage=1`.
134
# Cache the results of looking to see if we have a library or not.
# This makes quite a difference, especially on Windows.
have_lib_cache = {}

def have_library(lib):
    """ Test whether the given library is available """
    if lib in have_lib_cache:
        return have_lib_cache[lib]

    cmd = strip_quotes(config.ghc_pkg)
    p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         env=ghc_env)
    # read from stdout and stderr to avoid blocking due to
    # buffers filling
    p.communicate()
    got_it = p.wait() == 0
    have_lib_cache[lib] = got_it
    return got_it
157
def _reqlib( name, opts, lib ):
    """Implementation of reqlib: expect 'missing-lib' when lib is absent."""
    if not have_library(lib):
        opts.expect = 'missing-lib'

def req_haddock( name, opts ):
    """Expect 'missing-lib' unless haddock is available."""
    if not config.haddock:
        opts.expect = 'missing-lib'

def req_profiling( name, opts ):
    '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
    if not config.have_profiling:
        opts.expect = 'fail'

def req_shared_libs( name, opts ):
    """Expect failure unless shared libraries are available."""
    if not config.have_shared_libs:
        opts.expect = 'fail'

def req_interp( name, opts ):
    """Expect failure unless the interpreter (GHCi) is available."""
    if not config.have_interp:
        opts.expect = 'fail'

def req_smp( name, opts ):
    """Expect failure unless the runtime was built with SMP support."""
    if not config.have_smp:
        opts.expect = 'fail'

def ignore_stdout(name, opts):
    """Do not compare the test's stdout against the expected output."""
    opts.ignore_stdout = True

def ignore_stderr(name, opts):
    """Do not compare the test's stderr against the expected output."""
    opts.ignore_stderr = True

def combined_output( name, opts ):
    """Merge stderr into stdout and check the combined stream."""
    opts.combined_output = True
191
192 # -----
193
def expect_fail_for( ways ):
    """Expect this test to fail, but only in the given ways."""
    return lambda name, opts, w=ways: _expect_fail_for(name, opts, w)

def _expect_fail_for( name, opts, ways ):
    opts.expect_fail_for = ways

def expect_broken( bug ):
    """The test is known broken; `bug` is the tracking ticket number."""
    return lambda name, opts, b=bug: _expect_broken(name, opts, b)

def _expect_broken( name, opts, bug ):
    record_broken(name, opts, bug)
    opts.expect = 'fail'

def expect_broken_for( bug, ways ):
    """Like expect_broken, but only for the given ways."""
    return lambda name, opts, b=bug, w=ways: _expect_broken_for(name, opts, b, w)

def _expect_broken_for( name, opts, bug, ways ):
    record_broken(name, opts, bug)
    opts.expect_fail_for = ways

def record_broken(name, opts, bug):
    """Remember (for end-of-run reporting) that this test is marked broken."""
    me = (bug, opts.testdir, name)
    if me not in brokens:
        brokens.append(me)

def _expect_pass(way):
    # Helper function. Not intended for use in .T files.
    opts = getTestOpts()
    return opts.expect == 'pass' and way not in opts.expect_fail_for
225
226 # -----
227
def omit_ways( ways ):
    """Never run this test in any of the given ways."""
    return lambda name, opts, w=ways: _omit_ways(name, opts, w)

def _omit_ways( name, opts, ways ):
    opts.omit_ways = ways

# -----

def only_ways( ways ):
    """Run this test only in the given ways."""
    return lambda name, opts, w=ways: _only_ways(name, opts, w)

def _only_ways( name, opts, ways ):
    opts.only_ways = ways

# -----

def extra_ways( ways ):
    """Run this test in the given ways in addition to the default ones."""
    return lambda name, opts, w=ways: _extra_ways(name, opts, w)

def _extra_ways( name, opts, ways ):
    opts.extra_ways = ways

# -----

def set_stdin( file ):
    """Feed the contents of the given file to the test program's stdin."""
    return lambda name, opts, f=file: _set_stdin(name, opts, f)

def _set_stdin( name, opts, f ):
    opts.stdin = f
257
258 # -----
259
def exit_code( val ):
    """Expect the test program to exit with the given code."""
    return lambda name, opts, v=val: _exit_code(name, opts, v)

def _exit_code( name, opts, v ):
    opts.exit_code = v

def signal_exit_code( val ):
    """Expect the test program to be killed by the given signal.

    On Solaris the raw signal number is the exit code. Elsewhere, when
    an application running on Linux receives a fatal error signal its
    exit code is encoded as 128 + signal value; see
    http://www.tldp.org/LDP/abs/html/exitcodes.html. Mac OS X builder
    behaviour suggests it works the same way there.
    """
    if opsys('solaris2'):
        return exit_code(val)
    return exit_code(val + 128)
276
277 # -----
278
def compile_timeout_multiplier( val ):
    """Scale the compile-time timeout for this test by the given factor."""
    return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)

def _compile_timeout_multiplier( name, opts, v ):
    opts.compile_timeout_multiplier = v

def run_timeout_multiplier( val ):
    """Scale the run-time timeout for this test by the given factor."""
    return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)

def _run_timeout_multiplier( name, opts, v ):
    opts.run_timeout_multiplier = v

# -----

def extra_run_opts( val ):
    """Pass extra command-line arguments when running the test program."""
    return lambda name, opts, v=val: _extra_run_opts(name, opts, v)

def _extra_run_opts( name, opts, v ):
    opts.extra_run_opts = v

# -----

def extra_hc_opts( val ):
    """Pass extra flags to the compiler when building this test."""
    return lambda name, opts, v=val: _extra_hc_opts(name, opts, v)

def _extra_hc_opts( name, opts, v ):
    opts.extra_hc_opts = v
306
307 # -----
308
def extra_clean( files ):
    """Obsolete no-op, kept so existing .T files keep working."""
    # TODO. Remove all calls to extra_clean.
    return lambda _name, _opts: None

def extra_files(files):
    """Declare extra source files that this test depends on."""
    return lambda name, opts: _extra_files(name, opts, files)

def _extra_files(name, opts, files):
    opts.extra_files.extend(files)
318
319 # -----
320
321 # Defaults to "test everything, and only break on extreme cases"
322 #
323 # The inputs to this function are slightly interesting:
324 # metric can be either:
325 # - 'all', in which case all 3 possible metrics are collected and compared.
326 # - The specific metric one wants to use in the test.
327 # - A list of the metrics one wants to use in the test.
328 #
329 # Deviation defaults to 20% because the goal is correctness over performance.
330 # The testsuite should avoid breaking when there is not an actual error.
331 # Instead, the testsuite should notify of regressions in a non-breaking manner.
332 #
333 # collect_compiler_stats is used when the metrics collected are about the compiler.
334 # collect_stats is used in the majority case when the metrics to be collected
335 # are about the performance of the runtime code generated by the compiler.
def collect_compiler_stats(metric='all',deviation=20):
    """Collect performance metrics of the compiler itself."""
    return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d, True)

def collect_stats(metric='all', deviation=20):
    """Collect run-time performance metrics of the compiled program."""
    return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d)

def testing_metrics():
    """The metrics compared when 'all' is requested."""
    return ['bytes allocated', 'peak_megabytes_allocated', 'max_bytes_used']
344
# This is an internal function that is used only in the implementation.
# 'is_compiler_stats_test' is somewhat of an unfortunate name.
# If the boolean is set to true, it indicates that this test is one that
# measures the performance numbers of the compiler.
# As this is a fairly rare case in the testsuite, it defaults to false to
# indicate that it is a 'normal' performance test.
def _collect_stats(name, opts, metrics, deviation, is_compiler_stats_test=False):
    """Configure `opts` as a stats test over the given metric(s)."""
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        # BUG FIX: this used to call failBecause() and discard its result,
        # which silently ignored the invalid name. Record a framework
        # failure instead, mirroring the identical check in test().
        framework_fail(name, 'bad_name', 'This test has an invalid name.')

    # Normalize metrics to a list of strings.
    if isinstance(metrics, str):
        if metrics == 'all':
            metrics = testing_metrics()
        else:
            metrics = [metrics]

    opts.is_stats_test = True
    if is_compiler_stats_test:
        opts.is_compiler_stats_test = True

    # Compiler performance numbers change when debugging is on, making the
    # results useless and confusing. Therefore, skip if debugging is on.
    if config.compiler_debugged and is_compiler_stats_test:
        opts.skip = 1

    # For each metric, record a baseline lookup function and the allowed
    # deviation (in percent) against that baseline.
    for metric in metrics:
        baselineByWay = lambda way, target_commit: Perf.baseline_metric( \
            target_commit, name, config.test_env, metric, way)

        opts.stats_range_fields[metric] = (baselineByWay, deviation)
376
377 # -----
378
def when(b, f):
    """Apply setup function f only when b holds, else behave like normal.

    When list_broken is on we want to see all expect_broken calls, so in
    that mode f is applied unconditionally.
    """
    return f if (b or config.list_broken) else normal

def unless(b, f):
    """Apply setup function f only when b does not hold."""
    return when(not b, f)
389
def doing_ghci():
    """Is the ghci way enabled in this run?"""
    return 'ghci' in config.run_ways

def ghc_dynamic():
    """Is the compiler under test dynamically linked?"""
    return config.ghc_dynamic

def fast():
    """Is the testsuite running at the fastest speed setting?"""
    return config.speed == 2

def platform( plat ):
    """Does the configured platform string match plat?"""
    return config.platform == plat

def opsys( os ):
    """Does the configured operating system match os?"""
    return config.os == os

def arch( arch ):
    """Does the configured architecture match arch?"""
    return config.arch == arch

def wordsize( ws ):
    """Does the configured word size (in bits) match ws?"""
    return config.wordsize == str(ws)

def msys( ):
    """Are we running under MSYS (Windows)?"""
    return config.msys

def cygwin( ):
    """Are we running under Cygwin (Windows)?"""
    return config.cygwin

def have_vanilla( ):
    """Are vanilla libraries available?"""
    return config.have_vanilla

def have_ncg( ):
    """Is the native code generator available?"""
    return config.have_ncg

def have_dynamic( ):
    """Are dynamic libraries available?"""
    return config.have_dynamic

def have_profiling( ):
    """Are profiling libraries available?"""
    return config.have_profiling

def in_tree_compiler( ):
    """Is the compiler under test an in-tree (uninstalled) build?"""
    return config.in_tree_compiler

def unregisterised( ):
    """Is the compiler an unregisterised build?"""
    return config.unregisterised

def compiler_profiled( ):
    """Is the compiler itself a profiled build?"""
    return config.compiler_profiled

def compiler_debugged( ):
    """Is the compiler itself a debug build?"""
    return config.compiler_debugged

def have_gdb( ):
    """Is gdb available on this machine?"""
    return config.have_gdb

def have_readelf( ):
    """Is readelf available on this machine?"""
    return config.have_readelf
446
447 # ---
448
def high_memory_usage(name, opts):
    """Run this test with no others in flight: it needs a lot of memory."""
    opts.alone = True

def multi_cpu_race(name, opts):
    """Run this test with no others in flight.

    If a test is for a multi-CPU race, then running the test alone
    increases the chance that we'll actually see it.
    """
    opts.alone = True
456
457 # ---
def literate( name, opts ):
    """The main source file is literate Haskell (.lhs)."""
    opts.literate = True

def c_src( name, opts ):
    """The main source file is C."""
    opts.c_src = True

def objc_src( name, opts ):
    """The main source file is Objective-C."""
    opts.objc_src = True

def objcpp_src( name, opts ):
    """The main source file is Objective-C++."""
    opts.objcpp_src = True

def cmm_src( name, opts ):
    """The main source file is C--."""
    opts.cmm_src = True

def outputdir( odir ):
    """Place compilation output in the given directory."""
    return lambda name, opts, d=odir: _outputdir(name, opts, d)

def _outputdir( name, opts, odir ):
    opts.outputdir = odir
478
479 # ----
480
def pre_cmd( cmd ):
    """Run the given shell command in the test directory before the test."""
    return lambda name, opts, c=cmd: _pre_cmd(name, opts, c)

def _pre_cmd( name, opts, cmd ):
    opts.pre_cmd = cmd

# ----

def cmd_prefix( prefix ):
    """Prefix the command used to run the test program."""
    return lambda name, opts, p=prefix: _cmd_prefix(name, opts, p)

def _cmd_prefix( name, opts, prefix ):
    opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd

# ----

def cmd_wrapper( fun ):
    """Transform the command used to run the test program with fun."""
    return lambda name, opts, f=fun: _cmd_wrapper(name, opts, f)

def _cmd_wrapper( name, opts, fun ):
    opts.cmd_wrapper = fun

# ----

def compile_cmd_prefix( prefix ):
    """Prefix the command used to compile the test."""
    return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, p)

def _compile_cmd_prefix( name, opts, prefix ):
    opts.compile_cmd_prefix = prefix
510
511 # ----
512
def check_stdout( f ):
    """Install a custom function used to check the test's stdout."""
    return lambda name, opts, f=f: _check_stdout(name, opts, f)

def _check_stdout( name, opts, f ):
    opts.check_stdout = f

def no_check_hp(name, opts):
    """Do not check the heap profile (.hp) output of this test."""
    opts.check_hp = False
521
522 # ----
523
def filter_stdout_lines( regex ):
    """ Filter lines of stdout with the given regular expression """
    def f( name, opts ):
        keep_matches = lambda s: '\n'.join(re.findall(regex, s))
        _normalise_fun(name, opts, keep_matches)
    return f

def normalise_slashes( name, opts ):
    """Apply the normalise_slashes_ normaliser to this test's output."""
    _normalise_fun(name, opts, normalise_slashes_)

def normalise_exe( name, opts ):
    """Apply the normalise_exe_ normaliser to this test's output."""
    _normalise_fun(name, opts, normalise_exe_)

def normalise_fun( *fs ):
    """Add extra normalisation functions for the test's stdout."""
    return lambda name, opts: _normalise_fun(name, opts, fs)

def _normalise_fun( name, opts, *fs ):
    opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)
541
def normalise_errmsg_fun( *fs ):
    """Add extra normalisation functions for the test's error output."""
    return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)

def _normalise_errmsg_fun( name, opts, *fs ):
    opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)

def check_errmsg(needle):
    """Reduce the error output to whether it contains the given string."""
    def norm(str):
        if needle in str:
            return "%s contained in -ddump-simpl\n" % needle
        return "%s not contained in -ddump-simpl\n" % needle
    return normalise_errmsg_fun(norm)

def grep_errmsg(needle):
    """Keep only the error-output lines matching the given regex."""
    def norm(str):
        return "".join(filter(lambda l: re.search(needle, l), str.splitlines(True)))
    return normalise_errmsg_fun(norm)
560
def normalise_whitespace_fun(f):
    """Install f as the whitespace normaliser for the test's output."""
    return lambda name, opts: _normalise_whitespace_fun(name, opts, f)

def _normalise_whitespace_fun(name, opts, f):
    opts.whitespace_normaliser = f
566
def normalise_version_( *pkgs ):
    """Build a normaliser replacing '<pkg>-<version>' with '<pkg>-<VERSION>'."""
    def normalise_version__( str ):
        return re.sub('(' + '|'.join(map(re.escape,pkgs)) + ')-[0-9.]+',
                      '\\1-<VERSION>', str)
    return normalise_version__

def normalise_version( *pkgs ):
    """Normalise versions of the given packages in stdout and stderr."""
    def normalise_version__( name, opts ):
        _normalise_fun(name, opts, normalise_version_(*pkgs))
        _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
    return normalise_version__

def normalise_drive_letter(name, opts):
    # Windows only. Change D:\\ to C:\\.
    _normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))
582
def keep_prof_callstacks(name, opts):
    """Keep profiling callstacks in the output.

    Use together with `only_ways(prof_ways)`.
    """
    opts.keep_prof_callstacks = True
589
def join_normalisers(*a):
    """
    Compose functions, flattening sequences.

       join_normalisers(f1,[f2,f3],f4)

    is the same as

       lambda x: f1(f2(f3(f4(x))))
    """
    # BUG FIX: collections.Iterable was deprecated in Python 3.3 and
    # removed in 3.10; collections.abc.Iterable is the supported name.
    from collections.abc import Iterable

    def flatten(l):
        """
        Taken from http://stackoverflow.com/a/2158532/946226
        """
        for el in l:
            # Strings and bytes are iterable but must be treated as atoms.
            if isinstance(el, Iterable) and not isinstance(el, (bytes, str)):
                for sub in flatten(el):
                    yield sub
            else:
                yield el

    a = flatten(a)

    fn = lambda x: x  # identity function
    for f in a:
        assert callable(f)
        # Each new fn applies f first, then everything composed so far.
        fn = lambda x, f=f, fn=fn: fn(f(x))
    return fn
620
621 # ----
622 # Function for composing two opt-fns together
623
def executeSetups(fs, name, opts):
    """Apply a setup function, or (recursively) a list of them, to opts."""
    if type(fs) is list:
        # A list of setups: execute each one in order.
        for f in fs:
            executeSetups(f, name, opts)
    else:
        # A single setup function: just apply it.
        fs(name, opts)
632
633 # -----------------------------------------------------------------------------
634 # The current directory of tests
635
def newTestDir(tempdir, dir):
    """Set the directory (and tempdir) in which subsequent tests live."""
    global thisdir_settings
    # Reset the options for this test directory.
    def settings(name, opts, tempdir=tempdir, dir=dir):
        return _newTestDir(name, opts, tempdir, dir)
    thisdir_settings = settings

# Should be equal to entry in toplevel .gitignore.
testdir_suffix = '.run'

def _newTestDir(name, opts, tempdir, dir):
    """Point opts at the test's source dir and its private run directory."""
    # Drop any '..' components so the run directory stays inside tempdir.
    testdir = os.path.join('', *(p for p in PurePath(dir).parts if p != '..'))
    opts.srcdir = os.path.join(os.getcwd(), dir)
    opts.testdir = os.path.join(tempdir, testdir, name + testdir_suffix)
    opts.compiler_always_flags = config.compiler_always_flags
652
653 # -----------------------------------------------------------------------------
654 # Actually doing tests
655
# Tests that may run concurrently with others.
parallelTests = []
# Tests that must run with nothing else in flight (opts.alone).
aloneTests = []
# Names of all registered tests, used to reject duplicates.
allTestNames = set([])

def runTest(watcher, opts, name, func, args):
    # Run one test: on a worker thread (bounded by pool_sema) when
    # threading is enabled, otherwise directly on this thread.
    if config.use_threads:
        pool_sema.acquire()
        t = threading.Thread(target=test_common_thread,
                             name=name,
                             args=(watcher, name, opts, func, args))
        t.daemon = False
        t.start()
    else:
        test_common_work(watcher, name, opts, func, args)
670
# name :: String
# setup :: [TestOpt] -> IO ()
def test(name, setup, func, args):
    """Register a single test with the driver.

    Called (indirectly) from the .T files: validates the test name,
    applies the per-directory and per-test setup functions to a fresh
    copy of the default options, and queues the test to run either
    alone or in parallel.
    """
    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    if config.run_only_some_tests:
        if name not in config.only:
            return
        else:
            # Note [Mutating config.only]
            # config.only is initially the set of tests requested by
            # the user (via 'make TEST='). We then remove all tests that
            # we've already seen (in .T files), so that we can later
            # report on any tests we couldn't find and error out.
            config.only.remove(name)

    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
    myTestOpts = copy.deepcopy(default_testopts)

    executeSetups([thisdir_settings, setup], name, myTestOpts)

    thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
    else:
        parallelTests.append(thisTest)
    allTestNames.add(name)
707
if config.use_threads:
    def test_common_thread(watcher, name, opts, func, args):
        # Worker-thread wrapper: always release the pool semaphore
        # (acquired in runTest) even if the test body raises.
        try:
            test_common_work(watcher, name, opts, func, args)
        finally:
            pool_sema.release()
714
def get_package_cache_timestamp():
    """Return the mtime of the package db cache file, or 0.0 if unavailable.

    Used to detect tests that modify the package database behind our back.
    """
    if config.package_conf_cache_file == '':
        return 0.0
    else:
        try:
            return os.stat(config.package_conf_cache_file).st_mtime
        # BUG FIX: a bare 'except:' also swallowed KeyboardInterrupt and
        # SystemExit; only a failed stat should map to 0.0.
        except OSError:
            return 0.0

# File extensions never copied into the test's run directory.
do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o', '.out') # 12112
725
def test_common_work(watcher, name, opts, func, args):
    """Run one registered test in every applicable 'way'.

    Computes the set of ways to run, gathers the source files the test
    depends on, runs do_test once per way, and records skip/failure
    bookkeeping. Always notifies `watcher` when done, even if an
    unhandled exception occurs.
    """
    try:
        t.total_tests += 1
        setLocalTestOpts(opts)

        # Snapshot the package db cache mtime so we can detect tests that
        # modify the package database (compared again at the end).
        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()

        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']

        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]

        t.total_test_cases += len(all_ways)

        # A way is runnable unless skipped, filtered out by only_ways /
        # omit_ways / cmdline_ways, or excluded by the perf-test filters.
        ok_way = lambda way: \
            not getTestOpts().skip \
            and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and (not (config.only_perf_tests and not isStatsTest())) \
            and way not in getTestOpts().omit_ways

        # Which ways we are asked to skip
        do_ways = list(filter (ok_way,all_ways))

        # Only run all ways in slow mode.
        # See Note [validate and testsuite speed] in toplevel Makefile.
        if config.accept:
            # Only ever run one way
            do_ways = do_ways[:1]
        elif config.speed > 0:
            # However, if we EXPLICITLY asked for a way (with extra_ways)
            # please test it!
            explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
            other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
            do_ways = other_ways[:1] + explicit_ways

        # Find all files in the source directory that this test
        # depends on. Do this only once for all ways.
        # Generously add all filenames that start with the name of
        # the test to this set, as a convenience to test authors.
        # They will have to use the `extra_files` setup function to
        # specify all other files that their test depends on (but
        # this seems to be necessary for only about 10% of all
        # tests).
        files = set(f for f in os.listdir(opts.srcdir)
                       if f.startswith(name) and not f == name and
                          not f.endswith(testdir_suffix) and
                          not os.path.splitext(f)[1] in do_not_copy)
        for filename in (opts.extra_files + extra_src_files.get(name, [])):
            if filename.startswith('/'):
                framework_fail(name, 'whole-test',
                    'no absolute paths in extra_files please: ' + filename)

            elif '*' in filename:
                # Don't use wildcards in extra_files too much, as
                # globbing is slow.
                files.update((os.path.relpath(f, opts.srcdir)
                            for f in glob.iglob(in_srcdir(filename))))

            elif filename:
                files.add(filename)

            else:
                framework_fail(name, 'whole-test', 'extra_file is empty string')

        # Run the required tests...
        for way in do_ways:
            if stopping():
                break
            try:
                do_test(name, way, func, args, files)
            except KeyboardInterrupt:
                stopNow()
            except Exception as e:
                framework_fail(name, way, str(e))
                traceback.print_exc()

        t.n_tests_skipped += len(set(all_ways) - set(do_ways))

        if config.cleanup and do_ways:
            try:
                cleanup()
            except Exception as e:
                framework_fail(name, 'runTest', 'Unhandled exception during cleanup: ' + str(e))

        package_conf_cache_file_end_timestamp = get_package_cache_timestamp();

        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))

    except Exception as e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
    finally:
        watcher.notify()
832
def do_test(name, way, func, args, files):
    """Run a single test in a single way and classify the result.

    Sets up a fresh run directory, copies/links the test's files into
    it, runs any pre_cmd and then the test function itself, and files
    the outcome into the global result tallies in `t`.
    """
    opts = getTestOpts()

    full_name = name + '(' + way + ')'

    if_verbose(2, "=====> {0} {1} of {2} {3}".format(
        full_name, t.total_tests, len(allTestNames),
        [len(t.unexpected_passes),
         len(t.unexpected_failures),
         len(t.framework_failures)]))

    # Clean up prior to the test, so that we can't spuriously conclude
    # that it passed on the basis of old run outputs.
    cleanup()
    os.makedirs(opts.testdir)

    # Link all source files for this test into a new directory in
    # /tmp, and run the test in that directory. This makes it
    # possible to run tests in parallel, without modification, that
    # would otherwise (accidentally) write to the same output file.
    # It also makes it easier to keep the testsuite clean.

    for extra_file in files:
        src = in_srcdir(extra_file)
        dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
        if os.path.isfile(src):
            link_or_copy_file(src, dst)
        elif os.path.isdir(src):
            if os.path.exists(dst):
                shutil.rmtree(dst)
            os.mkdir(dst)
            lndir(src, dst)
        else:
            if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
                # When using a ghc built without haddock support, .t
                # files are rightfully missing. Don't
                # framework_fail. Test will be skipped later.
                pass
            else:
                framework_fail(name, way,
                    'extra_file does not exist: ' + extra_file)

    if func.__name__ == 'run_command' or func.__name__ == 'makefile_test' or opts.pre_cmd:
        # When running 'MAKE' make sure 'TOP' still points to the
        # root of the testsuite.
        src_makefile = in_srcdir('Makefile')
        dst_makefile = in_testdir('Makefile')
        if os.path.exists(src_makefile):
            with io.open(src_makefile, 'r', encoding='utf8') as src:
                makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
            with io.open(dst_makefile, 'w', encoding='utf8') as dst:
                dst.write(makefile)

    if opts.pre_cmd:
        exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, override_options(opts.pre_cmd)),
                           stderr = subprocess.STDOUT,
                           print_output = config.verbose >= 3)

        # If user used expect_broken then don't record failures of pre_cmd
        if exit_code != 0 and opts.expect not in ['fail']:
            framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
            if_verbose(1, '** pre_cmd was "{0}".'.format(override_options(opts.pre_cmd)))

    # Run the test function itself (compile, compile_and_run, ...).
    result = func(*[name,way] + args)

    if opts.expect not in ['pass', 'fail', 'missing-lib']:
        framework_fail(name, way, 'bad expected ' + opts.expect)

    try:
        passFail = result['passFail']
    except (KeyError, TypeError):
        passFail = 'No passFail found'

    directory = re.sub('^\\.[/\\\\]', '', opts.testdir)

    # Classify the (pass/fail) x (expected pass/fail) outcome.
    if passFail == 'pass':
        if _expect_pass(way):
            t.expected_passes.append((directory, name, way))
            t.n_expected_passes += 1
        else:
            if_verbose(1, '*** unexpected pass for %s' % full_name)
            t.unexpected_passes.append((directory, name, 'unexpected', way))
    elif passFail == 'fail':
        if _expect_pass(way):
            reason = result['reason']
            tag = result.get('tag')
            if tag == 'stat':
                if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
                t.unexpected_stat_failures.append((directory, name, reason, way))
            else:
                if_verbose(1, '*** unexpected failure for %s' % full_name)
                t.unexpected_failures.append((directory, name, reason, way))
        else:
            if opts.expect == 'missing-lib':
                t.missing_libs.append((directory, name, 'missing-lib', way))
            else:
                t.n_expected_failures += 1
    else:
        framework_fail(name, way, 'bad result ' + passFail)
932
# Make is often invoked with -s, which means if it fails, we get
# no feedback at all. This is annoying. So let's remove the option
# if found and instead have the testsuite decide on what to do
# with the output.
def override_options(pre_cmd):
    """At high verbosity, strip silencing flags from a '$make' pre_cmd."""
    # BUG FIX: use a raw string for the regex. '\$' in a normal string
    # literal is an invalid escape sequence (DeprecationWarning now, an
    # error in future Python versions).
    if config.verbose >= 5 and bool(re.match(r'\$make', pre_cmd, re.I)):
        return pre_cmd.replace('-s' , '') \
                      .replace('--silent', '') \
                      .replace('--quiet' , '')

    return pre_cmd
944
def framework_fail(name, way, reason):
    """Record a failure of the test framework itself (not of the test)."""
    opts = getTestOpts()
    directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
    t.framework_failures.append((directory, name, way, reason))

def framework_warn(name, way, reason):
    """Record a non-fatal warning from the test framework."""
    opts = getTestOpts()
    directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework warning for %s %s ' % (full_name, reason))
    t.framework_warnings.append((directory, name, way, reason))
958
def badResult(result):
    """True unless `result` is a dict whose 'passFail' entry is 'pass'."""
    try:
        return result['passFail'] != 'pass'
    except (KeyError, TypeError):
        # Missing key, or not a dict at all: treat as bad.
        return True
966
967 # -----------------------------------------------------------------------------
968 # Generic command tests
969
970 # A generic command test is expected to run and exit successfully.
971 #
972 # The expected exit code can be changed via exit_code() as normal, and
973 # the expected stdout/stderr are stored in <testname>.stdout and
974 # <testname>.stderr. The output of the command can be ignored
975 # altogether by using the setup function ignore_stdout instead of
976 # run_command.
977
def run_command( name, way, cmd ):
    """Test body: run an arbitrary shell command and check its result."""
    return simple_run( name, '', override_options(cmd), '' )

def makefile_test( name, way, target=None ):
    """Test body: run 'make <target>' (the test's name by default)."""
    target = name if target is None else target
    cmd = '$MAKE -s --no-print-directory {target}'.format(target=target)
    return run_command(name, way, cmd)
987
988 # -----------------------------------------------------------------------------
989 # GHCi tests
990
def ghci_script( name, way, script):
    """Test body: run the given script inside GHCi."""
    flags = ' '.join(get_compiler_flags())
    way_flags = ' '.join(config.way_flags[way])

    # We pass HC and HC_OPTS as environment variables, so that the
    # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
    # NB: put way_flags before flags so that flags in all.T can override others
    cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {way_flags} {flags}'
          ).format(flags=flags, way_flags=way_flags)

    getTestOpts().stdin = script
    return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1003
1004 # -----------------------------------------------------------------------------
1005 # Compile-only tests
1006
1007 def compile( name, way, extra_hc_opts ):
1008 return do_compile( name, way, 0, '', [], extra_hc_opts )
1009
1010 def compile_fail( name, way, extra_hc_opts ):
1011 return do_compile( name, way, 1, '', [], extra_hc_opts )
1012
def backpack_typecheck( name, way, extra_hc_opts ):
    """Typecheck a backpack unit (-fno-code); must succeed."""
    return do_compile(name, way, 0, '', [],
                      "-fno-code -fwrite-interface " + extra_hc_opts,
                      backpack=True)
1015
def backpack_typecheck_fail( name, way, extra_hc_opts ):
    """Typecheck a backpack unit (-fno-code); must fail."""
    return do_compile(name, way, 1, '', [],
                      "-fno-code -fwrite-interface " + extra_hc_opts,
                      backpack=True)
1018
def backpack_compile( name, way, extra_hc_opts ):
    """Compile a backpack unit; must succeed."""
    return do_compile(name, way, 0, '', [], extra_hc_opts, backpack=True)
1021
def backpack_compile_fail( name, way, extra_hc_opts ):
    """Compile a backpack unit; must fail."""
    return do_compile(name, way, 1, '', [], extra_hc_opts, backpack=True)
1024
def backpack_run( name, way, extra_hc_opts ):
    """Compile a backpack unit and run the resulting program."""
    return compile_and_run__(name, way, '', [], extra_hc_opts, backpack=True)
1027
def multimod_compile( name, way, top_mod, extra_hc_opts ):
    """Compile a multi-module program via --make; must succeed."""
    return do_compile(name, way, 0, top_mod, [], extra_hc_opts)
1030
def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
    """Compile a multi-module program via --make; must fail."""
    return do_compile(name, way, 1, top_mod, [], extra_hc_opts)
1033
def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
    """Compile extra modules first, then the main program; must succeed."""
    return do_compile(name, way, 0, top_mod, extra_mods, extra_hc_opts)
1036
def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
    """Compile extra modules first, then the main program; must fail."""
    return do_compile(name, way, 1, top_mod, extra_mods, extra_hc_opts)
1039
def do_compile(name, way, should_fail, top_mod, extra_mods, extra_hc_opts, **kwargs):
    """Compile a test (plus any extra modules) and compare its stderr
    against the expected stderr file."""
    prep = extras_build(way, extra_mods, extra_hc_opts)
    if badResult(prep):
        return prep
    extra_hc_opts = prep['hc_opts']

    build = simple_build(name, way, extra_hc_opts, should_fail, top_mod, 0, 1, **kwargs)
    if badResult(build):
        return build

    # The actual stderr should always match the expected, regardless of
    # whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).
    expected = find_expected_file(name, 'stderr')
    actual = add_suffix(name, 'comp.stderr')
    ws_norm = getattr(getTestOpts(), "whitespace_normaliser",
                      normalise_whitespace)

    if compare_outputs(way, 'stderr',
                       join_normalisers(getTestOpts().extra_errmsg_normaliser,
                                        normalise_errmsg),
                       expected, actual,
                       whitespace_normaliser=ws_norm):
        # no problems found, this test passed
        return passed()
    return failBecause('stderr mismatch')
1071
def compile_cmp_asm( name, way, extra_hc_opts ):
    """Compile a .cmm file with -keep-s-files and compare the produced
    assembly against the expected .asm file."""
    print('Compile only, extra args = ', extra_hc_opts)
    build = simple_build(name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)
    if badResult(build):
        return build

    # Here we compare the generated assembly (not stderr as in do_compile).
    expected = find_expected_file(name, 'asm')
    actual = add_suffix(name, 's')

    if compare_outputs(way, 'asm',
                       join_normalisers(normalise_errmsg, normalise_asm),
                       expected, actual):
        # no problems found, this test passed
        return passed()
    return failBecause('asm mismatch')
1093
1094 # -----------------------------------------------------------------------------
1095 # Compile-and-run tests
1096
def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts, backpack=0 ):
    """Build a program (or load it into ghci for ghci ways) and run it."""
    prep = extras_build(way, extra_mods, extra_hc_opts)
    if badResult(prep):
        return prep
    extra_hc_opts = prep['hc_opts']

    if way.startswith('ghci'):
        # interpreted: no separate build/link step
        return interpreter_run(name, way, extra_hc_opts, top_mod)

    # compiled
    build = simple_build(name, way, extra_hc_opts, 0, top_mod, 1, 1, backpack = backpack)
    if badResult(build):
        return build

    # we don't check the compiler's stderr for a compile-and-run test
    return simple_run(name, way, './' + name, getTestOpts().extra_run_opts)
1116
def compile_and_run( name, way, extra_hc_opts ):
    """Compile a single-module program and run it."""
    return compile_and_run__(name, way, '', [], extra_hc_opts)
1119
def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
    """Compile a multi-module program (via --make on top_mod) and run it."""
    return compile_and_run__(name, way, top_mod, [], extra_hc_opts)
1122
def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
    """Compile extra modules, then the main program, and run it."""
    return compile_and_run__(name, way, top_mod, extra_mods, extra_hc_opts)
1125
def stats( name, way, stats_file ):
    """Check a previously produced stats file against the expected ranges."""
    return check_stats(name, way, stats_file, getTestOpts().stats_range_fields)
1129
def metric_dict(name, way, metric, value):
    """Wrap one performance measurement in a Perf.PerfStat record,
    tagged with the current test environment."""
    return Perf.PerfStat(test_env = config.test_env,
                         test     = name,
                         way      = way,
                         metric   = metric,
                         value    = value)
1137
1138 # -----------------------------------------------------------------------------
1139 # Check test stats. This prints the results for the user.
1140 # name: name of the test.
1141 # way: the way.
1142 # stats_file: the path of the stats_file containing the stats for the test.
1143 # range_fields: see TestOptions.stats_range_fields
# Returns a pass/fail object. Passes if the stats are within the expected value ranges.
1145 # This prints the results for the user.
def check_stats(name, way, stats_file, range_fields):
    """Check the metrics in *stats_file* against the expected ranges.

    name:         test name
    way:          the way being tested
    stats_file:   path (relative to the test dir) of the RTS stats file
    range_fields: see TestOptions.stats_range_fields; maps a metric name to
                  (baseline-lookup-function, allowed deviation)

    Returns a pass/fail object; prints the results for the user. All metrics
    are checked even after one fails, so a complete list of changes can be
    presented.
    """
    head_commit = Perf.commit_hash('HEAD')
    result = passed()
    if range_fields:
        try:
            # 'with' guarantees the handle is closed even if read() throws.
            with open(in_testdir(stats_file)) as f:
                stats_file_contents = f.read()
        except IOError as e:
            return failBecause(str(e))

        for (metric, baseline_and_dev) in range_fields.items():
            # Raw strings: '\(' in a plain literal is a deprecated escape.
            field_match = re.search(r'\("' + metric + r'", "([0-9]+)"\)',
                                    stats_file_contents)
            if field_match is None:
                print('Failed to find metric: ', metric)
                metric_result = failBecause('no such stats metric')
            else:
                actual_val = int(field_match.group(1))

                # Store the metric so it can later be stored in a git note.
                perf_stat = metric_dict(name, way, metric, actual_val)
                change = None

                # If this is the first time running the benchmark, then pass.
                baseline = baseline_and_dev[0](way, head_commit)
                if baseline is None:
                    metric_result = passed()
                    change = MetricChange.NewMetric
                else:
                    tolerance_dev = baseline_and_dev[1]
                    (change, metric_result) = Perf.check_stats_change(
                        perf_stat,
                        baseline,
                        tolerance_dev,
                        config.allowed_perf_changes,
                        config.verbose >= 4)
                t.metrics.append((change, perf_stat))

            # If any metric fails then the test fails.
            # Note, the remaining metrics are still run so that
            # a complete list of changes can be presented to the user.
            if metric_result['passFail'] == 'fail':
                result = metric_result

    return result
1191
1192 # -----------------------------------------------------------------------------
1193 # Build a single-module program
1194
def extras_build( way, extra_mods, extra_hc_opts ):
    """Build each auxiliary module; non-Haskell extras contribute their
    object file to the accumulated compiler options.

    Returns a pass result carrying the augmented 'hc_opts', or the first
    bad build result.
    """
    for mod, opts in extra_mods:
        result = simple_build(mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0)
        if not mod.endswith(('.hs', '.lhs')):
            # e.g. a .c extra: link against its object file
            extra_hc_opts += ' ' + replace_suffix(mod, 'o')
        if badResult(result):
            return result

    return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1204
def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, backpack = False):
    """Run the compiler once over a test and check the exit code.

    name:          the test name
    way:           the way being tested
    extra_hc_opts: extra flags appended to the command line
    should_fail:   if truthy, a zero compiler exit code fails the test
    top_mod:       if non-empty, compile this module with --make
    link:          if truthy, link to an executable called *name*
    addsuf:        if truthy (and no top_mod/backpack), append .hs/.lhs
    backpack:      compile with --backpack (and a private -outputdir)

    Returns a pass/fail result object.

    NOTE(review): the command template below is filled in via
    .format(**locals()), so the local names (cmd_prefix, to_do, srcname,
    flags, extra_hc_opts, opts) are semantically significant.
    """
    opts = getTestOpts()

    # Redirect stdout and stderr to the same file
    stdout = in_testdir(name, 'comp.stderr')
    stderr = subprocess.STDOUT

    # Work out which source file to hand to the compiler.
    if top_mod != '':
        srcname = top_mod
    elif addsuf:
        if backpack:
            srcname = add_suffix(name, 'bkp')
        else:
            srcname = add_hs_lhs_suffix(name)
    else:
        srcname = name

    # Work out the compilation mode flags (--make / --backpack / -c).
    if top_mod != '':
        to_do = '--make '
        if link:
            to_do = to_do + '-o ' + name
    elif backpack:
        if link:
            to_do = '-o ' + name + ' '
        else:
            to_do = ''
        to_do = to_do + '--backpack '
    elif link:
        to_do = '-o ' + name
    else:
        to_do = '-c' # just compile

    # For compiler-perf tests, ask the RTS to dump machine-readable stats.
    stats_file = name + '.comp.stats'
    if isCompilerStatsTest():
        extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    if backpack:
        extra_hc_opts += ' -outputdir ' + name + '.out'

    # Required by GHC 7.3+, harmless for earlier versions:
    if (getTestOpts().c_src or
        getTestOpts().objc_src or
        getTestOpts().objcpp_src or
        getTestOpts().cmm_src):
        extra_hc_opts += ' -no-hs-main '

    if getTestOpts().compile_cmd_prefix == '':
        cmd_prefix = ''
    else:
        cmd_prefix = getTestOpts().compile_cmd_prefix + ' '

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    # '{compiler}' is left for runCmd to substitute from config.
    cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
           '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())

    exit_code = runCmd(cmd, None, stdout, stderr, opts.compile_timeout_multiplier)

    if exit_code != 0 and not should_fail:
        if config.verbose >= 1 and _expect_pass(way):
            print('Compile failed (exit code {0}) errors were:'.format(exit_code))
            actual_stderr_path = in_testdir(name, 'comp.stderr')
            dump_file(actual_stderr_path)

    # ToDo: if the sub-shell was killed by ^C, then exit

    # Perf check happens even when the exit code is wrong, so the metrics
    # still get recorded.
    if isCompilerStatsTest():
        statsResult = check_stats(name, way, stats_file, opts.stats_range_fields)
        if badResult(statsResult):
            return statsResult

    if should_fail:
        if exit_code == 0:
            return failBecause('exit code 0')
    else:
        if exit_code != 0:
            return failBecause('exit code non-0')

    return passed()
1284
1285 # -----------------------------------------------------------------------------
1286 # Run a program and check its output
1287 #
1288 # If testname.stdin exists, route input from that, else
1289 # from /dev/null. Route output to testname.run.stdout and
1290 # testname.run.stderr. Returns the exit code of the run.
1291
def simple_run(name, way, prog, extra_run_opts):
    """Run *prog* and check its exit code, stdout, stderr and (optionally)
    heap/time profiles and RTS stats.

    Input comes from <name>.stdin (or opts.stdin) if present, else no stdin;
    output goes to <name>.run.stdout / <name>.run.stderr.
    Returns a pass/fail result object.

    NOTE(review): the final 'cd ...' template is filled via
    .format(**locals()), so the local names 'opts' and 'cmd' matter.
    """
    opts = getTestOpts()

    # figure out what to use for stdin
    if opts.stdin:
        stdin = in_testdir(opts.stdin)
    elif os.path.exists(in_testdir(name, 'stdin')):
        stdin = in_testdir(name, 'stdin')
    else:
        stdin = None

    stdout = in_testdir(name, 'run.stdout')
    if opts.combined_output:
        stderr = subprocess.STDOUT
    else:
        stderr = in_testdir(name, 'run.stderr')

    my_rts_flags = rts_flags(way)

    # Collect stats if necessary:
    # isStatsTest and not isCompilerStatsTest():
    #   assume we are running a ghc compiled program. Collect stats.
    # isStatsTest and way == 'ghci':
    #   assume we are running a program via ghci. Collect stats
    stats_file = name + '.stats'
    if isStatsTest() and (not isCompilerStatsTest() or way == 'ghci'):
        stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    else:
        stats_args = ''

    # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
    cmd = prog + stats_args + ' ' + my_rts_flags + ' ' + extra_run_opts

    if opts.cmd_wrapper != None:
        cmd = opts.cmd_wrapper(cmd)

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    # run the command
    exit_code = runCmd(cmd, stdin, stdout, stderr, opts.run_timeout_multiplier)

    # check the exit code
    if exit_code != opts.exit_code:
        if config.verbose >= 1 and _expect_pass(way):
            print('Wrong exit code for ' + name + '(' + way + ')' + '(expected', opts.exit_code, ', actual', exit_code, ')')
            dump_stdout(name)
            dump_stderr(name)
        return failBecause('bad exit code')

    if not (opts.ignore_stderr or stderr_ok(name, way) or opts.combined_output):
        return failBecause('bad stderr')
    if not (opts.ignore_stdout or stdout_ok(name, way)):
        return failBecause('bad stdout')

    # Profile checks are driven by the RTS flags actually passed.
    check_hp = '-h' in my_rts_flags and opts.check_hp
    check_prof = '-p' in my_rts_flags

    # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
    if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
        return failBecause('bad heap profile')
    if check_prof and not check_prof_ok(name, way):
        return failBecause('bad profile')

    return check_stats(name, way, stats_file, opts.stats_range_fields)
1356
def rts_flags(way):
    """Return the way's RTS options wrapped in +RTS/-RTS, or '' if none."""
    flags = config.way_rts_flags.get(way, [])
    if not flags:
        return ''
    return '+RTS {0} -RTS'.format(' '.join(flags))
1360
1361 # -----------------------------------------------------------------------------
1362 # Run a program in the interpreter and check its output
1363
def interpreter_run(name, way, extra_hc_opts, top_mod):
    """Run a test in GHCi: generate a driver script, run GHCi over it,
    split GHCi's own output from the program's, and check both.

    Returns a pass/fail result object.

    NOTE(review): the command templates are filled via .format(**locals()),
    so the local names (srcname, flags, extra_hc_opts, opts, cmd) matter.
    """
    opts = getTestOpts()

    stdout = in_testdir(name, 'interp.stdout')
    stderr = in_testdir(name, 'interp.stderr')
    script = in_testdir(name, 'genscript')

    if opts.combined_output:
        framework_fail(name, 'unsupported',
                       'WAY=ghci and combined_output together is not supported')

    if top_mod == '':
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = top_mod

    delimiter = '===== program output begins here\n'

    with io.open(script, 'w', encoding='utf8') as f:
        # set the prog name and command-line args to match the compiled
        # environment.
        f.write(':set prog ' + name + '\n')
        f.write(':set args ' + opts.extra_run_opts + '\n')
        # Add marker lines to the stdout and stderr output files, so we
        # can separate GHCi's output from the program's.
        f.write(':! echo ' + delimiter)
        f.write(':! echo 1>&2 ' + delimiter)
        # Set stdout to be line-buffered to match the compiled environment.
        f.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
        # wrapping in GHC.TopHandler.runIO ensures we get the same output
        # in the event of an exception as for the compiled program.
        f.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')

    stdin = in_testdir(opts.stdin if opts.stdin else add_suffix(name, 'stdin'))
    if os.path.exists(stdin):
        os.system('cat "{0}" >> "{1}"'.format(stdin, script))

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())

    # Fixed: identity comparison, and use 'opts' consistently (same object
    # that getTestOpts() returns for this test).
    if opts.cmd_wrapper is not None:
        cmd = opts.cmd_wrapper(cmd)

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    exit_code = runCmd(cmd, script, stdout, stderr, opts.run_timeout_multiplier)

    # split the stdout into compilation/program output
    split_file(stdout, delimiter,
               in_testdir(name, 'comp.stdout'),
               in_testdir(name, 'run.stdout'))
    split_file(stderr, delimiter,
               in_testdir(name, 'comp.stderr'),
               in_testdir(name, 'run.stderr'))

    # check the exit code
    if exit_code != opts.exit_code:
        print('Wrong exit code for ' + name + '(' + way + ') (expected', opts.exit_code, ', actual', exit_code, ')')
        dump_stdout(name)
        dump_stderr(name)
        return failBecause('bad exit code')

    # ToDo: if the sub-shell was killed by ^C, then exit

    if not (opts.ignore_stderr or stderr_ok(name, way)):
        return failBecause('bad stderr')
    elif not (opts.ignore_stdout or stdout_ok(name, way)):
        return failBecause('bad stdout')
    else:
        return passed()
1436
def split_file(in_fn, delimiter, out1_fn, out2_fn):
    """Split *in_fn* into two files at the first line that equals
    *delimiter* (after stripping leading whitespace).

    Lines before the delimiter go to *out1_fn*, lines after it to
    *out2_fn*; the delimiter line itself is dropped. If the delimiter
    never occurs, everything goes to *out1_fn*.
    """
    # See Note [Universal newlines].
    with io.open(in_fn, 'r', encoding='utf8', errors='replace', newline=None) as infile, \
         io.open(out1_fn, 'w', encoding='utf8', newline='') as out1, \
         io.open(out2_fn, 'w', encoding='utf8', newline='') as out2:
        lines = iter(infile)
        for line in lines:
            # Raw string: '\s' in a plain literal is a deprecated escape.
            if re.sub(r'^\s*', '', line) == delimiter:
                break
            out1.write(line)
        for line in lines:
            out2.write(line)
1451
1452 # -----------------------------------------------------------------------------
1453 # Utils
def get_compiler_flags():
    """Return the compiler flags for the current test: the always-flags,
    the test's extra options, and -outputdir when one is configured."""
    opts = getTestOpts()

    flags = copy.copy(opts.compiler_always_flags)
    flags.append(opts.extra_hc_opts)

    # Fixed: identity comparison against None.
    if opts.outputdir is not None:
        flags.extend(["-outputdir", opts.outputdir])

    return flags
1465
def stdout_ok(name, way):
    """Compare the test's run.stdout against the expected stdout file,
    or hand it to a user-supplied check_stdout hook if one is set."""
    actual = add_suffix(name, 'run.stdout')
    expected = find_expected_file(name, 'stdout')
    norm = join_normalisers(normalise_output, getTestOpts().extra_normaliser)

    checker = getTestOpts().check_stdout
    if checker:
        return checker(in_testdir(actual), norm)

    return compare_outputs(way, 'stdout', norm, expected, actual)
1479
def dump_stdout( name ):
    """Print the test's captured run.stdout, if non-empty."""
    # 'contents', not 'str': don't shadow the builtin.
    with open(in_testdir(name, 'run.stdout'), encoding='utf8') as f:
        contents = f.read().strip()
    if contents:
        print("Stdout (", name, "):")
        print(contents)
1486
def stderr_ok(name, way):
    """Compare the test's run.stderr against the expected stderr file."""
    actual = add_suffix(name, 'run.stderr')
    expected = find_expected_file(name, 'stderr')
    norm = join_normalisers(normalise_errmsg,
                            getTestOpts().extra_errmsg_normaliser)

    return compare_outputs(way, 'stderr', norm, expected, actual,
                           whitespace_normaliser=normalise_whitespace)
1495
def dump_stderr( name ):
    """Print the test's captured run.stderr, if non-empty."""
    # 'contents', not 'str': don't shadow the builtin.
    with open(in_testdir(name, 'run.stderr'), encoding='utf8') as f:
        contents = f.read().strip()
    if contents:
        print("Stderr (", name, "):")
        print(contents)
1502
def read_no_crs(file):
    """Return the contents of *file* as utf8 text with Windows line
    endings normalised to a plain newline; '' if it cannot be read."""
    # 'contents', not 'str': don't shadow the builtin.
    contents = ''
    try:
        # See Note [Universal newlines].
        with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
            contents = h.read()
    except Exception:
        # On Windows, if the program fails very early, it seems the
        # files stdout/stderr are redirected to may not get created
        pass
    return contents
1514
def write_file(file, str):
    """Write *str* to *file* as utf8 with no newline translation.

    See Note [Universal newlines].
    """
    with io.open(file, 'w', encoding='utf8', newline='') as out:
        out.write(str)
1519
1520 # Note [Universal newlines]
1521 #
1522 # We don't want to write any Windows style line endings ever, because
1523 # it would mean that `make accept` would touch every line of the file
1524 # when switching between Linux and Windows.
1525 #
1526 # Furthermore, when reading a file, it is convenient to translate all
1527 # Windows style endings to '\n', as it simplifies searching or massaging
1528 # the content.
1529 #
1530 # Solution: use `io.open` instead of `open`
1531 # * when reading: use newline=None to translate '\r\n' to '\n'
1532 # * when writing: use newline='' to not translate '\n' to '\r\n'
1533 #
1534 # See https://docs.python.org/2/library/io.html#io.open.
1535 #
# This should work with both python2 and python3, and with both mingw*
# and msys2 style Python.
1538 #
1539 # Do note that io.open returns unicode strings. So we have to specify
1540 # the expected encoding. But there is at least one file which is not
1541 # valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
1542 # Another solution would be to open files in binary mode always, and
1543 # operate on bytes.
1544
def check_hp_ok(name):
    """Run hp2ps over the test's heap profile and, when ghostscript is
    available, validate the generated PostScript. Returns True on success."""
    opts = getTestOpts()

    # do not qualify for hp2ps because we should be in the right directory
    hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())

    hp2psResult = runCmd(hp2psCmd)

    actual_ps_path = in_testdir(name, 'ps')

    if hp2psResult != 0:
        print("hp2ps error when processing heap profile for " + name)
        return False
    if not os.path.exists(actual_ps_path):
        print("hp2ps did not generate PostScript for " + name)
        return False
    if not gs_working:
        # assume postscript is valid without ghostscript
        return True
    if runCmd(genGSCmd(actual_ps_path)) == 0:
        return True
    # Fixed: the original message was missing the space before "is".
    print("hp2ps output for " + name + " is not valid PostScript")
    return False
1570
def check_prof_ok(name, way):
    """Compare the generated .prof file against <name>.prof.sample, when a
    sample exists; trivially True otherwise."""
    expected_file = find_expected_file(name, 'prof.sample')

    # Check actual prof file only if we have an expected prof file to
    # compare it with.
    if not os.path.exists(in_testdir(expected_file)):
        return True

    actual_file = add_suffix(name, 'prof')
    actual_path = in_testdir(actual_file)

    if not os.path.exists(actual_path):
        print(actual_path + " does not exist")
        return False

    if os.path.getsize(actual_path) == 0:
        print(actual_path + " is empty")
        return False

    return compare_outputs(way, 'prof', normalise_prof,
                           expected_file, actual_file,
                           whitespace_normaliser=normalise_whitespace)
1594
1595 # Compare expected output to actual output, and optionally accept the
1596 # new output. Returns true if output matched or was accepted, false
1597 # otherwise. See Note [Output comparison] for the meaning of the
1598 # normaliser and whitespace_normaliser parameters.
def compare_outputs(way, kind, normaliser, expected_file, actual_file,
                    whitespace_normaliser=lambda x:x):
    """Compare expected output to actual output, and optionally accept the
    new output (when the testsuite is run with --accept).

    way:                   the way being tested (used for diagnostics)
    kind:                  'stdout'/'stderr'/'asm'/'prof' etc., for messages
    normaliser:            applied to both sides before comparing and diffing
    expected_file:         file name (relative to srcdir) of expected output
    actual_file:           file name (relative to testdir) of actual output
    whitespace_normaliser: applied on top of *normaliser* for the equality
                           check only — see Note [Output comparison]

    Returns True if output matched or was accepted, False otherwise.
    """
    expected_path = in_srcdir(expected_file)
    actual_path = in_testdir(actual_file)

    if os.path.exists(expected_path):
        expected_str = normaliser(read_no_crs(expected_path))
        # Create the .normalised file in the testdir, not in the srcdir.
        expected_normalised_file = add_suffix(expected_file, 'normalised')
        expected_normalised_path = in_testdir(expected_normalised_file)
    else:
        # No expected file: the actual output must normalise to empty.
        expected_str = ''
        expected_normalised_path = '/dev/null'

    actual_raw = read_no_crs(actual_path)
    actual_str = normaliser(actual_raw)

    # See Note [Output comparison].
    if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
        return True
    else:
        if config.verbose >= 1 and _expect_pass(way):
            print('Actual ' + kind + ' output differs from expected:')

        if expected_normalised_path != '/dev/null':
            write_file(expected_normalised_path, expected_str)

        actual_normalised_path = add_suffix(actual_path, 'normalised')
        write_file(actual_normalised_path, actual_str)

        if config.verbose >= 1 and _expect_pass(way):
            # See Note [Output comparison].
            r = runCmd('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
                                                     actual_normalised_path),
                       print_output=True)

            # If for some reason there were no non-whitespace differences,
            # then do a full diff
            if r == 0:
                r = runCmd('diff -u "{0}" "{1}"'.format(expected_normalised_path,
                                                        actual_normalised_path),
                           print_output=True)

        # --accept handling: never accept output for expected-to-fail tests.
        if config.accept and (getTestOpts().expect == 'fail' or
                              way in getTestOpts().expect_fail_for):
            if_verbose(1, 'Test is expected to fail. Not accepting new output.')
            return False
        elif config.accept and actual_raw:
            # Accept into a platform-/os-specific expected file if requested.
            if config.accept_platform:
                if_verbose(1, 'Accepting new output for platform "'
                              + config.platform + '".')
                expected_path += '-' + config.platform
            elif config.accept_os:
                if_verbose(1, 'Accepting new output for os "'
                              + config.os + '".')
                expected_path += '-' + config.os
            else:
                if_verbose(1, 'Accepting new output.')

            write_file(expected_path, actual_raw)
            return True
        elif config.accept:
            # Actual output is empty: accepting means deleting the sample.
            if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
            os.remove(expected_path)
            return True
        else:
            return False
1667
1668 # Note [Output comparison]
1669 #
1670 # We do two types of output comparison:
1671 #
1672 # 1. To decide whether a test has failed. We apply a `normaliser` and an
1673 # optional `whitespace_normaliser` to the expected and the actual
1674 # output, before comparing the two.
1675 #
1676 # 2. To show as a diff to the user when the test indeed failed. We apply
1677 # the same `normaliser` function to the outputs, to make the diff as
1678 # small as possible (only showing the actual problem). But we don't
1679 # apply the `whitespace_normaliser` here, because it might completely
1680 # squash all whitespace, making the diff unreadable. Instead we rely
1681 # on the `diff` program to ignore whitespace changes as much as
1682 # possible (#10152).
1683
def normalise_whitespace( str ):
    """Merge contiguous whitespace characters into a single space and
    trim leading/trailing whitespace."""
    tokens = str.split()
    return ' '.join(tokens)
1687
# Matches the trailing location of a call-stack entry, e.g.
# ", called at Foo.hs:12:34 in pkg-name:Foo:". Group 1 captures the
# file path (everything before the line:column numbers).
callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')
def normalise_callstacks(s):
    """Make GHC call-stack output stable across runs and ways."""
    opts = getTestOpts()

    def replace_loc(m):
        # Ignore line number differences in call stacks (#10834).
        loc = normalise_slashes_(m.group(1))
        return ', called at {0}:<line>:<column> in <package-id>:'.format(loc)

    s = callSite_re.sub(replace_loc, s)
    # Ignore the change in how we identify implicit call-stacks
    s = s.replace('from ImplicitParams', 'from HasCallStack')
    if not opts.keep_prof_callstacks:
        # Don't output prof callstacks. Test output should be
        # independent from the WAY we run the test.
        s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
    return s
1705
# Matches the two "<digits>##" fingerprint words in a Typeable TyCon
# representation, e.g. "TyCon 1234## 5678## " (the optional 'L' allows an
# integer-literal suffix). Used by normalise_type_reps below.
tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)
def normalise_type_reps(str):
    """ Normalise out fingerprints from Typeable TyCon representations """
    return tyCon_re.sub('TyCon FINGERPRINT FINGERPRINT ', str)
1711
def normalise_errmsg( str ):
    """Normalise error-messages emitted via stderr.

    Strips platform noise (AIX ld chatter, Windows .exe suffixes, slashes),
    call-stack line numbers, TyCon fingerprints, stage-compiler names and
    other spuriously varying text, so outputs compare stably.
    Fixed: regexes with backslash escapes now use raw strings (the old
    plain literals relied on deprecated escape sequences).
    """
    # IBM AIX's `ld` is a bit chatty
    if opsys('aix'):
        str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    str = normalise_callstacks(str)
    str = normalise_type_reps(str)

    # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
    # the colon is there because it appears in error messages; this
    # hacky solution is used in place of more sophisticated filename
    # mangling
    str = re.sub(r'([^\s])\.exe', r'\1', str)

    # normalise slashes, minimise Windows/Unix filename differences
    str = re.sub(r'\\', '/', str)

    # The inplace ghc's are called ghc-stage[123] to avoid filename
    # collisions, so we need to normalise that to just "ghc"
    str = re.sub('ghc-stage[123]', 'ghc', str)

    # Error messages sometimes contain integer implementation package
    str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)

    # Error messages sometimes contain this blurb which can vary
    # spuriously depending upon build configuration (e.g. based on integer
    # backend)
    str = re.sub('...plus ([a-z]+|[0-9]+) instances involving out-of-scope types',
                 '...plus N instances involving out-of-scope types', str)

    # Also filter out bullet characters. This is because bullets are used to
    # separate error sections, and tests shouldn't be sensitive to how the
    # the division happens.
    bullet = '•'.encode('utf8') if isinstance(str, bytes) else '•'
    str = str.replace(bullet, '')

    # Windows only, this is a bug in hsc2hs but it is preventing
    # stable output for the testsuite. See Trac #9775. For now we filter out this
    # warning message to get clean output.
    if config.msys:
        str = re.sub('Failed to remove file (.*); error= (.*)$', '', str)
        str = re.sub(r'DeleteFile "(.+)": permission denied \(Access is denied\.\)(.*)$', '', str)

    return str
1760
1761 # normalise a .prof file, so that we can reasonably compare it against
1762 # a sample. This doesn't compare any of the actual profiling data,
1763 # only the shape of the profile and the number of entries.
def normalise_prof (str):
    """Normalise a .prof file so it can be compared against a sample.

    Only the shape of the profile is kept — cost centre, module, src and
    the entry count — all timing/allocation percentages are dropped, as
    are the header, CAF/IDLE rows and Main.main (which moves around).
    """
    strip_patterns = [
        # everything up to (and including) the "COST CENTRE" header line
        '^(.*\n)*COST CENTRE[^\n]*\n',
        # CAF/IDLE rows change unpredictably
        '[ \t]*(CAF|IDLE).*\n',
        # XXX Main.main sometimes appears under CAF and sometimes under MAIN
        '[ \t]*main[ \t]+Main.*\n',
    ]
    for pattern in strip_patterns:
        str = re.sub(pattern, '', str)

    # Each remaining row has 9 whitespace-separated columns, e.g.
    #
    #   readPrec Main Main_1.hs:7:13-16 109 1 0.0 0.6 0.0 0.6
    #
    # Keep columns 1 (cost centre), 2 (module), 3 (src) and 5 (entries),
    # giving e.g. "readPrec Main Main_1.hs:7:13-16 1". SCC names can't
    # contain whitespace, so splitting on it is safe.
    str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
                 '\\1 \\2 \\3 \\5\n', str)
    return str
1809
def normalise_slashes_( str ):
    """Turn backslashes into forward slashes, then collapse '//' to '/'."""
    # Order matters: '\/' first becomes '//', which the second pass collapses.
    for pattern, repl in (('\\\\', '/'), ('//', '/')):
        str = re.sub(pattern, repl, str)
    return str
1814
def normalise_exe_( str ):
    """Remove Windows '.exe' suffixes from the given text."""
    # Fixed: raw string — '\.' in a plain literal is a deprecated escape.
    str = re.sub(r'\.exe', '', str)
    return str
1818
def normalise_output( str ):
    """Normalise a program's stdout/stderr for comparison."""
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    # Remove a .exe extension (for Windows); this can occur in error
    # messages generated by the program.
    str = re.sub('([^\\s])\\.exe', '\\1', str)
    str = normalise_callstacks(str)
    return normalise_type_reps(str)
1830
def normalise_asm( str ):
    """Reduce assembly text to bare mnemonics (keeping the target of
    'call' instructions), dropping assembler directives, operands and
    blank lines."""
    # Lines starting (modulo leading blanks) with a dot are metadata
    # directives such as ".type".
    directive = re.compile('^[ \t]*\\..*$')
    kept = []
    for line in str.split('\n'):
        if directive.match(line):
            continue
        tokens = re.sub('@plt', '', line).lstrip().split()
        if not tokens:
            # blank line
            continue
        if tokens[0] == 'call':
            # Keep the call target; drop all other operands.
            kept.append(tokens[0] + ' ' + tokens[1])
        else:
            kept.append(tokens[0])
    return '\n'.join(kept)
1851
def if_verbose( n, s ):
    """Print *s* only when the configured verbosity is at least *n*."""
    if config.verbose < n:
        return
    print(s)
1855
def dump_file(f):
    """Print the contents of file f to stdout; on any failure (missing
    file, unreadable contents, ...) print an empty line instead."""
    try:
        with io.open(f) as src:
            contents = src.read()
        print(contents)
    except Exception:
        print('')
1862
def runCmd(cmd, stdin=None, stdout=None, stderr=None, timeout_multiplier=1.0, print_output=False):
    """Run a Bourne-shell command under the testsuite timeout program.

    Args:
        cmd: command in Bourne-shell syntax; '{...}' placeholders are
             filled in from config (e.g. '{hpc} report A.tix').
        stdin: path of a file to feed to the child's stdin, or None.
        stdout: path to write the child's captured stdout to, or None.
        stderr: path to write the child's captured stderr to, None, or
                subprocess.STDOUT to merge stderr into stdout.
        timeout_multiplier: scales the configured timeout for slow tests.
        print_output: when True (and verbosity >= 1), echo the captured
                      output to this process's stdout/stderr as well.

    Returns:
        The child's exit code (98 signals ^C from the timeout program,
        99 signals a timeout).
    """
    timeout_prog = strip_quotes(config.timeout_prog)
    # Timeout is passed to the timeout program as a whole number of seconds.
    timeout = str(int(ceil(config.timeout * timeout_multiplier)))

    # Format cmd using config. Example: cmd='{hpc} report A.tix'
    cmd = cmd.format(**config.__dict__)
    if_verbose(3, cmd + ('< ' + os.path.basename(stdin) if stdin else ''))

    stdin_file = io.open(stdin, 'rb') if stdin else None
    stdout_buffer = b''
    stderr_buffer = b''

    # Capture stderr separately unless the caller asked to merge it.
    hStdErr = subprocess.PIPE
    if stderr is subprocess.STDOUT:
        hStdErr = subprocess.STDOUT

    try:
        # cmd is a complex command in Bourne-shell syntax
        # e.g (cd . && 'C:/users/simonpj/HEAD/inplace/bin/ghc-stage2' ...etc)
        # Hence it must ultimately be run by a Bourne shell. It's timeout's job
        # to invoke the Bourne shell
        r = subprocess.Popen([timeout_prog, timeout, cmd],
                             stdin=stdin_file,
                             stdout=subprocess.PIPE,
                             stderr=hStdErr,
                             env=ghc_env)

        stdout_buffer, stderr_buffer = r.communicate()
    finally:
        if stdin_file:
            stdin_file.close()
        # Echo captured output even if communicate() raised, so partial
        # output is not lost when debugging.
        if config.verbose >= 1 and print_output:
            if stdout_buffer:
                sys.stdout.buffer.write(stdout_buffer)
            if stderr_buffer:
                sys.stderr.buffer.write(stderr_buffer)

        if stdout:
            with io.open(stdout, 'wb') as f:
                f.write(stdout_buffer)
        if stderr:
            # When stderr is merged into stdout there is nothing separate
            # to write out.
            if stderr is not subprocess.STDOUT:
                with io.open(stderr, 'wb') as f:
                    f.write(stderr_buffer)

    if r.returncode == 98:
        # The python timeout program uses 98 to signal that ^C was pressed
        stopNow()
    if r.returncode == 99 and getTestOpts().exit_code != 99:
        # Only print a message when timeout killed the process unexpectedly.
        if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
    return r.returncode
1916
1917 # -----------------------------------------------------------------------------
1918 # checking if ghostscript is available for checking the output of hp2ps
1919
def genGSCmd(psfile):
    """Build a GhostScript command line (with a '{gs}' config placeholder,
    filled in later by runCmd) that merely parses psfile."""
    return '{gs} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "' + psfile + '"'
1922
def gsNotWorking():
    """Report that GhostScript cannot be used to check hp2ps output."""
    global gs_working
    print("GhostScript not available for hp2ps tests")
1926
# Probe, once at startup, whether GhostScript can validate hp2ps output.
# A usable gs must accept a known-good .ps file AND reject a known-bad one
# (a gs that accepts everything validates nothing).  The original deeply
# nested if/else pyramid (with stray trailing semicolons) is flattened into
# a guard chain; the order of the two runCmd probes is preserved.
global gs_working
gs_working = False
if config.have_profiling:
    if config.gs == '':
        gsNotWorking()
    elif runCmd(genGSCmd(config.top + '/config/good.ps')) != 0:
        gsNotWorking()
    elif runCmd(genGSCmd(config.top + '/config/bad.ps')
                + ' >/dev/null 2>&1') == 0:
        gsNotWorking()
    else:
        print("GhostScript available for hp2ps tests")
        gs_working = True
1944
def add_suffix( name, suffix ):
    """Return name with '.' + suffix appended; an empty suffix leaves
    name unchanged (no trailing dot)."""
    return name if suffix == '' else '.'.join((name, suffix))
1950
def add_hs_lhs_suffix(name):
    """Append the source-file extension implied by the current test options.

    The options are consulted in a fixed priority order; a plain Haskell
    source file ('.hs') is the default.
    """
    opts = getTestOpts()
    for flag, ext in (('c_src', 'c'),
                      ('cmm_src', 'cmm'),
                      ('objc_src', 'm'),
                      ('objcpp_src', 'mm'),
                      ('literate', 'lhs')):
        if getattr(opts, flag):
            return add_suffix(name, ext)
    return add_suffix(name, 'hs')
1964
def replace_suffix( name, suffix ):
    """Return name with its final extension (if any) replaced by '.' + suffix.

    Fix: the original unpacked splitext into (base, suf) but never used
    suf; index the tuple directly instead of binding an unused local.
    """
    return os.path.splitext(name)[0] + '.' + suffix
1968
def in_testdir(name, suffix=''):
    """Path of name (with optional suffix appended) inside the current
    test's working directory."""
    filename = add_suffix(name, suffix)
    return os.path.join(getTestOpts().testdir, filename)
1971
def in_srcdir(name, suffix=''):
    """Path of name (with optional suffix appended) inside the current
    test's source directory."""
    filename = add_suffix(name, suffix)
    return os.path.join(getTestOpts().srcdir, filename)
1974
1975 # Finding the sample output. The filename is of the form
1976 #
1977 # <test>.stdout[-ws-<wordsize>][-<platform>|-<os>]
1978 #
def find_expected_file(name, suff):
    """Locate the sample-output file for a test.

    Candidates are tried from most to least specific:
    <name>.<suff>[-ws-<wordsize>][-<platform>|-<os>], falling back to the
    bare <name>.<suff> when no specialised file exists in the source dir.
    """
    basename = add_suffix(name, suff)

    # Platform varies in the outer loop, word size in the inner one, so a
    # platform-specific file always beats an os- or generic one.
    for plat_suffix in ('-' + config.platform, '-' + config.os, ''):
        for ws_suffix in ('-ws-' + config.wordsize, ''):
            candidate = basename + ws_suffix + plat_suffix
            if os.path.exists(in_srcdir(candidate)):
                return candidate

    return basename
1991
if config.msys:
    import stat
    def cleanup():
        """Remove the current test's working directory (Windows/msys variant).

        Windows needs special handling: read-only files must have the
        write bit restored before deletion, and the whole removal must be
        retried because other processes may transiently hold handles open.
        """
        testdir = getTestOpts().testdir
        max_attempts = 5
        retries = max_attempts
        def on_error(function, path, excinfo):
            # At least one test (T11489) removes the write bit from a file it
            # produces. Windows refuses to delete read-only files with a
            # permission error. Try setting the write bit and try again.
            os.chmod(path, stat.S_IWRITE)
            function(path)

        # On Windows we have to retry the delete a couple of times.
        # The reason for this is that a FileDelete command just marks a
        # file for deletion. The file is really only removed when the last
        # handle to the file is closed. Unfortunately there are a lot of
        # system services that can have a file temporarily opened using a shared
        # readonly lock, such as the built in AV and search indexer.
        #
        # We can't really guarantee that these are all off, so what we can do is
        # whenever after a rmtree the folder still exists to try again and wait a bit.
        #
        # Based on what I've seen from the tests on CI server, is that this is relatively rare.
        # So overall we won't be retrying a lot. If after a reasonable amount of time the folder is
        # still locked then abort the current test by throwing an exception, this so it won't fail
        # with an even more cryptic error.
        #
        # See Trac #13162
        exception = None
        while retries > 0 and os.path.exists(testdir):
            # Back off progressively: 0s, 6s, 12s, ... between attempts.
            time.sleep((max_attempts-retries)*6)
            try:
                shutil.rmtree(testdir, onerror=on_error, ignore_errors=False)
            except Exception as e:
                exception = e
            retries -= 1

        if retries == 0 and os.path.exists(testdir):
            raise Exception("Unable to remove folder '%s': %s\nUnable to start current test."
                            % (testdir, exception))
else:
    def cleanup():
        """Remove the current test's working directory (non-Windows variant)."""
        testdir = getTestOpts().testdir
        if os.path.exists(testdir):
            shutil.rmtree(testdir, ignore_errors=False)
2038
2039
2040 # -----------------------------------------------------------------------------
2041 # Return a list of all the files ending in '.T' below directories roots.
2042
def findTFiles(roots):
    """Yield the path of every '.T' test-description file beneath roots."""
    for root in roots:
        for dirpath, subdirs, filenames in os.walk(root, topdown=True):
            # Prune uncleaned .run directories in-place (topdown walk) so
            # stale .T copies inside them are never picked up; sorting keeps
            # traversal order deterministic.
            subdirs[:] = sorted(d for d in subdirs
                                if not d.endswith(testdir_suffix))
            for fn in filenames:
                if fn.endswith('.T'):
                    yield os.path.join(dirpath, fn)
2052
2053 # -----------------------------------------------------------------------------
2054 # Output a test summary to the specified file object
2055
def summary(t, file, short=False, color=False):
    """Write a human-readable summary of the test run to file.

    Args:
        t: the test-run accumulator (counters plus lists of
           (directory, name, reason, way) tuples for each failure class).
        file: a writable text-file-like object.
        short: when True, print only the list of unexpected tests.
        color: when True, colorize the SUMMARY heading (red if anything
               failed, green otherwise).
    """

    file.write('\n')
    printUnexpectedTests(file,
        [t.unexpected_passes, t.unexpected_failures,
         t.unexpected_stat_failures, t.framework_failures])

    if short:
        # Only print the list of unexpected tests above.
        return

    # Identity by default; replaced with a colorizer only when requested.
    colorize = lambda s: s
    if color:
        if len(t.unexpected_failures) > 0 or \
           len(t.unexpected_stat_failures) > 0 or \
           len(t.framework_failures) > 0:
            colorize = str_fail
        else:
            colorize = str_pass

    # One big write: heading, elapsed wall-clock time, then the counters,
    # each right-justified to 8 columns so the numbers line up.
    file.write(colorize('SUMMARY') + ' for test run started at '
               + time.strftime("%c %Z", t.start_time) + '\n'
               + str(datetime.timedelta(seconds=
                    round(time.time() - time.mktime(t.start_time)))).rjust(8)
               + ' spent to go through\n'
               + repr(t.total_tests).rjust(8)
               + ' total tests, which gave rise to\n'
               + repr(t.total_test_cases).rjust(8)
               + ' test cases, of which\n'
               + repr(t.n_tests_skipped).rjust(8)
               + ' were skipped\n'
               + '\n'
               + repr(len(t.missing_libs)).rjust(8)
               + ' had missing libraries\n'
               + repr(t.n_expected_passes).rjust(8)
               + ' expected passes\n'
               + repr(t.n_expected_failures).rjust(8)
               + ' expected failures\n'
               + '\n'
               + repr(len(t.framework_failures)).rjust(8)
               + ' caused framework failures\n'
               + repr(len(t.framework_warnings)).rjust(8)
               + ' caused framework warnings\n'
               + repr(len(t.unexpected_passes)).rjust(8)
               + ' unexpected passes\n'
               + repr(len(t.unexpected_failures)).rjust(8)
               + ' unexpected failures\n'
               + repr(len(t.unexpected_stat_failures)).rjust(8)
               + ' unexpected stat failures\n'
               + '\n')

    # Itemise each non-empty failure class.
    if t.unexpected_passes:
        file.write('Unexpected passes:\n')
        printTestInfosSummary(file, t.unexpected_passes)

    if t.unexpected_failures:
        file.write('Unexpected failures:\n')
        printTestInfosSummary(file, t.unexpected_failures)

    if t.unexpected_stat_failures:
        file.write('Unexpected stat failures:\n')
        printTestInfosSummary(file, t.unexpected_stat_failures)

    if t.framework_failures:
        file.write('Framework failures:\n')
        printTestInfosSummary(file, t.framework_failures)

    if t.framework_warnings:
        file.write('Framework warnings:\n')
        printTestInfosSummary(file, t.framework_warnings)

    if stopping():
        file.write('WARNING: Testsuite run was terminated early\n')
2129
def printUnexpectedTests(file, testInfoss):
    """Print a shell-ready TEST="..." line naming every test with an
    unexpected result ('.T' framework entries are excluded)."""
    unexpected = {name
                  for testInfos in testInfoss
                  for (_, name, _, _) in testInfos
                  if not name.endswith('.T')}
    if unexpected:
        file.write('Unexpected results from:\n')
        file.write('TEST="' + ' '.join(sorted(unexpected)) + '"\n')
        file.write('\n')
2138
2139 def printTestInfosSummary(file, testInfos):
2140 maxDirLen = max(len(directory) for (directory, _, _, _) in testInfos)
2141 for (directory, name, reason, way) in testInfos:
2142 directory = directory.ljust(maxDirLen)
2143 file.write(' {directory} {name} [{reason}] ({way})\n'.format(**locals()))
2144 file.write('\n')
2145
def modify_lines(s, f):
    """Apply f to each line of s and rejoin; non-empty results get a
    trailing newline (prevents '\\ No newline at end of file' diff noise)."""
    transformed = '\n'.join(map(f, s.splitlines()))
    if transformed and not transformed.endswith('\n'):
        transformed += '\n'
    return transformed