# coding=utf8
#
# (c) Simon Marlow 2002
#

import io
import shutil
import os
import re
import traceback
import time
import datetime
import copy
import glob
import sys
from math import ceil, trunc
from pathlib import PurePath
import collections.abc
import subprocess

from testglobals import config, ghc_env, default_testopts, brokens, t
from testutil import strip_quotes, lndir, link_or_copy_file, passed, failBecause, str_fail, str_pass
from cpu_features import have_cpu_feature
import perf_notes as Perf
from perf_notes import MetricChange
extra_src_files = {'T4198': ['exitminus1.c']} # TODO: See #12223

global pool_sema
if config.use_threads:
    import threading
    pool_sema = threading.BoundedSemaphore(value=config.threads)

global wantToStop
wantToStop = False

def stopNow():
    global wantToStop
    wantToStop = True

def stopping():
    return wantToStop


# Options valid for the current test only (these get reset to
# testdir_testopts after each test).

global testopts_local
if config.use_threads:
    testopts_local = threading.local()
else:
    class TestOpts_Local:
        pass
    testopts_local = TestOpts_Local()

def getTestOpts():
    return testopts_local.x

def setLocalTestOpts(opts):
    global testopts_local
    testopts_local.x = opts

def isCompilerStatsTest():
    opts = getTestOpts()
    return bool(opts.is_compiler_stats_test)

def isStatsTest():
    opts = getTestOpts()
    return bool(opts.stats_range_fields)


# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
    global thisdir_settings
    thisdir_settings = [thisdir_settings, f]

# -----------------------------------------------------------------------------
# Canned setup functions for common cases. E.g. for a test you might say
#
#       test('test001', normal, compile, [''])
#
# to run it without any options, but change it to
#
#       test('test001', expect_fail, compile, [''])
#
# to expect failure for this test.
#
# type TestOpt = (name :: String, opts :: Object) -> IO ()

def normal( name, opts ):
    return

def skip( name, opts ):
    opts.skip = True

def expect_fail( name, opts ):
    # The compiler, testdriver, OS or platform is missing a certain
    # feature, and we don't plan to or can't fix it now or in the
    # future.
    opts.expect = 'fail'

def reqlib( lib ):
    return lambda name, opts, l=lib: _reqlib(name, opts, l)

def stage1(name, opts):
    # See Note [Why is there no stage1 setup function?]
    framework_fail(name, 'stage1 setup function does not exist',
                   'add your test to testsuite/tests/stage1 instead')

# Note [Why is there no stage1 setup function?]
#
# Presumably a stage1 setup function would signal that the stage1
# compiler should be used to compile a test.
#
# Trouble is, the path to the compiler + the `ghc --info` settings for
# that compiler are currently passed in from the `make` part of the
# testsuite driver.
#
# Switching compilers in the Python part would be entirely too late, as
# all ghc_with_* settings would be wrong. See config/ghc for possible
# consequences (for example, config.run_ways would still be
# based on the default compiler, quite likely causing ./validate --slow
# to fail).
#
# It would be possible to let the Python part of the testsuite driver
# make the call to `ghc --info`, but doing so would require quite some
# work. Care has to be taken to not affect the run_command tests for
# example, as they also use the `ghc --info` settings:
#     quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
#
# If you want a test to run using the stage1 compiler, add it to the
# testsuite/tests/stage1 directory. Validate runs the tests in that
# directory with `make stage=1`.

# Cache the results of looking to see if we have a library or not.
# This makes quite a difference, especially on Windows.
have_lib_cache = {}

def have_library(lib):
    """ Test whether the given library is available """
    if lib in have_lib_cache:
        got_it = have_lib_cache[lib]
    else:
        cmd = strip_quotes(config.ghc_pkg)
        p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             env=ghc_env)
        # read from stdout and stderr to avoid blocking due to
        # buffers filling
        p.communicate()
        r = p.wait()
        got_it = r == 0
        have_lib_cache[lib] = got_it

    return got_it

def _reqlib( name, opts, lib ):
    if not have_library(lib):
        opts.expect = 'missing-lib'
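
# Example (illustrative): a test that needs the 'mtl' package could be
# declared as
#   test('T1234', reqlib('mtl'), compile, [''])
# so that a missing library marks the test as 'missing-lib' instead of
# failing outright. T1234 is a made-up name.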

def req_haddock( name, opts ):
    if not config.haddock:
        opts.expect = 'missing-lib'

def req_profiling( name, opts ):
    '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
    if not config.have_profiling:
        opts.expect = 'fail'

def req_shared_libs( name, opts ):
    if not config.have_shared_libs:
        opts.expect = 'fail'

def req_interp( name, opts ):
    if not config.have_interp:
        opts.expect = 'fail'

def req_smp( name, opts ):
    if not config.have_smp:
        opts.expect = 'fail'

def ignore_stdout(name, opts):
    opts.ignore_stdout = True

def ignore_stderr(name, opts):
    opts.ignore_stderr = True

def combined_output( name, opts ):
    opts.combined_output = True

# -----

def expect_fail_for( ways ):
    return lambda name, opts, w=ways: _expect_fail_for(name, opts, w)

def _expect_fail_for( name, opts, ways ):
    opts.expect_fail_for = ways

def expect_broken( bug ):
    # This test is expected not to work due to the indicated trac bug
    # number.
    return lambda name, opts, b=bug: _expect_broken(name, opts, b)

def _expect_broken( name, opts, bug ):
    record_broken(name, opts, bug)
    opts.expect = 'fail'

def expect_broken_for( bug, ways ):
    return lambda name, opts, b=bug, w=ways: _expect_broken_for(name, opts, b, w)

def _expect_broken_for( name, opts, bug, ways ):
    record_broken(name, opts, bug)
    opts.expect_fail_for = ways

def record_broken(name, opts, bug):
    me = (bug, opts.testdir, name)
    if me not in brokens:
        brokens.append(me)

def _expect_pass(way):
    # Helper function. Not intended for use in .T files.
    opts = getTestOpts()
    return opts.expect == 'pass' and way not in opts.expect_fail_for

# -----

def omit_ways( ways ):
    return lambda name, opts, w=ways: _omit_ways(name, opts, w)

def _omit_ways( name, opts, ways ):
    opts.omit_ways = ways

# -----

def only_ways( ways ):
    return lambda name, opts, w=ways: _only_ways(name, opts, w)

def _only_ways( name, opts, ways ):
    opts.only_ways = ways

# -----

def extra_ways( ways ):
    return lambda name, opts, w=ways: _extra_ways(name, opts, w)

def _extra_ways( name, opts, ways ):
    opts.extra_ways = ways

# -----

def set_stdin( file ):
    return lambda name, opts, f=file: _set_stdin(name, opts, f)

def _set_stdin( name, opts, f ):
    opts.stdin = f

# -----

def exit_code( val ):
    return lambda name, opts, v=val: _exit_code(name, opts, v)

def _exit_code( name, opts, v ):
    opts.exit_code = v

def signal_exit_code( val ):
    if opsys('solaris2'):
        return exit_code( val )
    else:
        # When an application running on Linux receives a fatal error
        # signal, its exit code is encoded as 128 + the signal value.
        # See http://www.tldp.org/LDP/abs/html/exitcodes.html
        # I assume that Mac OS X behaves the same way; at least the
        # Mac OS X builder's behaviour suggests this.
        return exit_code( val+128 )
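
# Example (illustrative): a test expected to die from SIGABRT (signal 6)
# would use signal_exit_code(6), which on Linux translates to an expected
# exit code of 128 + 6 = 134.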

# -----

def compile_timeout_multiplier( val ):
    return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)

def _compile_timeout_multiplier( name, opts, v ):
    opts.compile_timeout_multiplier = v

def run_timeout_multiplier( val ):
    return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)

def _run_timeout_multiplier( name, opts, v ):
    opts.run_timeout_multiplier = v

# -----

def extra_run_opts( val ):
    return lambda name, opts, v=val: _extra_run_opts(name, opts, v)

def _extra_run_opts( name, opts, v ):
    opts.extra_run_opts = v

# -----

def extra_hc_opts( val ):
    return lambda name, opts, v=val: _extra_hc_opts(name, opts, v)

def _extra_hc_opts( name, opts, v ):
    opts.extra_hc_opts = v

# -----

def extra_clean( files ):
    # TODO. Remove all calls to extra_clean.
    return lambda _name, _opts: None

def extra_files(files):
    return lambda name, opts: _extra_files(name, opts, files)

def _extra_files(name, opts, files):
    opts.extra_files.extend(files)

# -----

# Defaults to "test everything, and only break on extreme cases"
#
# The inputs to this function are slightly interesting:
# metric can be either:
#     - 'all', in which case all 3 possible metrics are collected and compared.
#     - The specific metric one wants to use in the test.
#     - A list of the metrics one wants to use in the test.
#
# Deviation defaults to 20% because the goal is correctness over performance.
# The testsuite should avoid breaking when there is not an actual error.
# Instead, the testsuite should notify of regressions in a non-breaking manner.
#
# collect_compiler_stats is used when the metrics collected are about the compiler.
# collect_stats is used in the majority case when the metrics to be collected
# are about the performance of the runtime code generated by the compiler.
def collect_compiler_stats(metric='all', deviation=20):
    return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d, True)

def collect_stats(metric='all', deviation=20):
    return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d)
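
# Example (illustrative): a performance test might be declared in a .T file as
#   test('T5678', [collect_stats('bytes allocated', 5)], compile_and_run, [''])
# which compares the program's runtime allocations against the recorded
# baseline with a 5% tolerance. T5678 is a made-up name.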

def testing_metrics():
    return ['bytes allocated', 'peak_megabytes_allocated', 'max_bytes_used']

# This is an internal function that is used only in the implementation.
# 'is_compiler_stats_test' is somewhat of an unfortunate name.
# If the boolean is set to true, it indicates that this test is one that
# measures the performance numbers of the compiler.
# As this is a fairly rare case in the testsuite, it defaults to false to
# indicate that it is a 'normal' performance test.
def _collect_stats(name, opts, metric, deviation, is_compiler_stats_test=False):
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        failBecause('This test has an invalid name.')

    tests = Perf.get_perf_stats('HEAD^')

    # Might have multiple metrics being measured for a single test.
    test = [t for t in tests if t.test == name]

    if tests == [] or test == []:
        # There are no prior metrics for this test.
        if isinstance(metric, str):
            if metric == 'all':
                for field in testing_metrics():
                    opts.stats_range_fields[field] = None
            else:
                opts.stats_range_fields[metric] = None
        if isinstance(metric, list):
            for field in metric:
                opts.stats_range_fields[field] = None

        return

    if is_compiler_stats_test:
        opts.is_compiler_stats_test = True

    # Compiler performance numbers change when debugging is on, making the results
    # useless and confusing. Therefore, skip if debugging is on.
    if config.compiler_debugged and is_compiler_stats_test:
        opts.skip = 1

    # get the average value of the given metric from test
    def get_avg_val(metric_2):
        metric_2_metrics = [float(t.value) for t in test if t.metric == metric_2]
        return sum(metric_2_metrics) / len(metric_2_metrics)

    # 'all' is a shorthand to test for bytes allocated, peak megabytes allocated, and max bytes used.
    if isinstance(metric, str):
        if metric == 'all':
            for field in testing_metrics():
                opts.stats_range_fields[field] = (get_avg_val(field), deviation)
            return
        else:
            opts.stats_range_fields[metric] = (get_avg_val(metric), deviation)
            return

    if isinstance(metric, list):
        for field in metric:
            opts.stats_range_fields[field] = (get_avg_val(field), deviation)

# -----

def when(b, f):
    # When list_brokens is on, we want to see all expect_broken calls,
    # so we always do f
    if b or config.list_broken:
        return f
    else:
        return normal

def unless(b, f):
    return when(not b, f)
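
# Example (illustrative): the predicates below are typically combined with
# when/unless in .T files, e.g.
#   test('T2345', when(opsys('mingw32'), skip), compile, [''])
# skips the (made-up) test T2345 on Windows only.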

def doing_ghci():
    return 'ghci' in config.run_ways

def ghc_dynamic():
    return config.ghc_dynamic

def fast():
    return config.speed == 2

def platform( plat ):
    return config.platform == plat

def opsys( os ):
    return config.os == os

def arch( arch ):
    return config.arch == arch

def wordsize( ws ):
    return config.wordsize == str(ws)

def msys( ):
    return config.msys

def cygwin( ):
    return config.cygwin

def have_vanilla( ):
    return config.have_vanilla

def have_ncg( ):
    return config.have_ncg

def have_dynamic( ):
    return config.have_dynamic

def have_profiling( ):
    return config.have_profiling

def in_tree_compiler( ):
    return config.in_tree_compiler

def unregisterised( ):
    return config.unregisterised

def compiler_profiled( ):
    return config.compiler_profiled

def compiler_debugged( ):
    return config.compiler_debugged

def have_gdb( ):
    return config.have_gdb

def have_readelf( ):
    return config.have_readelf

# ---

def high_memory_usage(name, opts):
    opts.alone = True

# If a test is for a multi-CPU race, then running the test alone
# increases the chance that we'll actually see it.
def multi_cpu_race(name, opts):
    opts.alone = True

# ---
def literate( name, opts ):
    opts.literate = True

def c_src( name, opts ):
    opts.c_src = True

def objc_src( name, opts ):
    opts.objc_src = True

def objcpp_src( name, opts ):
    opts.objcpp_src = True

def cmm_src( name, opts ):
    opts.cmm_src = True

def outputdir( odir ):
    return lambda name, opts, d=odir: _outputdir(name, opts, d)

def _outputdir( name, opts, odir ):
    opts.outputdir = odir

# ----

def pre_cmd( cmd ):
    return lambda name, opts, c=cmd: _pre_cmd(name, opts, cmd)

def _pre_cmd( name, opts, cmd ):
    opts.pre_cmd = cmd

# ----

def cmd_prefix( prefix ):
    return lambda name, opts, p=prefix: _cmd_prefix(name, opts, prefix)

def _cmd_prefix( name, opts, prefix ):
    opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd

# ----

def cmd_wrapper( fun ):
    return lambda name, opts, f=fun: _cmd_wrapper(name, opts, fun)

def _cmd_wrapper( name, opts, fun ):
    opts.cmd_wrapper = fun

# ----

def compile_cmd_prefix( prefix ):
    return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, prefix)

def _compile_cmd_prefix( name, opts, prefix ):
    opts.compile_cmd_prefix = prefix

# ----

def check_stdout( f ):
    return lambda name, opts, f=f: _check_stdout(name, opts, f)

def _check_stdout( name, opts, f ):
    opts.check_stdout = f

def no_check_hp(name, opts):
    opts.check_hp = False

# ----

def filter_stdout_lines( regex ):
    """ Filter lines of stdout with the given regular expression """
    def f( name, opts ):
        _normalise_fun(name, opts, lambda s: '\n'.join(re.findall(regex, s)))
    return f

def normalise_slashes( name, opts ):
    _normalise_fun(name, opts, normalise_slashes_)

def normalise_exe( name, opts ):
    _normalise_fun(name, opts, normalise_exe_)

def normalise_fun( *fs ):
    return lambda name, opts: _normalise_fun(name, opts, fs)

def _normalise_fun( name, opts, *fs ):
    opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)

def normalise_errmsg_fun( *fs ):
    return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)

def _normalise_errmsg_fun( name, opts, *fs ):
    opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)

def check_errmsg(needle):
    def norm(str):
        if needle in str:
            return "%s contained in -ddump-simpl\n" % needle
        else:
            return "%s not contained in -ddump-simpl\n" % needle
    return normalise_errmsg_fun(norm)

def grep_errmsg(needle):
    def norm(str):
        return "".join(filter(lambda l: re.search(needle, l), str.splitlines(True)))
    return normalise_errmsg_fun(norm)

def normalise_whitespace_fun(f):
    return lambda name, opts: _normalise_whitespace_fun(name, opts, f)

def _normalise_whitespace_fun(name, opts, f):
    opts.whitespace_normaliser = f

def normalise_version_( *pkgs ):
    def normalise_version__( str ):
        return re.sub('(' + '|'.join(map(re.escape, pkgs)) + ')-[0-9.]+',
                      '\\1-<VERSION>', str)
    return normalise_version__
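
# Example (illustrative): normalise_version_('base') rewrites an occurrence
# of "base-4.12.0.0" in the output to "base-<VERSION>", so expected output
# files stay stable across library version bumps.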

def normalise_version( *pkgs ):
    def normalise_version__( name, opts ):
        _normalise_fun(name, opts, normalise_version_(*pkgs))
        _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
    return normalise_version__

def normalise_drive_letter(name, opts):
    # Windows only. Change D:\\ to C:\\.
    _normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))

def keep_prof_callstacks(name, opts):
    """Keep profiling callstacks.

    Use together with `only_ways(prof_ways)`.
    """
    opts.keep_prof_callstacks = True

def join_normalisers(*a):
    """
    Compose functions, flattening sequences.

       join_normalisers(f1,[f2,f3],f4)

    is the same as

       lambda x: f1(f2(f3(f4(x))))
    """

    def flatten(l):
        """
        Taken from http://stackoverflow.com/a/2158532/946226
        """
        for el in l:
            if (isinstance(el, collections.abc.Iterable)
                and not isinstance(el, (bytes, str))):
                for sub in flatten(el):
                    yield sub
            else:
                yield el

    a = flatten(a)

    fn = lambda x: x  # identity function
    for f in a:
        assert callable(f)
        fn = lambda x, f=f, fn=fn: fn(f(x))
    return fn

# ----
# Function for composing two opt-fns together

def executeSetups(fs, name, opts):
    if type(fs) is list:
        # If we have a list of setups, then execute each one
        for f in fs:
            executeSetups(f, name, opts)
    else:
        # fs is a single function, so just apply it
        fs(name, opts)

# -----------------------------------------------------------------------------
# The current directory of tests

def newTestDir(tempdir, dir):

    global thisdir_settings
    # reset the options for this test directory
    def settings(name, opts, tempdir=tempdir, dir=dir):
        return _newTestDir(name, opts, tempdir, dir)
    thisdir_settings = settings

# Should be equal to entry in toplevel .gitignore.
testdir_suffix = '.run'

def _newTestDir(name, opts, tempdir, dir):
    testdir = os.path.join('', *(p for p in PurePath(dir).parts if p != '..'))
    opts.srcdir = os.path.join(os.getcwd(), dir)
    opts.testdir = os.path.join(tempdir, testdir, name + testdir_suffix)
    opts.compiler_always_flags = config.compiler_always_flags

# -----------------------------------------------------------------------------
# Actually doing tests

parallelTests = []
aloneTests = []
allTestNames = set([])

def runTest(watcher, opts, name, func, args):
    if config.use_threads:
        pool_sema.acquire()
        t = threading.Thread(target=test_common_thread,
                             name=name,
                             args=(watcher, name, opts, func, args))
        t.daemon = False
        t.start()
    else:
        test_common_work(watcher, name, opts, func, args)

# name  :: String
# setup :: [TestOpt] -> IO ()
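#
# Example (illustrative): a .T entry combining several setup functions:
#   test('T9999', [omit_ways(['ghci']), extra_run_opts('+RTS -N2 -RTS')],
#        compile_and_run, [''])
# T9999 is a made-up name.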
def test(name, setup, func, args):
    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    if config.run_only_some_tests:
        if name not in config.only:
            return
        else:
            # Note [Mutating config.only]
            # config.only is initially the set of tests requested by
            # the user (via 'make TEST='). We then remove all tests that
            # we've already seen (in .T files), so that we can later
            # report on any tests we couldn't find and error out.
            config.only.remove(name)

    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
    myTestOpts = copy.deepcopy(default_testopts)

    executeSetups([thisdir_settings, setup], name, myTestOpts)

    thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
    else:
        parallelTests.append(thisTest)
    allTestNames.add(name)

if config.use_threads:
    def test_common_thread(watcher, name, opts, func, args):
        try:
            test_common_work(watcher, name, opts, func, args)
        finally:
            pool_sema.release()

def get_package_cache_timestamp():
    if config.package_conf_cache_file == '':
        return 0.0
    else:
        try:
            return os.stat(config.package_conf_cache_file).st_mtime
        except:
            return 0.0

do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o', '.out') # 12112

def test_common_work(watcher, name, opts, func, args):
    try:
        t.total_tests += 1
        setLocalTestOpts(opts)

        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()

        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']

        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]

        t.total_test_cases += len(all_ways)

        ok_way = lambda way: \
            not getTestOpts().skip \
            and (getTestOpts().only_ways is None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and (not (config.only_perf_tests and not isStatsTest())) \
            and way not in getTestOpts().omit_ways

        # The ways we are left with after filtering out the ones we are
        # asked to skip
        do_ways = list(filter(ok_way, all_ways))

        # Only run all ways in slow mode.
        # See Note [validate and testsuite speed] in toplevel Makefile.
        if config.accept:
            # Only ever run one way
            do_ways = do_ways[:1]
        elif config.speed > 0:
            # However, if we EXPLICITLY asked for a way (with extra_ways)
            # please test it!
            explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
            other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
            do_ways = other_ways[:1] + explicit_ways

        # Find all files in the source directory that this test
        # depends on. Do this only once for all ways.
        # Generously add all filenames that start with the name of
        # the test to this set, as a convenience to test authors.
        # They will have to use the `extra_files` setup function to
        # specify all other files that their test depends on (but
        # this seems to be necessary for only about 10% of all
        # tests).
        files = set(f for f in os.listdir(opts.srcdir)
                    if f.startswith(name) and not f == name and
                       not f.endswith(testdir_suffix) and
                       not os.path.splitext(f)[1] in do_not_copy)
        for filename in (opts.extra_files + extra_src_files.get(name, [])):
            if filename.startswith('/'):
                framework_fail(name, 'whole-test',
                               'no absolute paths in extra_files please: ' + filename)

            elif '*' in filename:
                # Don't use wildcards in extra_files too much, as
                # globbing is slow.
                files.update((os.path.relpath(f, opts.srcdir)
                              for f in glob.iglob(in_srcdir(filename))))

            elif filename:
                files.add(filename)

            else:
                framework_fail(name, 'whole-test', 'extra_file is empty string')

        # Run the required tests...
        for way in do_ways:
            if stopping():
                break
            try:
                do_test(name, way, func, args, files)
            except KeyboardInterrupt:
                stopNow()
            except Exception as e:
                framework_fail(name, way, str(e))
                traceback.print_exc()

        t.n_tests_skipped += len(set(all_ways) - set(do_ways))

        if config.cleanup and do_ways:
            try:
                cleanup()
            except Exception as e:
                framework_fail(name, 'runTest', 'Unhandled exception during cleanup: ' + str(e))

        package_conf_cache_file_end_timestamp = get_package_cache_timestamp()

        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))

    except Exception as e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
    finally:
        watcher.notify()

def do_test(name, way, func, args, files):
    opts = getTestOpts()

    full_name = name + '(' + way + ')'

    if_verbose(2, "=====> {0} {1} of {2} {3}".format(
        full_name, t.total_tests, len(allTestNames),
        [len(t.unexpected_passes),
         len(t.unexpected_failures),
         len(t.framework_failures)]))

    # Clean up prior to the test, so that we can't spuriously conclude
    # that it passed on the basis of old run outputs.
    cleanup()
    os.makedirs(opts.testdir)

    # Link all source files for this test into a new directory in
    # /tmp, and run the test in that directory. This makes it
    # possible to run tests in parallel, without modification, that
    # would otherwise (accidentally) write to the same output file.
    # It also makes it easier to keep the testsuite clean.

    for extra_file in files:
        src = in_srcdir(extra_file)
        dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
        if os.path.isfile(src):
            link_or_copy_file(src, dst)
        elif os.path.isdir(src):
            if os.path.exists(dst):
                shutil.rmtree(dst)
            os.mkdir(dst)
            lndir(src, dst)
        else:
            if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
                # When using a ghc built without haddock support, .t
                # files are rightfully missing. Don't
                # framework_fail. Test will be skipped later.
                pass
            else:
                framework_fail(name, way,
                               'extra_file does not exist: ' + extra_file)

    if func.__name__ == 'run_command' or opts.pre_cmd:
        # When running 'MAKE' make sure 'TOP' still points to the
        # root of the testsuite.
        src_makefile = in_srcdir('Makefile')
        dst_makefile = in_testdir('Makefile')
        if os.path.exists(src_makefile):
            with io.open(src_makefile, 'r', encoding='utf8') as src:
                makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
                with io.open(dst_makefile, 'w', encoding='utf8') as dst:
                    dst.write(makefile)

    if opts.pre_cmd:
        exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, override_options(opts.pre_cmd)),
                           stderr=subprocess.STDOUT,
                           print_output=config.verbose >= 3)

        # If user used expect_broken then don't record failures of pre_cmd
        if exit_code != 0 and opts.expect not in ['fail']:
            framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
            if_verbose(1, '** pre_cmd was "{0}".'.format(override_options(opts.pre_cmd)))

    result = func(*[name, way] + args)

    if opts.expect not in ['pass', 'fail', 'missing-lib']:
        framework_fail(name, way, 'bad expected ' + opts.expect)

    try:
        passFail = result['passFail']
    except (KeyError, TypeError):
        passFail = 'No passFail found'

    directory = re.sub('^\\.[/\\\\]', '', opts.testdir)

    if passFail == 'pass':
        if _expect_pass(way):
            t.expected_passes.append((directory, name, way))
            t.n_expected_passes += 1
        else:
            if_verbose(1, '*** unexpected pass for %s' % full_name)
            t.unexpected_passes.append((directory, name, 'unexpected', way))
    elif passFail == 'fail':
        if _expect_pass(way):
            reason = result['reason']
            tag = result.get('tag')
            if tag == 'stat':
                if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
                t.unexpected_stat_failures.append((directory, name, reason, way))
            else:
                if_verbose(1, '*** unexpected failure for %s' % full_name)
                t.unexpected_failures.append((directory, name, reason, way))
        else:
            if opts.expect == 'missing-lib':
                t.missing_libs.append((directory, name, 'missing-lib', way))
            else:
                t.n_expected_failures += 1
    else:
        framework_fail(name, way, 'bad result ' + passFail)

# Make is often invoked with -s, which means if it fails, we get
# no feedback at all. This is annoying. So let's remove the option
# if found and instead have the testsuite decide on what to do
# with the output.
def override_options(pre_cmd):
    if config.verbose >= 5 and bool(re.match(r'\$make', pre_cmd, re.I)):
        return pre_cmd.replace('-s', '') \
                      .replace('--silent', '') \
                      .replace('--quiet', '')

    return pre_cmd

def framework_fail(name, way, reason):
    opts = getTestOpts()
    directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
    t.framework_failures.append((directory, name, way, reason))

def framework_warn(name, way, reason):
    opts = getTestOpts()
    directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework warning for %s %s ' % (full_name, reason))
    t.framework_warnings.append((directory, name, way, reason))

def badResult(result):
    try:
        if result['passFail'] == 'pass':
            return False
        return True
    except (KeyError, TypeError):
        return True

# -----------------------------------------------------------------------------
# Generic command tests

# A generic command test is expected to run and exit successfully.
#
# The expected exit code can be changed via exit_code() as normal, and
# the expected stdout/stderr are stored in <testname>.stdout and
# <testname>.stderr. The output of the command can be ignored
# altogether by using the setup function ignore_stdout instead of
# run_command.

def run_command( name, way, cmd ):
    return simple_run( name, '', override_options(cmd), '' )

def makefile_test( name, way, target=None ):
    if target is None:
        target = name

    cmd = '$MAKE -s --no-print-directory {target}'.format(target=target)
    return run_command(name, way, cmd)
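
# Example (illustrative): a Makefile-driven test declared as
#   test('T7777', normal, makefile_test, [])
# runs 'make T7777' (silently, via the command above) in the test's
# directory. T7777 is a made-up name.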

# -----------------------------------------------------------------------------
# GHCi tests

def ghci_script( name, way, script):
    flags = ' '.join(get_compiler_flags())
    way_flags = ' '.join(config.way_flags[way])

    # We pass HC and HC_OPTS as environment variables, so that the
    # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
    cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {way_flags} {flags}'
          ).format(flags=flags, way_flags=way_flags)
    # NB: put way_flags before flags so that flags in all.T can override others

    getTestOpts().stdin = script
    return simple_run( name, way, cmd, getTestOpts().extra_run_opts )

# -----------------------------------------------------------------------------
# Compile-only tests

def compile( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], extra_hc_opts )

def compile_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], extra_hc_opts )

def backpack_typecheck( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )

def backpack_typecheck_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )

def backpack_compile( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], extra_hc_opts, backpack=True )

def backpack_compile_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], extra_hc_opts, backpack=True )

def backpack_run( name, way, extra_hc_opts ):
    return compile_and_run__( name, way, '', [], extra_hc_opts, backpack=True )

def multimod_compile( name, way, top_mod, extra_hc_opts ):
    return do_compile( name, way, 0, top_mod, [], extra_hc_opts )

def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
    return do_compile( name, way, 1, top_mod, [], extra_hc_opts )

def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
    return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)

def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
    return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)

def do_compile(name, way, should_fail, top_mod, extra_mods, extra_hc_opts, **kwargs):
    # print 'Compile only, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    result = simple_build(name, way, extra_hc_opts, should_fail, top_mod, 0, 1, **kwargs)

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    expected_stderr_file = find_expected_file(name, 'stderr')
    actual_stderr_file = add_suffix(name, 'comp.stderr')

    if not compare_outputs(way, 'stderr',
                           join_normalisers(getTestOpts().extra_errmsg_normaliser,
                                            normalise_errmsg),
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=getattr(getTestOpts(),
                                                         "whitespace_normaliser",
                                                         normalise_whitespace)):
        return failBecause('stderr mismatch')

    # no problems found, this test passed
    return passed()

def compile_cmp_asm( name, way, extra_hc_opts ):
    print('Compile only, extra args = ', extra_hc_opts)
    result = simple_build(name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)

    if badResult(result):
        return result

    # the actual assembly should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    expected_asm_file = find_expected_file(name, 'asm')
    actual_asm_file = add_suffix(name, 's')

    if not compare_outputs(way, 'asm',
                           join_normalisers(normalise_errmsg, normalise_asm),
                           expected_asm_file, actual_asm_file):
        return failBecause('asm mismatch')

    # no problems found, this test passed
    return passed()

# -----------------------------------------------------------------------------
# Compile-and-run tests

def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts, backpack=0 ):
    # print 'Compile and run, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    if way.startswith('ghci'): # interpreted...
        return interpreter_run(name, way, extra_hc_opts, top_mod)
    else: # compiled...
        result = simple_build(name, way, extra_hc_opts, 0, top_mod, 1, 1, backpack=backpack)
        if badResult(result):
            return result

        cmd = './' + name

        # we don't check the compiler's stderr for a compile-and-run test
        return simple_run( name, way, cmd, getTestOpts().extra_run_opts )

def compile_and_run( name, way, extra_hc_opts ):
    return compile_and_run__( name, way, '', [], extra_hc_opts)

def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
    return compile_and_run__( name, way, top_mod, [], extra_hc_opts)

def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
    return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)

def stats( name, way, stats_file ):
    opts = getTestOpts()
    return check_stats(name, way, stats_file, opts.stats_range_fields)

def metric_dict(name, way, metric, value):
    return Perf.PerfStat(
        test_env = config.test_env,
        test     = name,
        way      = way,
        metric   = metric,
        value    = value)

# -----------------------------------------------------------------------------
# Check test stats. This prints the results for the user.
# name: name of the test.
# way: the way.
# stats_file: the path of the stats_file containing the stats for the test.
# range_fields: a dict mapping each metric to its (expected value, tolerance
#   deviation) pair, or to None if there is no baseline yet.
# Returns a pass/fail object. Passes if the stats are within the expected
# value ranges.
def check_stats(name, way, stats_file, range_fields):
    result = passed()
    if range_fields:
        try:
            f = open(in_testdir(stats_file))
        except IOError as e:
            return failBecause(str(e))
        stats_file_contents = f.read()
        f.close()

        for (metric, range_val_dev) in range_fields.items():
            field_match = re.search(r'\("' + metric + r'", "([0-9]+)"\)', stats_file_contents)
            if field_match is None:
                print('Failed to find metric: ', metric)
                metric_result = failBecause('no such stats metric')
            else:
                actual_val = int(field_match.group(1))

                # Store the metric so it can later be stored in a git note.
                perf_stat = metric_dict(name, way, metric, actual_val)
                change = None

                # If this is the first time running the benchmark, then pass.
                if range_val_dev is None:
                    metric_result = passed()
                    change = MetricChange.NewMetric
                else:
                    (expected_val, tolerance_dev) = range_val_dev
                    (change, metric_result) = Perf.check_stats_change(
                        perf_stat,
                        expected_val,
                        tolerance_dev,
                        config.allowed_perf_changes,
                        config.verbose >= 4)
                t.metrics.append((change, perf_stat))

            # If any metric fails then the test fails.
            # Note, the remaining metrics are still run so that
            # a complete list of changes can be presented to the user.
            if metric_result['passFail'] == 'fail':
                result = metric_result

    return result

# -----------------------------------------------------------------------------
# Build a single-module program

def extras_build( way, extra_mods, extra_hc_opts ):
    for mod, opts in extra_mods:
        result = simple_build(mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0)
        if not (mod.endswith('.hs') or mod.endswith('.lhs')):
            extra_hc_opts += ' ' + replace_suffix(mod, 'o')
        if badResult(result):
            return result

    return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}

def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, backpack = False):
    opts = getTestOpts()

    # Redirect stdout and stderr to the same file
    stdout = in_testdir(name, 'comp.stderr')
    stderr = subprocess.STDOUT

    if top_mod != '':
        srcname = top_mod
    elif addsuf:
        if backpack:
            srcname = add_suffix(name, 'bkp')
        else:
            srcname = add_hs_lhs_suffix(name)
    else:
        srcname = name

    if top_mod != '':
        to_do = '--make '
        if link:
            to_do = to_do + '-o ' + name
    elif backpack:
        if link:
            to_do = '-o ' + name + ' '
        else:
            to_do = ''
        to_do = to_do + '--backpack '
    elif link:
        to_do = '-o ' + name
    else:
        to_do = '-c' # just compile

    stats_file = name + '.comp.stats'
    if isCompilerStatsTest():
        extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    if backpack:
        extra_hc_opts += ' -outputdir ' + name + '.out'

    # Required by GHC 7.3+, harmless for earlier versions:
    if (getTestOpts().c_src or
        getTestOpts().objc_src or
        getTestOpts().objcpp_src or
        getTestOpts().cmm_src):
        extra_hc_opts += ' -no-hs-main '

    if getTestOpts().compile_cmd_prefix == '':
        cmd_prefix = ''
    else:
        cmd_prefix = getTestOpts().compile_cmd_prefix + ' '

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
           '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())

    exit_code = runCmd(cmd, None, stdout, stderr, opts.compile_timeout_multiplier)

    if exit_code != 0 and not should_fail:
        if config.verbose >= 1 and _expect_pass(way):
            print('Compile failed (exit code {0}) errors were:'.format(exit_code))
            actual_stderr_path = in_testdir(name, 'comp.stderr')
            dump_file(actual_stderr_path)

    # ToDo: if the sub-shell was killed by ^C, then exit

    if isCompilerStatsTest():
        statsResult = check_stats(name, way, stats_file, opts.stats_range_fields)
        if badResult(statsResult):
            return statsResult

    if should_fail:
        if exit_code == 0:
            return failBecause('exit code 0')
    else:
        if exit_code != 0:
            return failBecause('exit code non-0')

    return passed()

# -----------------------------------------------------------------------------
# Run a program and check its output
#
# If testname.stdin exists, route input from that, else
# from /dev/null. Route output to testname.run.stdout and
# testname.run.stderr. Returns the exit code of the run.

def simple_run(name, way, prog, extra_run_opts):
    opts = getTestOpts()

    # figure out what to use for stdin
    if opts.stdin:
        stdin = in_testdir(opts.stdin)
    elif os.path.exists(in_testdir(name, 'stdin')):
        stdin = in_testdir(name, 'stdin')
    else:
        stdin = None

    stdout = in_testdir(name, 'run.stdout')
    if opts.combined_output:
        stderr = subprocess.STDOUT
    else:
        stderr = in_testdir(name, 'run.stderr')

    my_rts_flags = rts_flags(way)

    stats_file = name + '.stats'
    if isStatsTest() and not isCompilerStatsTest():
        stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    else:
        stats_args = ''

    # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
    cmd = prog + stats_args + ' ' + my_rts_flags + ' ' + extra_run_opts

    if opts.cmd_wrapper is not None:
        cmd = opts.cmd_wrapper(cmd)

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    # run the command
    exit_code = runCmd(cmd, stdin, stdout, stderr, opts.run_timeout_multiplier)

    # check the exit code
    if exit_code != opts.exit_code:
        if config.verbose >= 1 and _expect_pass(way):
            print('Wrong exit code for ' + name + '(' + way + ')' + ' (expected', opts.exit_code, ', actual', exit_code, ')')
            dump_stdout(name)
            dump_stderr(name)
        return failBecause('bad exit code')

    if not (opts.ignore_stderr or stderr_ok(name, way) or opts.combined_output):
        return failBecause('bad stderr')
    if not (opts.ignore_stdout or stdout_ok(name, way)):
        return failBecause('bad stdout')

    check_hp = '-h' in my_rts_flags and opts.check_hp
    check_prof = '-p' in my_rts_flags

    # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
    if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
        return failBecause('bad heap profile')
    if check_prof and not check_prof_ok(name, way):
        return failBecause('bad profile')

    return check_stats(name, way, stats_file, opts.stats_range_fields)

def rts_flags(way):
    args = config.way_rts_flags.get(way, [])
    return '+RTS {0} -RTS'.format(' '.join(args)) if args else ''
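
# Example (illustrative): if config.way_rts_flags maps 'threaded2' to
# ['-N2'], then rts_flags('threaded2') returns '+RTS -N2 -RTS'; for a way
# with no RTS flags it returns the empty string.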

# -----------------------------------------------------------------------------
# Run a program in the interpreter and check its output

def interpreter_run(name, way, extra_hc_opts, top_mod):
    opts = getTestOpts()

    stdout = in_testdir(name, 'interp.stdout')
    stderr = in_testdir(name, 'interp.stderr')
    script = in_testdir(name, 'genscript')

    if opts.combined_output:
        framework_fail(name, 'unsupported',
                       'WAY=ghci and combined_output together is not supported')

    if top_mod == '':
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = top_mod

    delimiter = '===== program output begins here\n'

    with io.open(script, 'w', encoding='utf8') as f:
        # set the prog name and command-line args to match the compiled
        # environment.
        f.write(':set prog ' + name + '\n')
        f.write(':set args ' + opts.extra_run_opts + '\n')
        # Add marker lines to the stdout and stderr output files, so we
        # can separate GHCi's output from the program's.
        f.write(':! echo ' + delimiter)
        f.write(':! echo 1>&2 ' + delimiter)
        # Set stdout to be line-buffered to match the compiled environment.
        f.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
        # wrapping in GHC.TopHandler.runIO ensures we get the same output
        # in the event of an exception as for the compiled program.
        f.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')

    stdin = in_testdir(opts.stdin if opts.stdin else add_suffix(name, 'stdin'))
    if os.path.exists(stdin):
        os.system('cat "{0}" >> "{1}"'.format(stdin, script))

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())

    if getTestOpts().cmd_wrapper is not None:
        cmd = opts.cmd_wrapper(cmd)

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    exit_code = runCmd(cmd, script, stdout, stderr, opts.run_timeout_multiplier)

    # split the stdout into compilation/program output
    split_file(stdout, delimiter,
               in_testdir(name, 'comp.stdout'),
               in_testdir(name, 'run.stdout'))
    split_file(stderr, delimiter,
               in_testdir(name, 'comp.stderr'),
               in_testdir(name, 'run.stderr'))

    # check the exit code
    if exit_code != getTestOpts().exit_code:
        print('Wrong exit code for ' + name + '(' + way + ') (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
        dump_stdout(name)
        dump_stderr(name)
        return failBecause('bad exit code')

    # ToDo: if the sub-shell was killed by ^C, then exit

    if not (opts.ignore_stderr or stderr_ok(name, way)):
        return failBecause('bad stderr')
    elif not (opts.ignore_stdout or stdout_ok(name, way)):
        return failBecause('bad stdout')
    else:
        return passed()

def split_file(in_fn, delimiter, out1_fn, out2_fn):
    # See Note [Universal newlines].
    with io.open(in_fn, 'r', encoding='utf8', errors='replace', newline=None) as infile:
        with io.open(out1_fn, 'w', encoding='utf8', newline='') as out1:
            with io.open(out2_fn, 'w', encoding='utf8', newline='') as out2:
                line = infile.readline()
                while re.sub(r'^\s*', '', line) != delimiter and line != '':
                    out1.write(line)
                    line = infile.readline()

                line = infile.readline()
                while line != '':
                    out2.write(line)
                    line = infile.readline()

# -----------------------------------------------------------------------------
# Utils
def get_compiler_flags():
    opts = getTestOpts()

    flags = copy.copy(opts.compiler_always_flags)

    flags.append(opts.extra_hc_opts)

    if opts.outputdir is not None:
        flags.extend(["-outputdir", opts.outputdir])

    return flags

def stdout_ok(name, way):
    actual_stdout_file = add_suffix(name, 'run.stdout')
    expected_stdout_file = find_expected_file(name, 'stdout')

    extra_norm = join_normalisers(normalise_output, getTestOpts().extra_normaliser)

    check_stdout = getTestOpts().check_stdout
    if check_stdout:
        actual_stdout_path = in_testdir(actual_stdout_file)
        return check_stdout(actual_stdout_path, extra_norm)

    return compare_outputs(way, 'stdout', extra_norm,
                           expected_stdout_file, actual_stdout_file)

def dump_stdout( name ):
    with open(in_testdir(name, 'run.stdout'), encoding='utf8') as f:
        str = f.read().strip()
        if str:
            print("Stdout (", name, "):")
            print(str)

def stderr_ok(name, way):
    actual_stderr_file = add_suffix(name, 'run.stderr')
    expected_stderr_file = find_expected_file(name, 'stderr')

    return compare_outputs(way, 'stderr',
                           join_normalisers(normalise_errmsg, getTestOpts().extra_errmsg_normaliser),
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=normalise_whitespace)

def dump_stderr( name ):
    with open(in_testdir(name, 'run.stderr'), encoding='utf8') as f:
        str = f.read().strip()
        if str:
            print("Stderr (", name, "):")
            print(str)

def read_no_crs(file):
    str = ''
    try:
        # See Note [Universal newlines].
        with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
            str = h.read()
    except Exception:
        # On Windows, if the program fails very early, it seems the
        # files stdout/stderr are redirected to may not get created
        pass
    return str

def write_file(file, str):
    # See Note [Universal newlines].
    with io.open(file, 'w', encoding='utf8', newline='') as h:
        h.write(str)

# Note [Universal newlines]
#
# We don't want to write any Windows style line endings ever, because
# it would mean that `make accept` would touch every line of the file
# when switching between Linux and Windows.
#
# Furthermore, when reading a file, it is convenient to translate all
# Windows style endings to '\n', as it simplifies searching or massaging
# the content.
#
# Solution: use `io.open` instead of `open`
#  * when reading: use newline=None to translate '\r\n' to '\n'
#  * when writing: use newline='' to not translate '\n' to '\r\n'
#
# See https://docs.python.org/2/library/io.html#io.open.
#
# This should work with both python2 and python3, and with both mingw*
# and msys2 style Python.
#
# Do note that io.open returns unicode strings. So we have to specify
# the expected encoding. But there is at least one file which is not
# valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
# Another solution would be to open files in binary mode always, and
# operate on bytes.
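#
# For example (illustrative), reading a file with these conventions:
#
#     with io.open(path, 'r', encoding='utf8', errors='replace', newline=None) as h:
#         contents = h.read()   # any '\r\n' has already become '\n'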
1561
1562 def check_hp_ok(name):
1563 opts = getTestOpts()
1564
1565 # do not qualify for hp2ps because we should be in the right directory
1566 hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())
1567
1568 hp2psResult = runCmd(hp2psCmd)
1569
1570 actual_ps_path = in_testdir(name, 'ps')
1571
1572 if hp2psResult == 0:
1573 if os.path.exists(actual_ps_path):
1574 if gs_working:
1575 gsResult = runCmd(genGSCmd(actual_ps_path))
1576 if (gsResult == 0):
1577 return (True)
1578 else:
1579 print("hp2ps output for " + name + "is not valid PostScript")
1580 else: return (True) # assume postscript is valid without ghostscript
1581 else:
1582 print("hp2ps did not generate PostScript for " + name)
1583 return (False)
1584 else:
1585 print("hp2ps error when processing heap profile for " + name)
1586 return(False)
1587
1588 def check_prof_ok(name, way):
1589 expected_prof_file = find_expected_file(name, 'prof.sample')
1590 expected_prof_path = in_testdir(expected_prof_file)
1591
1592 # Check actual prof file only if we have an expected prof file to
1593 # compare it with.
1594 if not os.path.exists(expected_prof_path):
1595 return True
1596
1597 actual_prof_file = add_suffix(name, 'prof')
1598 actual_prof_path = in_testdir(actual_prof_file)
1599
1600 if not os.path.exists(actual_prof_path):
1601 print(actual_prof_path + " does not exist")
1602 return(False)
1603
1604 if os.path.getsize(actual_prof_path) == 0:
1605 print(actual_prof_path + " is empty")
1606 return(False)
1607
1608 return compare_outputs(way, 'prof', normalise_prof,
1609 expected_prof_file, actual_prof_file,
1610 whitespace_normaliser=normalise_whitespace)
1611
1612 # Compare expected output to actual output, and optionally accept the
1613 # new output. Returns true if output matched or was accepted, false
1614 # otherwise. See Note [Output comparison] for the meaning of the
1615 # normaliser and whitespace_normaliser parameters.
1616 def compare_outputs(way, kind, normaliser, expected_file, actual_file,
1617 whitespace_normaliser=lambda x:x):
1618
1619 expected_path = in_srcdir(expected_file)
1620 actual_path = in_testdir(actual_file)
1621
1622 if os.path.exists(expected_path):
1623 expected_str = normaliser(read_no_crs(expected_path))
1624 # Create the .normalised file in the testdir, not in the srcdir.
1625 expected_normalised_file = add_suffix(expected_file, 'normalised')
1626 expected_normalised_path = in_testdir(expected_normalised_file)
1627 else:
1628 expected_str = ''
1629 expected_normalised_path = '/dev/null'
1630
1631 actual_raw = read_no_crs(actual_path)
1632 actual_str = normaliser(actual_raw)
1633
1634 # See Note [Output comparison].
1635 if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
1636 return True
1637 else:
1638 if config.verbose >= 1 and _expect_pass(way):
1639 print('Actual ' + kind + ' output differs from expected:')
1640
1641 if expected_normalised_path != '/dev/null':
1642 write_file(expected_normalised_path, expected_str)
1643
1644 actual_normalised_path = add_suffix(actual_path, 'normalised')
1645 write_file(actual_normalised_path, actual_str)
1646
1647 if config.verbose >= 1 and _expect_pass(way):
1648 # See Note [Output comparison].
1649 r = runCmd('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
1650 actual_normalised_path),
1651 print_output=True)
1652
1653 # If for some reason there were no non-whitespace differences,
1654 # then do a full diff
1655 if r == 0:
1656 r = runCmd('diff -u "{0}" "{1}"'.format(expected_normalised_path,
1657 actual_normalised_path),
1658 print_output=True)
1659
1660 if config.accept and (getTestOpts().expect == 'fail' or
1661 way in getTestOpts().expect_fail_for):
1662 if_verbose(1, 'Test is expected to fail. Not accepting new output.')
1663 return False
1664 elif config.accept and actual_raw:
1665 if config.accept_platform:
1666 if_verbose(1, 'Accepting new output for platform "'
1667 + config.platform + '".')
1668 expected_path += '-' + config.platform
1669 elif config.accept_os:
1670 if_verbose(1, 'Accepting new output for os "'
1671 + config.os + '".')
1672 expected_path += '-' + config.os
1673 else:
1674 if_verbose(1, 'Accepting new output.')
1675
1676 write_file(expected_path, actual_raw)
1677 return True
1678 elif config.accept:
1679 if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
1680 os.remove(expected_path)
1681 return True
1682 else:
1683 return False
1684
1685 # Note [Output comparison]
1686 #
1687 # We do two types of output comparison:
1688 #
1689 # 1. To decide whether a test has failed. We apply a `normaliser` and an
1690 # optional `whitespace_normaliser` to the expected and the actual
1691 # output, before comparing the two.
1692 #
1693 # 2. To show as a diff to the user when the test indeed failed. We apply
1694 # the same `normaliser` function to the outputs, to make the diff as
1695 # small as possible (only showing the actual problem). But we don't
1696 # apply the `whitespace_normaliser` here, because it might completely
1697 # squash all whitespace, making the diff unreadable. Instead we rely
1698 # on the `diff` program to ignore whitespace changes as much as
1699 # possible (#10152).
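#
# Illustrative sketch (not executed by the driver; the strings are made up):
#
#   >>> expected = 'No instance for Eq\n  arising from a use of ==\n'
#   >>> actual   = 'No instance for Eq\n    arising from a use of ==\n'
#   >>> normalise_whitespace(expected) == normalise_whitespace(actual)
#   True
#
# With whitespace_normaliser=normalise_whitespace the comparison in step 1
# succeeds, while a genuine failure would be shown to the user as a diff of
# the merely `normaliser`-ed files in step 2.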
1700
1701 def normalise_whitespace( str ):
1702 # Merge contiguous whitespace characters into a single space.
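    # e.g. (illustrative) '  foo\t bar\nbaz ' becomes 'foo bar baz'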
1703 return ' '.join(str.split())
1704
1705 callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')
1706
1707 def normalise_callstacks(s):
1708 opts = getTestOpts()
1709 def repl(matches):
1710 location = matches.group(1)
1711 location = normalise_slashes_(location)
1712 return ', called at {0}:<line>:<column> in <package-id>:'.format(location)
1713 # Ignore line number differences in call stacks (#10834).
1714 s = re.sub(callSite_re, repl, s)
1715 # Ignore the change in how we identify implicit call-stacks
1716 s = s.replace('from ImplicitParams', 'from HasCallStack')
1717 if not opts.keep_prof_callstacks:
1718         # Don't output prof callstacks. Test output should be
1719         # independent of the WAY we run the test.
1720 s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
1721 return s
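
# Illustrative effect of the call-site rewrite above (made-up input):
#
#   'g, called at T.hs:12:7 in main:Main'
#     ==> 'g, called at T.hs:<line>:<column> in <package-id>:Main'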
1722
1723 tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)
1724
1725 def normalise_type_reps(str):
1726 """ Normalise out fingerprints from Typeable TyCon representations """
1727 return re.sub(tyCon_re, 'TyCon FINGERPRINT FINGERPRINT ', str)
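
# e.g. (illustrative) 'TyCon 1234## 5678## []'
#        ==> 'TyCon FINGERPRINT FINGERPRINT []'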
1728
1729 def normalise_errmsg( str ):
1730 """Normalise error-messages emitted via stderr"""
1731 # IBM AIX's `ld` is a bit chatty
1732 if opsys('aix'):
1733 str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
1734 # remove " error:" and lower-case " Warning:" to make patch for
1735 # trac issue #10021 smaller
1736 str = modify_lines(str, lambda l: re.sub(' error:', '', l))
1737 str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
1738 str = normalise_callstacks(str)
1739 str = normalise_type_reps(str)
1740
1741     # If some file ends in ".exe" or ".exe:", zap the ".exe" (for Windows);
1742     # the colon is matched because it appears in error messages. This
1743     # hacky solution is used in place of more sophisticated filename
1744     # mangling.
1745 str = re.sub('([^\\s])\\.exe', '\\1', str)
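    # e.g. (illustrative) 'C:\foo\ghc.exe: panic!' loses the '.exe' here and
    # gets forward slashes from the substitution below.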
1746
1747 # normalise slashes, minimise Windows/Unix filename differences
1748 str = re.sub('\\\\', '/', str)
1749
1750     # The in-place GHCs are called ghc-stage[123] to avoid filename
1751     # collisions, so we need to normalise that to just "ghc"
1752 str = re.sub('ghc-stage[123]', 'ghc', str)
1753
1754     # Error messages sometimes contain the name of the integer implementation package
1755 str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
1756
1757 # Error messages sometimes contain this blurb which can vary
1758 # spuriously depending upon build configuration (e.g. based on integer
1759 # backend)
1760     str = re.sub(r'\.\.\.plus ([a-z]+|[0-9]+) instances involving out-of-scope types',
1761                  '...plus N instances involving out-of-scope types', str)
1762
1763 # Also filter out bullet characters. This is because bullets are used to
1764 # separate error sections, and tests shouldn't be sensitive to how the
1765     # division happens.
1766 bullet = '•'.encode('utf8') if isinstance(str, bytes) else '•'
1767 str = str.replace(bullet, '')
1768
1769     # Windows only: this is a bug in hsc2hs, but it prevents stable
1770     # output for the testsuite. See Trac #9775. For now we filter out this
1771     # warning message to get clean output.
1772 if config.msys:
1773 str = re.sub('Failed to remove file (.*); error= (.*)$', '', str)
1774         str = re.sub(r'DeleteFile "(.+)": permission denied \(Access is denied\.\)(.*)$', '', str)
1775
1776 return str
1777
1778 # normalise a .prof file, so that we can reasonably compare it against
1779 # a sample. This doesn't compare any of the actual profiling data,
1780 # only the shape of the profile and the number of entries.
1781 def normalise_prof (str):
1782 # strip everything up to the line beginning "COST CENTRE"
1783 str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)
1784
1785 # strip results for CAFs, these tend to change unpredictably
1786 str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)
1787
1788 # XXX Ignore Main.main. Sometimes this appears under CAF, and
1789 # sometimes under MAIN.
1790 str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)
1791
1792 # We have something like this:
1793 #
1794 # MAIN MAIN <built-in> 53 0 0.0 0.2 0.0 100.0
1795 # CAF Main <entire-module> 105 0 0.0 0.3 0.0 62.5
1796 # readPrec Main Main_1.hs:7:13-16 109 1 0.0 0.6 0.0 0.6
1797 # readPrec Main Main_1.hs:4:13-16 107 1 0.0 0.6 0.0 0.6
1798 # main Main Main_1.hs:(10,1)-(20,20) 106 1 0.0 20.2 0.0 61.0
1799 # == Main Main_1.hs:7:25-26 114 1 0.0 0.0 0.0 0.0
1800 # == Main Main_1.hs:4:25-26 113 1 0.0 0.0 0.0 0.0
1801 # showsPrec Main Main_1.hs:7:19-22 112 2 0.0 1.2 0.0 1.2
1802 # showsPrec Main Main_1.hs:4:19-22 111 2 0.0 0.9 0.0 0.9
1803 # readPrec Main Main_1.hs:7:13-16 110 0 0.0 18.8 0.0 18.8
1804 # readPrec Main Main_1.hs:4:13-16 108 0 0.0 19.9 0.0 19.9
1805 #
1806 # then we remove all the specific profiling data, leaving only the cost
1807 # centre name, module, src, and entries, to end up with this: (modulo
1808 # whitespace between columns)
1809 #
1810 # MAIN MAIN <built-in> 0
1811 # readPrec Main Main_1.hs:7:13-16 1
1812 # readPrec Main Main_1.hs:4:13-16 1
1813 # == Main Main_1.hs:7:25-26 1
1814 # == Main Main_1.hs:4:25-26 1
1815 # showsPrec Main Main_1.hs:7:19-22 2
1816 # showsPrec Main Main_1.hs:4:19-22 2
1817 # readPrec Main Main_1.hs:7:13-16 0
1818 # readPrec Main Main_1.hs:4:13-16 0
1819
1820 # Split 9 whitespace-separated groups, take columns 1 (cost-centre), 2
1821 # (module), 3 (src), and 5 (entries). SCC names can't have whitespace, so
1822 # this works fine.
1823 str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
1824 '\\1 \\2 \\3 \\5\n', str)
1825 return str
1826
1827 def normalise_slashes_( str ):
1828 str = re.sub('\\\\', '/', str)
1829 str = re.sub('//', '/', str)
1830 return str
1831
1832 def normalise_exe_( str ):
1833     str = re.sub(r'\.exe', '', str)
1834 return str
1835
1836 def normalise_output( str ):
1837 # remove " error:" and lower-case " Warning:" to make patch for
1838 # trac issue #10021 smaller
1839 str = modify_lines(str, lambda l: re.sub(' error:', '', l))
1840 str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
1841 # Remove a .exe extension (for Windows)
1842 # This can occur in error messages generated by the program.
1843 str = re.sub('([^\\s])\\.exe', '\\1', str)
1844 str = normalise_callstacks(str)
1845 str = normalise_type_reps(str)
1846 return str
1847
1848 def normalise_asm( str ):
1849 lines = str.split('\n')
1850 # Only keep instructions and labels not starting with a dot.
1851 metadata = re.compile('^[ \t]*\\..*$')
1852 out = []
1853 for line in lines:
1854 # Drop metadata directives (e.g. ".type")
1855 if not metadata.match(line):
1856 line = re.sub('@plt', '', line)
1857 instr = line.lstrip().split()
1858 # Drop empty lines.
1859 if not instr:
1860 continue
1861 # Drop operands, except for call instructions.
1862 elif instr[0] == 'call':
1863 out.append(instr[0] + ' ' + instr[1])
1864 else:
1865 out.append(instr[0])
1866 out = '\n'.join(out)
1867 return out
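
# Illustrative sketch of the normalisation above (made-up input):
#
#   normalise_asm('\t.text\n\tmovq %rbx, %rax\n\tcall malloc@plt\n')
#     ==> 'movq\ncall malloc'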
1868
1869 def if_verbose( n, s ):
1870 if config.verbose >= n:
1871 print(s)
1872
1873 def dump_file(f):
1874 try:
1875 with io.open(f) as file:
1876 print(file.read())
1877 except Exception:
1878 print('')
1879
1880 def runCmd(cmd, stdin=None, stdout=None, stderr=None, timeout_multiplier=1.0, print_output=False):
1881 timeout_prog = strip_quotes(config.timeout_prog)
1882 timeout = str(int(ceil(config.timeout * timeout_multiplier)))
1883
1884 # Format cmd using config. Example: cmd='{hpc} report A.tix'
1885 cmd = cmd.format(**config.__dict__)
1886 if_verbose(3, cmd + ('< ' + os.path.basename(stdin) if stdin else ''))
1887
1888 stdin_file = io.open(stdin, 'rb') if stdin else None
1889 stdout_buffer = b''
1890 stderr_buffer = b''
1891
1892 hStdErr = subprocess.PIPE
1893 if stderr is subprocess.STDOUT:
1894 hStdErr = subprocess.STDOUT
1895
1896 try:
1897 # cmd is a complex command in Bourne-shell syntax
1898         # e.g. (cd . && 'C:/users/simonpj/HEAD/inplace/bin/ghc-stage2' ...etc)
1899         # Hence it must ultimately be run by a Bourne shell. It's the timeout
1900         # program's job to invoke the Bourne shell.
1901
1902 r = subprocess.Popen([timeout_prog, timeout, cmd],
1903 stdin=stdin_file,
1904 stdout=subprocess.PIPE,
1905 stderr=hStdErr,
1906 env=ghc_env)
1907
1908 stdout_buffer, stderr_buffer = r.communicate()
1909 finally:
1910 if stdin_file:
1911 stdin_file.close()
1912 if config.verbose >= 1 and print_output:
1913 if stdout_buffer:
1914 sys.stdout.buffer.write(stdout_buffer)
1915 if stderr_buffer:
1916 sys.stderr.buffer.write(stderr_buffer)
1917
1918 if stdout:
1919 with io.open(stdout, 'wb') as f:
1920 f.write(stdout_buffer)
1921 if stderr:
1922 if stderr is not subprocess.STDOUT:
1923 with io.open(stderr, 'wb') as f:
1924 f.write(stderr_buffer)
1925
1926 if r.returncode == 98:
1927 # The python timeout program uses 98 to signal that ^C was pressed
1928 stopNow()
1929 if r.returncode == 99 and getTestOpts().exit_code != 99:
1930 # Only print a message when timeout killed the process unexpectedly.
1931 if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
1932 return r.returncode
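
# Illustrative use of runCmd (the output file name is made up):
#
#   exitcode = runCmd('{hpc} report A.tix', stdout='hpc.out')
#
# '{hpc}' is filled in from `config`, the whole command line is handed to
# the timeout program, and the captured stdout is written to hpc.out.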
1933
1934 # -----------------------------------------------------------------------------
1935 # Checking whether Ghostscript is available for validating the output of hp2ps
1936
1937 def genGSCmd(psfile):
1938 return '{{gs}} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "{0}"'.format(psfile)
1939
1940 def gsNotWorking():
1941 global gs_working
1942 print("GhostScript not available for hp2ps tests")
1943
1944 global gs_working
1945 gs_working = False
1946 if config.have_profiling:
1947 if config.gs != '':
1948         resultGood = runCmd(genGSCmd(config.top + '/config/good.ps'))
1949 if resultGood == 0:
1950 resultBad = runCmd(genGSCmd(config.top + '/config/bad.ps') +
1951 ' >/dev/null 2>&1')
1952 if resultBad != 0:
1953 print("GhostScript available for hp2ps tests")
1954 gs_working = True
1955             else:
1956                 gsNotWorking()
1957         else:
1958             gsNotWorking()
1959 else:
1960     gsNotWorking()
1961
1962 def add_suffix( name, suffix ):
1963 if suffix == '':
1964 return name
1965 else:
1966 return name + '.' + suffix
1967
1968 def add_hs_lhs_suffix(name):
1969 if getTestOpts().c_src:
1970 return add_suffix(name, 'c')
1971 elif getTestOpts().cmm_src:
1972 return add_suffix(name, 'cmm')
1973 elif getTestOpts().objc_src:
1974 return add_suffix(name, 'm')
1975 elif getTestOpts().objcpp_src:
1976 return add_suffix(name, 'mm')
1977 elif getTestOpts().literate:
1978 return add_suffix(name, 'lhs')
1979 else:
1980 return add_suffix(name, 'hs')
1981
1982 def replace_suffix( name, suffix ):
1983     base, _ = os.path.splitext(name)
1984 return base + '.' + suffix
1985
1986 def in_testdir(name, suffix=''):
1987 return os.path.join(getTestOpts().testdir, add_suffix(name, suffix))
1988
1989 def in_srcdir(name, suffix=''):
1990 return os.path.join(getTestOpts().srcdir, add_suffix(name, suffix))
1991
1992 # Finding the sample output. The filename is of the form
1993 #
1994 # <test>.stdout[-ws-<wordsize>][-<platform>|-<os>]
1995 #
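# For example (illustrative, assuming a 64-bit x86_64 Linux build), for
# ('T123', 'stdout') the candidates are tried in this order:
#
#   T123.stdout-ws-64-x86_64-unknown-linux
#   T123.stdout-x86_64-unknown-linux
#   T123.stdout-ws-64-linux
#   T123.stdout-linux
#   T123.stdout-ws-64
#   T123.stdout
#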
1996 def find_expected_file(name, suff):
1997 basename = add_suffix(name, suff)
1998
1999 files = [basename + ws + plat
2000 for plat in ['-' + config.platform, '-' + config.os, '']
2001 for ws in ['-ws-' + config.wordsize, '']]
2002
2003 for f in files:
2004 if os.path.exists(in_srcdir(f)):
2005 return f
2006
2007 return basename
2008
2009 if config.msys:
2010 import stat
2011 def cleanup():
2012 testdir = getTestOpts().testdir
2013 max_attempts = 5
2014 retries = max_attempts
2015 def on_error(function, path, excinfo):
2016 # At least one test (T11489) removes the write bit from a file it
2017 # produces. Windows refuses to delete read-only files with a
2018 # permission error. Try setting the write bit and try again.
2019 os.chmod(path, stat.S_IWRITE)
2020 function(path)
2021
2022         # On Windows we have to retry the delete a couple of times.
2023         # The reason for this is that a FileDelete command just marks a
2024         # file for deletion. The file is really only removed when the last
2025         # handle to the file is closed. Unfortunately there are a lot of
2026         # system services that can have a file temporarily opened using a
2027         # shared read-only lock, such as the built-in AV and search indexer.
2028         #
2029         # We can't really guarantee that these are all off, so what we do
2030         # instead is: whenever the folder still exists after a rmtree, wait
2031         # a bit and try again.
2032         #
2033         # Based on what I've seen from the tests on the CI server, this is
2034         # relatively rare, so overall we won't be retrying a lot. If the
2035         # folder is still locked after a reasonable amount of time, abort
2036         # the current test by throwing an exception, so that it won't fail
2037         # with an even more cryptic error. See Trac #13162.
2038 exception = None
2039 while retries > 0 and os.path.exists(testdir):
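            # Linear back-off: sleep 0, 6, 12, ... seconds across the
            # (at most) max_attempts tries.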
2040 time.sleep((max_attempts-retries)*6)
2041 try:
2042 shutil.rmtree(testdir, onerror=on_error, ignore_errors=False)
2043 except Exception as e:
2044 exception = e
2045 retries -= 1
2046
2047 if retries == 0 and os.path.exists(testdir):
2048 raise Exception("Unable to remove folder '%s': %s\nUnable to start current test."
2049 % (testdir, exception))
2050 else:
2051 def cleanup():
2052 testdir = getTestOpts().testdir
2053 if os.path.exists(testdir):
2054 shutil.rmtree(testdir, ignore_errors=False)
2055
2056
2057 # -----------------------------------------------------------------------------
2058 # Yield all the files ending in '.T' below the directories in `roots`.
2059
2060 def findTFiles(roots):
2061 for root in roots:
2062 for path, dirs, files in os.walk(root, topdown=True):
2063 # Never pick up .T files in uncleaned .run directories.
2064 dirs[:] = [dir for dir in sorted(dirs)
2065 if not dir.endswith(testdir_suffix)]
2066 for filename in files:
2067 if filename.endswith('.T'):
2068 yield os.path.join(path, filename)
2069
2070 # -----------------------------------------------------------------------------
2071 # Output a test summary to the specified file object
2072
2073 def summary(t, file, short=False, color=False):
2074
2075 file.write('\n')
2076 printUnexpectedTests(file,
2077 [t.unexpected_passes, t.unexpected_failures,
2078 t.unexpected_stat_failures, t.framework_failures])
2079
2080 if short:
2081 # Only print the list of unexpected tests above.
2082 return
2083
2084 colorize = lambda s: s
2085 if color:
2086 if len(t.unexpected_failures) > 0 or \
2087 len(t.unexpected_stat_failures) > 0 or \
2088 len(t.framework_failures) > 0:
2089 colorize = str_fail
2090 else:
2091 colorize = str_pass
2092
2093 file.write(colorize('SUMMARY') + ' for test run started at '
2094 + time.strftime("%c %Z", t.start_time) + '\n'
2095 + str(datetime.timedelta(seconds=
2096 round(time.time() - time.mktime(t.start_time)))).rjust(8)
2097 + ' spent to go through\n'
2098 + repr(t.total_tests).rjust(8)
2099 + ' total tests, which gave rise to\n'
2100 + repr(t.total_test_cases).rjust(8)
2101 + ' test cases, of which\n'
2102 + repr(t.n_tests_skipped).rjust(8)
2103 + ' were skipped\n'
2104 + '\n'
2105 + repr(len(t.missing_libs)).rjust(8)
2106 + ' had missing libraries\n'
2107 + repr(t.n_expected_passes).rjust(8)
2108 + ' expected passes\n'
2109 + repr(t.n_expected_failures).rjust(8)
2110 + ' expected failures\n'
2111 + '\n'
2112 + repr(len(t.framework_failures)).rjust(8)
2113 + ' caused framework failures\n'
2114 + repr(len(t.framework_warnings)).rjust(8)
2115 + ' caused framework warnings\n'
2116 + repr(len(t.unexpected_passes)).rjust(8)
2117 + ' unexpected passes\n'
2118 + repr(len(t.unexpected_failures)).rjust(8)
2119 + ' unexpected failures\n'
2120 + repr(len(t.unexpected_stat_failures)).rjust(8)
2121 + ' unexpected stat failures\n'
2122 + '\n')
2123
2124 if t.unexpected_passes:
2125 file.write('Unexpected passes:\n')
2126 printTestInfosSummary(file, t.unexpected_passes)
2127
2128 if t.unexpected_failures:
2129 file.write('Unexpected failures:\n')
2130 printTestInfosSummary(file, t.unexpected_failures)
2131
2132 if t.unexpected_stat_failures:
2133 file.write('Unexpected stat failures:\n')
2134 printTestInfosSummary(file, t.unexpected_stat_failures)
2135
2136 if t.framework_failures:
2137 file.write('Framework failures:\n')
2138 printTestInfosSummary(file, t.framework_failures)
2139
2140 if t.framework_warnings:
2141 file.write('Framework warnings:\n')
2142 printTestInfosSummary(file, t.framework_warnings)
2143
2144 if stopping():
2145 file.write('WARNING: Testsuite run was terminated early\n')
2146
2147 def printUnexpectedTests(file, testInfoss):
2148 unexpected = set(name for testInfos in testInfoss
2149 for (_, name, _, _) in testInfos
2150 if not name.endswith('.T'))
2151 if unexpected:
2152 file.write('Unexpected results from:\n')
2153 file.write('TEST="' + ' '.join(sorted(unexpected)) + '"\n')
2154 file.write('\n')
2155
2156 def printTestInfosSummary(file, testInfos):
2157 maxDirLen = max(len(directory) for (directory, _, _, _) in testInfos)
2158 for (directory, name, reason, way) in testInfos:
2159 directory = directory.ljust(maxDirLen)
2160 file.write(' {directory} {name} [{reason}] ({way})\n'.format(**locals()))
2161 file.write('\n')
2162
2163 def modify_lines(s, f):
2164 s = '\n'.join([f(l) for l in s.splitlines()])
2165 if s and s[-1] != '\n':
2166 # Prevent '\ No newline at end of file' warnings when diffing.
2167 s += '\n'
2168 return s
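
# e.g. (illustrative) modify_lines('a\nb', lambda l: l.upper()) == 'A\nB\n';
# the trailing newline is (re)added by the check above.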