Revert "Batch merge"
[ghc.git] / testsuite / driver / testlib.py
# coding=utf8
#
# (c) Simon Marlow 2002
#

import io
import shutil
import os
import re
import traceback
import time
import datetime
import copy
import glob
import sys
from math import ceil, trunc
from pathlib import PurePath
import collections
import collections.abc
import subprocess

from testglobals import config, ghc_env, default_testopts, brokens, t
from testutil import strip_quotes, lndir, link_or_copy_file, passed, failBecause, str_fail, str_pass
from cpu_features import have_cpu_feature
import perf_notes as Perf
from perf_notes import MetricChange
extra_src_files = {'T4198': ['exitminus1.c']} # TODO: See #12223

global pool_sema
if config.use_threads:
    import threading
    pool_sema = threading.BoundedSemaphore(value=config.threads)

global wantToStop
wantToStop = False

def stopNow():
    global wantToStop
    wantToStop = True

def stopping():
    return wantToStop


# Options valid for the current test only (these get reset to
# testdir_testopts after each test).

global testopts_local
if config.use_threads:
    testopts_local = threading.local()
else:
    class TestOpts_Local:
        pass
    testopts_local = TestOpts_Local()

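# Accessors for the per-test options; with threads enabled, each worker
# thread sees its own copy in testopts_local.x.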
def getTestOpts():
    return testopts_local.x

def setLocalTestOpts(opts):
    global testopts_local
    testopts_local.x = opts

def isCompilerStatsTest():
    opts = getTestOpts()
    return bool(opts.is_compiler_stats_test)

def isStatsTest():
    opts = getTestOpts()
    return bool(opts.stats_range_fields)


# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
    global thisdir_settings
    thisdir_settings = [thisdir_settings, f]

# -----------------------------------------------------------------------------
# Canned setup functions for common cases. eg. for a test you might say
#
#      test('test001', normal, compile, [''])
#
# to run it without any options, but change it to
#
#      test('test001', expect_fail, compile, [''])
#
# to expect failure for this test.
#
# type TestOpt = (name :: String, opts :: Object) -> IO ()

def normal( name, opts ):
    return

def skip( name, opts ):
    opts.skip = True

def expect_fail( name, opts ):
    # The compiler, testdriver, OS or platform is missing a certain
    # feature, and we don't plan to or can't fix it now or in the
    # future.
    opts.expect = 'fail'

def reqlib( lib ):
    return lambda name, opts, l=lib: _reqlib(name, opts, l)

def stage1(name, opts):
    # See Note [Why is there no stage1 setup function?]
    framework_fail(name, 'stage1 setup function does not exist',
                   'add your test to testsuite/tests/stage1 instead')

# Note [Why is there no stage1 setup function?]
#
# Presumably a stage1 setup function would signal that the stage1
# compiler should be used to compile a test.
#
# Trouble is, the path to the compiler + the `ghc --info` settings for
# that compiler are currently passed in from the `make` part of the
# testsuite driver.
#
# Switching compilers in the Python part would be entirely too late, as
# all ghc_with_* settings would be wrong. See config/ghc for possible
# consequences (for example, config.run_ways would still be
# based on the default compiler, quite likely causing ./validate --slow
# to fail).
#
# It would be possible to let the Python part of the testsuite driver
# make the call to `ghc --info`, but doing so would require quite some
# work. Care has to be taken to not affect the run_command tests for
# example, as they also use the `ghc --info` settings:
# quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
#
# If you want a test to run using the stage1 compiler, add it to the
# testsuite/tests/stage1 directory. Validate runs the tests in that
# directory with `make stage=1`.

# Cache the results of looking to see if we have a library or not.
# This makes quite a difference, especially on Windows.
have_lib_cache = {}

def have_library(lib):
    """ Test whether the given library is available """
    if lib in have_lib_cache:
        got_it = have_lib_cache[lib]
    else:
        cmd = strip_quotes(config.ghc_pkg)
        p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             env=ghc_env)
        # read from stdout and stderr to avoid blocking due to
        # buffers filling
        p.communicate()
        r = p.wait()
        got_it = r == 0
        have_lib_cache[lib] = got_it

    return got_it

def _reqlib( name, opts, lib ):
    if not have_library(lib):
        opts.expect = 'missing-lib'

def req_haddock( name, opts ):
    if not config.haddock:
        opts.expect = 'missing-lib'

def req_profiling( name, opts ):
    '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
    if not config.have_profiling:
        opts.expect = 'fail'

def req_shared_libs( name, opts ):
    if not config.have_shared_libs:
        opts.expect = 'fail'

def req_interp( name, opts ):
    if not config.have_interp:
        opts.expect = 'fail'

def req_smp( name, opts ):
    if not config.have_smp:
        opts.expect = 'fail'

def ignore_stdout(name, opts):
    opts.ignore_stdout = True

def ignore_stderr(name, opts):
    opts.ignore_stderr = True

def combined_output( name, opts ):
    opts.combined_output = True

# -----

def expect_fail_for( ways ):
    return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )

def _expect_fail_for( name, opts, ways ):
    opts.expect_fail_for = ways

def expect_broken( bug ):
    # This test is expected not to work due to the indicated Trac bug
    # number.
    return lambda name, opts, b=bug: _expect_broken( name, opts, b )

def _expect_broken( name, opts, bug ):
    record_broken(name, opts, bug)
    opts.expect = 'fail'

def expect_broken_for( bug, ways ):
    return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )

def _expect_broken_for( name, opts, bug, ways ):
    record_broken(name, opts, bug)
    opts.expect_fail_for = ways

def record_broken(name, opts, bug):
    me = (bug, opts.testdir, name)
    if me not in brokens:
        brokens.append(me)

def _expect_pass(way):
    # Helper function. Not intended for use in .T files.
    opts = getTestOpts()
    return opts.expect == 'pass' and way not in opts.expect_fail_for

# -----

def omit_ways( ways ):
    return lambda name, opts, w=ways: _omit_ways( name, opts, w )

def _omit_ways( name, opts, ways ):
    opts.omit_ways = ways

# -----

def only_ways( ways ):
    return lambda name, opts, w=ways: _only_ways( name, opts, w )

def _only_ways( name, opts, ways ):
    opts.only_ways = ways

# -----

def extra_ways( ways ):
    return lambda name, opts, w=ways: _extra_ways( name, opts, w )

def _extra_ways( name, opts, ways ):
    opts.extra_ways = ways

# -----

def set_stdin( file ):
    return lambda name, opts, f=file: _set_stdin(name, opts, f)

def _set_stdin( name, opts, f ):
    opts.stdin = f

# -----

def exit_code( val ):
    return lambda name, opts, v=val: _exit_code(name, opts, v)

def _exit_code( name, opts, v ):
    opts.exit_code = v

def signal_exit_code( val ):
    if opsys('solaris2'):
        return exit_code( val )
    else:
        # When an application running on Linux receives a fatal error
        # signal, its exit code is encoded as 128 + the signal value.
        # See http://www.tldp.org/LDP/abs/html/exitcodes.html.
        # Mac OS X appears to behave the same way; at least the Mac OS X
        # builder's behaviour suggests so.
        return exit_code( val+128 )

# -----

def compile_timeout_multiplier( val ):
    return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)

def _compile_timeout_multiplier( name, opts, v ):
    opts.compile_timeout_multiplier = v

def run_timeout_multiplier( val ):
    return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)

def _run_timeout_multiplier( name, opts, v ):
    opts.run_timeout_multiplier = v

# -----

def extra_run_opts( val ):
    return lambda name, opts, v=val: _extra_run_opts(name, opts, v)

def _extra_run_opts( name, opts, v ):
    opts.extra_run_opts = v

# -----

def extra_hc_opts( val ):
    return lambda name, opts, v=val: _extra_hc_opts(name, opts, v)

def _extra_hc_opts( name, opts, v ):
    opts.extra_hc_opts = v

# -----

def extra_clean( files ):
    # TODO. Remove all calls to extra_clean.
    return lambda _name, _opts: None

def extra_files(files):
    return lambda name, opts: _extra_files(name, opts, files)

def _extra_files(name, opts, files):
    opts.extra_files.extend(files)

# -----

# Defaults to "test everything, and only break on extreme cases"
#
# The inputs to this function are slightly interesting:
# metric can be either:
#     - 'all', in which case all 3 possible metrics are collected and compared.
#     - The specific metric one wants to use in the test.
#     - A list of the metrics one wants to use in the test.
#
# Deviation defaults to 20% because the goal is correctness over performance.
# The testsuite should avoid breaking when there is not an actual error.
# Instead, the testsuite should notify of regressions in a non-breaking manner.
#
# collect_compiler_stats is used when the metrics collected are about the compiler.
# collect_stats is used in the majority case when the metrics to be collected
# are about the performance of the runtime code generated by the compiler.
def collect_compiler_stats(metric='all', deviation=20):
    return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d, True)

def collect_stats(metric='all', deviation=20):
    return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d)

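# The three metrics that 'all' expands to when collecting stats.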
def testing_metrics():
    return ['bytes allocated', 'peak_megabytes_allocated', 'max_bytes_used']

# This is an internal function that is used only in the implementation.
# 'is_compiler_stats_test' is somewhat of an unfortunate name.
# If the boolean is set to true, it indicates that this test is one that
# measures the performance numbers of the compiler.
# As this is a fairly rare case in the testsuite, it defaults to false to
# indicate that it is a 'normal' performance test.
def _collect_stats(name, opts, metric, deviation, is_compiler_stats_test=False):
    if not re.match(r'^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        failBecause('This test has an invalid name.')

    tests = Perf.get_perf_stats('HEAD^')

    # Might have multiple metrics being measured for a single test.
    test = [t for t in tests if t.test == name]

    if tests == [] or test == []:
        # There are no prior metrics for this test.
        if isinstance(metric, str):
            if metric == 'all':
                for field in testing_metrics():
                    opts.stats_range_fields[field] = None
            else:
                opts.stats_range_fields[metric] = None
        if isinstance(metric, list):
            for field in metric:
                opts.stats_range_fields[field] = None

        return

    if is_compiler_stats_test:
        opts.is_compiler_stats_test = True

    # Compiler performance numbers change when debugging is on, making the results
    # useless and confusing. Therefore, skip if debugging is on.
    if config.compiler_debugged and is_compiler_stats_test:
        opts.skip = True

    # get the average value of the given metric from test
    def get_avg_val(metric_2):
        metric_2_metrics = [float(t.value) for t in test if t.metric == metric_2]
        return sum(metric_2_metrics) / len(metric_2_metrics)

    # 'all' is a shorthand to test for bytes allocated, peak megabytes allocated, and max bytes used.
    if isinstance(metric, str):
        if metric == 'all':
            for field in testing_metrics():
                opts.stats_range_fields[field] = (get_avg_val(field), deviation)
            return
        else:
            opts.stats_range_fields[metric] = (get_avg_val(metric), deviation)
            return

    if isinstance(metric, list):
        for field in metric:
            opts.stats_range_fields[field] = (get_avg_val(field), deviation)

# -----

def when(b, f):
    # When list_brokens is on, we want to see all expect_broken calls,
    # so we always do f
    if b or config.list_broken:
        return f
    else:
        return normal

def unless(b, f):
    return when(not b, f)

def doing_ghci():
    return 'ghci' in config.run_ways

def ghc_dynamic():
    return config.ghc_dynamic

def fast():
    return config.speed == 2

def platform( plat ):
    return config.platform == plat

def opsys( os ):
    return config.os == os

def arch( arch ):
    return config.arch == arch

def wordsize( ws ):
    return config.wordsize == str(ws)

def msys( ):
    return config.msys

def cygwin( ):
    return config.cygwin

def have_vanilla( ):
    return config.have_vanilla

def have_ncg( ):
    return config.have_ncg

def have_dynamic( ):
    return config.have_dynamic

def have_profiling( ):
    return config.have_profiling

def in_tree_compiler( ):
    return config.in_tree_compiler

def unregisterised( ):
    return config.unregisterised

def compiler_profiled( ):
    return config.compiler_profiled

def compiler_debugged( ):
    return config.compiler_debugged

def have_gdb( ):
    return config.have_gdb

def have_readelf( ):
    return config.have_readelf

# ---

def high_memory_usage(name, opts):
    opts.alone = True

# If a test is for a multi-CPU race, then running the test alone
# increases the chance that we'll actually see it.
def multi_cpu_race(name, opts):
    opts.alone = True

# ---
def literate( name, opts ):
    opts.literate = True

def c_src( name, opts ):
    opts.c_src = True

def objc_src( name, opts ):
    opts.objc_src = True

def objcpp_src( name, opts ):
    opts.objcpp_src = True

def cmm_src( name, opts ):
    opts.cmm_src = True

def outputdir( odir ):
    return lambda name, opts, d=odir: _outputdir(name, opts, d)

def _outputdir( name, opts, odir ):
    opts.outputdir = odir

# ----

def pre_cmd( cmd ):
    return lambda name, opts, c=cmd: _pre_cmd(name, opts, c)

def _pre_cmd( name, opts, cmd ):
    opts.pre_cmd = cmd

# ----

def cmd_prefix( prefix ):
    return lambda name, opts, p=prefix: _cmd_prefix(name, opts, p)

def _cmd_prefix( name, opts, prefix ):
    opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd

# ----

def cmd_wrapper( fun ):
    return lambda name, opts, f=fun: _cmd_wrapper(name, opts, f)

def _cmd_wrapper( name, opts, fun ):
    opts.cmd_wrapper = fun

# ----

def compile_cmd_prefix( prefix ):
    return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, p)

def _compile_cmd_prefix( name, opts, prefix ):
    opts.compile_cmd_prefix = prefix

# ----

def check_stdout( f ):
    return lambda name, opts, f=f: _check_stdout(name, opts, f)

def _check_stdout( name, opts, f ):
    opts.check_stdout = f

def no_check_hp(name, opts):
    opts.check_hp = False

# ----

def filter_stdout_lines( regex ):
    """ Filter lines of stdout with the given regular expression """
    def f( name, opts ):
        _normalise_fun(name, opts, lambda s: '\n'.join(re.findall(regex, s)))
    return f

def normalise_slashes( name, opts ):
    _normalise_fun(name, opts, normalise_slashes_)

def normalise_exe( name, opts ):
    _normalise_fun(name, opts, normalise_exe_)

def normalise_fun( *fs ):
    return lambda name, opts: _normalise_fun(name, opts, fs)

def _normalise_fun( name, opts, *fs ):
    opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)

def normalise_errmsg_fun( *fs ):
    return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)

def _normalise_errmsg_fun( name, opts, *fs ):
    opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)

def check_errmsg(needle):
    def norm(str):
        if needle in str:
            return "%s contained in -ddump-simpl\n" % needle
        else:
            return "%s not contained in -ddump-simpl\n" % needle
    return normalise_errmsg_fun(norm)

def grep_errmsg(needle):
    def norm(str):
        return "".join(filter(lambda l: re.search(needle, l), str.splitlines(True)))
    return normalise_errmsg_fun(norm)

def normalise_whitespace_fun(f):
    return lambda name, opts: _normalise_whitespace_fun(name, opts, f)

def _normalise_whitespace_fun(name, opts, f):
    opts.whitespace_normaliser = f

def normalise_version_( *pkgs ):
    def normalise_version__( str ):
        return re.sub('(' + '|'.join(map(re.escape, pkgs)) + ')-[0-9.]+',
                      '\\1-<VERSION>', str)
    return normalise_version__

def normalise_version( *pkgs ):
    def normalise_version__( name, opts ):
        _normalise_fun(name, opts, normalise_version_(*pkgs))
        _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
    return normalise_version__

def normalise_drive_letter(name, opts):
    # Windows only. Change D:\\ to C:\\.
    _normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))

def keep_prof_callstacks(name, opts):
    """Keep profiling callstacks.

    Use together with `only_ways(prof_ways)`.
    """
    opts.keep_prof_callstacks = True

def join_normalisers(*a):
    """
    Compose functions, flattening sequences.

       join_normalisers(f1,[f2,f3],f4)

    is the same as

       lambda x: f1(f2(f3(f4(x))))
    """

    def flatten(l):
        """
        Taken from http://stackoverflow.com/a/2158532/946226
        """
        for el in l:
            if (isinstance(el, collections.abc.Iterable)
                and not isinstance(el, (bytes, str))):
                for sub in flatten(el):
                    yield sub
            else:
                yield el

    a = flatten(a)

    fn = lambda x: x  # identity function
    for f in a:
        assert callable(f)
        fn = lambda x, f=f, fn=fn: fn(f(x))
    return fn

# ----
# Apply a setup function, or a (possibly nested) list of setup
# functions, to a test's options.

def executeSetups(fs, name, opts):
    if type(fs) is list:
        # If we have a list of setups, then execute each one
        for f in fs:
            executeSetups(f, name, opts)
    else:
        # fs is a single function, so just apply it
        fs(name, opts)

# -----------------------------------------------------------------------------
# The current directory of tests

def newTestDir(tempdir, dir):

    global thisdir_settings
    # reset the options for this test directory
    def settings(name, opts, tempdir=tempdir, dir=dir):
        return _newTestDir(name, opts, tempdir, dir)
    thisdir_settings = settings

# Should be equal to entry in toplevel .gitignore.
testdir_suffix = '.run'

def _newTestDir(name, opts, tempdir, dir):
    testdir = os.path.join('', *(p for p in PurePath(dir).parts if p != '..'))
    opts.srcdir = os.path.join(os.getcwd(), dir)
    opts.testdir = os.path.join(tempdir, testdir, name + testdir_suffix)
    opts.compiler_always_flags = config.compiler_always_flags

# -----------------------------------------------------------------------------
# Actually doing tests

parallelTests = []
aloneTests = []
allTestNames = set([])

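# Run one test: on a fresh worker thread (bounded by pool_sema) when
# threading is enabled, otherwise directly in this thread.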
def runTest(watcher, opts, name, func, args):
    if config.use_threads:
        pool_sema.acquire()
        t = threading.Thread(target=test_common_thread,
                             name=name,
                             args=(watcher, name, opts, func, args))
        t.daemon = False
        t.start()
    else:
        test_common_work(watcher, name, opts, func, args)

# name  :: String
# setup :: [TestOpt] -> IO ()
def test(name, setup, func, args):
    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match(r'^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    if config.run_only_some_tests:
        if name not in config.only:
            return
    else:
        # Note [Mutating config.only]
        # config.only is initially the set of tests requested by
        # the user (via 'make TEST='). We then remove all tests that
        # we've already seen (in .T files), so that we can later
        # report on any tests we couldn't find and error out.
        config.only.remove(name)

    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
    myTestOpts = copy.deepcopy(default_testopts)

    executeSetups([thisdir_settings, setup], name, myTestOpts)

    thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
    else:
        parallelTests.append(thisTest)
    allTestNames.add(name)

if config.use_threads:
    def test_common_thread(watcher, name, opts, func, args):
        try:
            test_common_work(watcher, name, opts, func, args)
        finally:
            pool_sema.release()

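# The package database cache should not change as a side effect of
# running a test. Its timestamp is recorded before and after each test
# (see test_common_work) so that stray rebuilds can be reported.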
def get_package_cache_timestamp():
    if config.package_conf_cache_file == '':
        return 0.0
    else:
        try:
            return os.stat(config.package_conf_cache_file).st_mtime
        except OSError:
            return 0.0

do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o', '.out') # See #12112

def test_common_work(watcher, name, opts, func, args):
    try:
        t.total_tests += 1
        setLocalTestOpts(opts)

        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()

        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']

        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]

        t.total_test_cases += len(all_ways)

        ok_way = lambda way: \
            not getTestOpts().skip \
            and (getTestOpts().only_ways is None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and (not (config.only_perf_tests and not isStatsTest())) \
            and way not in getTestOpts().omit_ways

        # The ways we were not asked to skip
        do_ways = list(filter(ok_way, all_ways))

        # Only run all ways in slow mode.
        # See Note [validate and testsuite speed] in toplevel Makefile.
        if config.accept:
            # Only ever run one way
            do_ways = do_ways[:1]
        elif config.speed > 0:
            # However, if we EXPLICITLY asked for a way (with extra_ways)
            # please test it!
            explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
            other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
            do_ways = other_ways[:1] + explicit_ways

        # Find all files in the source directory that this test
        # depends on. Do this only once for all ways.
        # Generously add all filenames that start with the name of
        # the test to this set, as a convenience to test authors.
        # They will have to use the `extra_files` setup function to
        # specify all other files that their test depends on (but
        # this seems to be necessary for only about 10% of all
        # tests).
        files = set(f for f in os.listdir(opts.srcdir)
                    if f.startswith(name) and not f == name and
                       not f.endswith(testdir_suffix) and
                       not os.path.splitext(f)[1] in do_not_copy)
        for filename in (opts.extra_files + extra_src_files.get(name, [])):
            if filename.startswith('/'):
                framework_fail(name, 'whole-test',
                    'no absolute paths in extra_files please: ' + filename)

            elif '*' in filename:
                # Don't use wildcards in extra_files too much, as
                # globbing is slow.
                files.update((os.path.relpath(f, opts.srcdir)
                              for f in glob.iglob(in_srcdir(filename))))

            elif filename:
                files.add(filename)

            else:
                framework_fail(name, 'whole-test', 'extra_file is empty string')

        # Run the required tests...
        for way in do_ways:
            if stopping():
                break
            try:
                do_test(name, way, func, args, files)
            except KeyboardInterrupt:
                stopNow()
            except Exception as e:
                framework_fail(name, way, str(e))
                traceback.print_exc()

        t.n_tests_skipped += len(set(all_ways) - set(do_ways))

        if config.cleanup and do_ways:
            try:
                cleanup()
            except Exception as e:
                framework_fail(name, 'runTest', 'Unhandled exception during cleanup: ' + str(e))

        package_conf_cache_file_end_timestamp = get_package_cache_timestamp()

        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))

    except Exception as e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
    finally:
        watcher.notify()

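# Run a single test in a single way, in a freshly cleaned test
# directory, and record the outcome.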
def do_test(name, way, func, args, files):
    opts = getTestOpts()

    full_name = name + '(' + way + ')'

    if_verbose(2, "=====> {0} {1} of {2} {3}".format(
        full_name, t.total_tests, len(allTestNames),
        [len(t.unexpected_passes),
         len(t.unexpected_failures),
         len(t.framework_failures)]))

    # Clean up prior to the test, so that we can't spuriously conclude
    # that it passed on the basis of old run outputs.
    cleanup()
    os.makedirs(opts.testdir)

    # Link all source files for this test into a new directory in
    # /tmp, and run the test in that directory. This makes it
    # possible to run tests in parallel, without modification, that
    # would otherwise (accidentally) write to the same output file.
    # It also makes it easier to keep the testsuite clean.

    for extra_file in files:
        src = in_srcdir(extra_file)
        dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
        if os.path.isfile(src):
            link_or_copy_file(src, dst)
        elif os.path.isdir(src):
            if os.path.exists(dst):
                shutil.rmtree(dst)
            os.mkdir(dst)
            lndir(src, dst)
        else:
            if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
                # When using a ghc built without haddock support, .t
                # files are rightfully missing. Don't
                # framework_fail. Test will be skipped later.
                pass
            else:
                framework_fail(name, way,
                    'extra_file does not exist: ' + extra_file)

    if func.__name__ == 'run_command' or opts.pre_cmd:
        # When running 'MAKE' make sure 'TOP' still points to the
        # root of the testsuite.
        src_makefile = in_srcdir('Makefile')
        dst_makefile = in_testdir('Makefile')
        if os.path.exists(src_makefile):
            with io.open(src_makefile, 'r', encoding='utf8') as src:
                makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
                with io.open(dst_makefile, 'w', encoding='utf8') as dst:
                    dst.write(makefile)

    if opts.pre_cmd:
        exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, override_options(opts.pre_cmd)),
                           stderr=subprocess.STDOUT,
                           print_output=config.verbose >= 3)

        # If the user used expect_broken, then don't record failures of pre_cmd
        if exit_code != 0 and opts.expect not in ['fail']:
            framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
            if_verbose(1, '** pre_cmd was "{0}".'.format(override_options(opts.pre_cmd)))

    result = func(*[name, way] + args)

    if opts.expect not in ['pass', 'fail', 'missing-lib']:
        framework_fail(name, way, 'bad expected ' + opts.expect)

    try:
        passFail = result['passFail']
    except (KeyError, TypeError):
        passFail = 'No passFail found'

    directory = re.sub(r'^\.[/\\]', '', opts.testdir)

    if passFail == 'pass':
        if _expect_pass(way):
            t.expected_passes.append((directory, name, way))
            t.n_expected_passes += 1
        else:
            if_verbose(1, '*** unexpected pass for %s' % full_name)
            t.unexpected_passes.append((directory, name, 'unexpected', way))
    elif passFail == 'fail':
        if _expect_pass(way):
            reason = result['reason']
            tag = result.get('tag')
            if tag == 'stat':
                if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
                t.unexpected_stat_failures.append((directory, name, reason, way))
            else:
                if_verbose(1, '*** unexpected failure for %s' % full_name)
                t.unexpected_failures.append((directory, name, reason, way))
        else:
            if opts.expect == 'missing-lib':
                t.missing_libs.append((directory, name, 'missing-lib', way))
            else:
                t.n_expected_failures += 1
    else:
        framework_fail(name, way, 'bad result ' + passFail)

# Make is often invoked with -s, which means if it fails, we get
# no feedback at all. This is annoying. So let's remove the option
# if found and instead have the testsuite decide on what to do
# with the output.
def override_options(pre_cmd):
    if config.verbose >= 5 and bool(re.match(r'\$make', pre_cmd, re.I)):
        return pre_cmd.replace('-s', '') \
                      .replace('--silent', '') \
                      .replace('--quiet', '')

    return pre_cmd

def framework_fail(name, way, reason):
    opts = getTestOpts()
    directory = re.sub(r'^\.[/\\]', '', opts.testdir)
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
    t.framework_failures.append((directory, name, way, reason))

def framework_warn(name, way, reason):
    opts = getTestOpts()
    directory = re.sub(r'^\.[/\\]', '', opts.testdir)
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework warning for %s %s ' % (full_name, reason))
    t.framework_warnings.append((directory, name, way, reason))

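# A result is "bad" unless it is explicitly a pass; a missing or
# malformed result dictionary also counts as bad.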
def badResult(result):
    try:
        if result['passFail'] == 'pass':
            return False
        return True
    except (KeyError, TypeError):
        return True

# -----------------------------------------------------------------------------
# Generic command tests

# A generic command test is expected to run and exit successfully.
#
# The expected exit code can be changed via exit_code() as normal, and
# the expected stdout/stderr are stored in <testname>.stdout and
# <testname>.stderr. The output of the command can be ignored
# altogether by using the setup functions ignore_stdout and
# ignore_stderr.

def run_command( name, way, cmd ):
    return simple_run( name, '', override_options(cmd), '' )

# -----------------------------------------------------------------------------
# GHCi tests

def ghci_script( name, way, script):
    flags = ' '.join(get_compiler_flags())
    way_flags = ' '.join(config.way_flags[way])

    # We pass HC and HC_OPTS as environment variables, so that the
    # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
    cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {way_flags} {flags}'
          ).format(flags=flags, way_flags=way_flags)
    # NB: put way_flags before flags so that flags in all.T can override others

    getTestOpts().stdin = script
    return simple_run( name, way, cmd, getTestOpts().extra_run_opts )

# -----------------------------------------------------------------------------
# Compile-only tests

def compile( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], extra_hc_opts )

def compile_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], extra_hc_opts )

def backpack_typecheck( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )

def backpack_typecheck_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )

def backpack_compile( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], extra_hc_opts, backpack=True )

def backpack_compile_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], extra_hc_opts, backpack=True )

def backpack_run( name, way, extra_hc_opts ):
    return compile_and_run__( name, way, '', [], extra_hc_opts, backpack=True )

def multimod_compile( name, way, top_mod, extra_hc_opts ):
    return do_compile( name, way, 0, top_mod, [], extra_hc_opts )

def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
    return do_compile( name, way, 1, top_mod, [], extra_hc_opts )

def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
    return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)

def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
    return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)

def do_compile(name, way, should_fail, top_mod, extra_mods, extra_hc_opts, **kwargs):
    # print 'Compile only, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    result = simple_build(name, way, extra_hc_opts, should_fail, top_mod, 0, 1, **kwargs)

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    expected_stderr_file = find_expected_file(name, 'stderr')
    actual_stderr_file = add_suffix(name, 'comp.stderr')

    if not compare_outputs(way, 'stderr',
                           join_normalisers(getTestOpts().extra_errmsg_normaliser,
                                            normalise_errmsg),
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=getattr(getTestOpts(),
                                                         "whitespace_normaliser",
                                                         normalise_whitespace)):
        return failBecause('stderr mismatch')

    # no problems found, this test passed
    return passed()

def compile_cmp_asm( name, way, extra_hc_opts ):
    print('Compile only, extra args = ', extra_hc_opts)
    result = simple_build(name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)

    if badResult(result):
        return result

    # the actual assembly output should always match the expected output
    expected_asm_file = find_expected_file(name, 'asm')
    actual_asm_file = add_suffix(name, 's')

    if not compare_outputs(way, 'asm',
                           join_normalisers(normalise_errmsg, normalise_asm),
                           expected_asm_file, actual_asm_file):
        return failBecause('asm mismatch')

    # no problems found, this test passed
    return passed()

# -----------------------------------------------------------------------------
# Compile-and-run tests

def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts, backpack=0 ):
    # print 'Compile and run, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    if way.startswith('ghci'): # interpreted...
        return interpreter_run(name, way, extra_hc_opts, top_mod)
    else: # compiled...
        result = simple_build(name, way, extra_hc_opts, 0, top_mod, 1, 1, backpack=backpack)
        if badResult(result):
            return result

        cmd = './' + name

        # we don't check the compiler's stderr for a compile-and-run test
        return simple_run( name, way, cmd, getTestOpts().extra_run_opts )

def compile_and_run( name, way, extra_hc_opts ):
    return compile_and_run__( name, way, '', [], extra_hc_opts)

def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
    return compile_and_run__( name, way, top_mod, [], extra_hc_opts)

def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
    return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)

def stats( name, way, stats_file ):
    opts = getTestOpts()
    return check_stats(name, way, stats_file, opts.stats_range_fields)

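# Wrap one measured value in a Perf.PerfStat record, tagged with the
# current test environment.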
def metric_dict(name, way, metric, value):
    return Perf.PerfStat(
        test_env = config.test_env,
        test     = name,
        way      = way,
        metric   = metric,
        value    = value)

# -----------------------------------------------------------------------------
# Check test stats. This prints the results for the user.
# name: name of the test.
# way: the way.
# stats_file: the path of the stats_file containing the stats for the test.
# range_fields: a dict from metric name to (expected value, deviation)
#     pairs (or None for a metric with no prior baseline).
# Returns a pass/fail object. Passes if the stats are within the expected
# value ranges.
def check_stats(name, way, stats_file, range_fields):
    result = passed()
    if range_fields:
        try:
            with open(in_testdir(stats_file)) as f:
                stats_file_contents = f.read()
        except IOError as e:
            return failBecause(str(e))

        for (metric, range_val_dev) in range_fields.items():
            field_match = re.search(r'\("' + metric + r'", "([0-9]+)"\)', stats_file_contents)
            if field_match is None:
                print('Failed to find metric: ', metric)
                metric_result = failBecause('no such stats metric')
            else:
                actual_val = int(field_match.group(1))

                # Store the metric so it can later be stored in a git note.
                perf_stat = metric_dict(name, way, metric, actual_val)
                change = None

                # If this is the first time running the benchmark, then pass.
                if range_val_dev is None:
                    metric_result = passed()
                    change = MetricChange.NewMetric
                else:
                    (expected_val, tolerance_dev) = range_val_dev
                    (change, metric_result) = Perf.check_stats_change(
                        perf_stat,
                        expected_val,
                        tolerance_dev,
                        config.allowed_perf_changes,
                        config.verbose >= 4)
                t.metrics.append((change, perf_stat))

            # If any metric fails then the test fails.
            # Note, the remaining metrics are still run so that
            # a complete list of changes can be presented to the user.
            if metric_result['passFail'] == 'fail':
                result = metric_result

    return result

# -----------------------------------------------------------------------------
# Build a single-module program

def extras_build( way, extra_mods, extra_hc_opts ):
    for mod, opts in extra_mods:
        result = simple_build(mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0)
        if not (mod.endswith('.hs') or mod.endswith('.lhs')):
            extra_hc_opts += ' ' + replace_suffix(mod, 'o')
        if badResult(result):
            return result

    return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}

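# Compile a single unit. should_fail inverts the sense of the compiler's
# exit code, a non-empty top_mod selects --make mode, and link/addsuf
# control whether the result is linked and whether a source suffix is
# appended to the test name.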
def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, backpack=False):
    opts = getTestOpts()

    # Redirect stdout and stderr to the same file
    stdout = in_testdir(name, 'comp.stderr')
    stderr = subprocess.STDOUT

    if top_mod != '':
        srcname = top_mod
    elif addsuf:
        if backpack:
            srcname = add_suffix(name, 'bkp')
        else:
            srcname = add_hs_lhs_suffix(name)
    else:
        srcname = name

    if top_mod != '':
        to_do = '--make '
        if link:
            to_do = to_do + '-o ' + name
    elif backpack:
        if link:
            to_do = '-o ' + name + ' '
        else:
            to_do = ''
        to_do = to_do + '--backpack '
    elif link:
        to_do = '-o ' + name
    else:
        to_do = '-c' # just compile

    stats_file = name + '.comp.stats'
    if isCompilerStatsTest():
        extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    if backpack:
        extra_hc_opts += ' -outputdir ' + name + '.out'

    # Required by GHC 7.3+, harmless for earlier versions:
    if (getTestOpts().c_src or
        getTestOpts().objc_src or
        getTestOpts().objcpp_src or
        getTestOpts().cmm_src):
        extra_hc_opts += ' -no-hs-main '

    if getTestOpts().compile_cmd_prefix == '':
        cmd_prefix = ''
    else:
        cmd_prefix = getTestOpts().compile_cmd_prefix + ' '

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
           '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())

    exit_code = runCmd(cmd, None, stdout, stderr, opts.compile_timeout_multiplier)

    if exit_code != 0 and not should_fail:
        if config.verbose >= 1 and _expect_pass(way):
            print('Compile failed (exit code {0}) errors were:'.format(exit_code))
            actual_stderr_path = in_testdir(name, 'comp.stderr')
            dump_file(actual_stderr_path)

    # ToDo: if the sub-shell was killed by ^C, then exit

    if isCompilerStatsTest():
        statsResult = check_stats(name, way, stats_file, opts.stats_range_fields)
        if badResult(statsResult):
            return statsResult

    if should_fail:
        if exit_code == 0:
            return failBecause('exit code 0')
    else:
        if exit_code != 0:
            return failBecause('exit code non-0')

    return passed()

# -----------------------------------------------------------------------------
# Run a program and check its output
#
# If testname.stdin exists, route input from that, else
# from /dev/null. Route output to testname.run.stdout and
# testname.run.stderr. Returns the exit code of the run.

def simple_run(name, way, prog, extra_run_opts):
    opts = getTestOpts()

    # figure out what to use for stdin
    if opts.stdin:
        stdin = in_testdir(opts.stdin)
    elif os.path.exists(in_testdir(name, 'stdin')):
        stdin = in_testdir(name, 'stdin')
    else:
        stdin = None

    stdout = in_testdir(name, 'run.stdout')
    if opts.combined_output:
        stderr = subprocess.STDOUT
    else:
        stderr = in_testdir(name, 'run.stderr')

    my_rts_flags = rts_flags(way)

    stats_file = name + '.stats'
    if isStatsTest() and not isCompilerStatsTest():
        stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    else:
        stats_args = ''

    # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
    cmd = prog + stats_args + ' ' + my_rts_flags + ' ' + extra_run_opts

    if opts.cmd_wrapper is not None:
        cmd = opts.cmd_wrapper(cmd)

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    # run the command
    exit_code = runCmd(cmd, stdin, stdout, stderr, opts.run_timeout_multiplier)

    # check the exit code
    if exit_code != opts.exit_code:
        if config.verbose >= 1 and _expect_pass(way):
            print('Wrong exit code for ' + name + '(' + way + ')' + '(expected', opts.exit_code, ', actual', exit_code, ')')
            dump_stdout(name)
            dump_stderr(name)
        return failBecause('bad exit code')

    if not (opts.ignore_stderr or stderr_ok(name, way) or opts.combined_output):
        return failBecause('bad stderr')
    if not (opts.ignore_stdout or stdout_ok(name, way)):
        return failBecause('bad stdout')

    check_hp = '-h' in my_rts_flags and opts.check_hp
    check_prof = '-p' in my_rts_flags

    # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
    if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
        return failBecause('bad heap profile')
    if check_prof and not check_prof_ok(name, way):
        return failBecause('bad profile')

    return check_stats(name, way, stats_file, opts.stats_range_fields)

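# Render the way-specific RTS options as a '+RTS ... -RTS' argument
# string (empty when the way has none).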
def rts_flags(way):
    args = config.way_rts_flags.get(way, [])
    return '+RTS {0} -RTS'.format(' '.join(args)) if args else ''

# -----------------------------------------------------------------------------
# Run a program in the interpreter and check its output

def interpreter_run(name, way, extra_hc_opts, top_mod):
    opts = getTestOpts()

    stdout = in_testdir(name, 'interp.stdout')
    stderr = in_testdir(name, 'interp.stderr')
    script = in_testdir(name, 'genscript')

    if opts.combined_output:
        framework_fail(name, 'unsupported',
                       'WAY=ghci and combined_output together is not supported')

    if (top_mod == ''):
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = top_mod

    delimiter = '===== program output begins here\n'

    with io.open(script, 'w', encoding='utf8') as f:
        # set the prog name and command-line args to match the compiled
        # environment.
        f.write(':set prog ' + name + '\n')
        f.write(':set args ' + opts.extra_run_opts + '\n')
        # Add marker lines to the stdout and stderr output files, so we
        # can separate GHCi's output from the program's.
        f.write(':! echo ' + delimiter)
        f.write(':! echo 1>&2 ' + delimiter)
        # Set stdout to be line-buffered to match the compiled environment.
        f.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
        # wrapping in GHC.TopHandler.runIO ensures we get the same output
        # in the event of an exception as for the compiled program.
        f.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')

    stdin = in_testdir(opts.stdin if opts.stdin else add_suffix(name, 'stdin'))
    if os.path.exists(stdin):
        os.system('cat "{0}" >> "{1}"'.format(stdin, script))

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())

    if opts.cmd_wrapper is not None:
        cmd = opts.cmd_wrapper(cmd)

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    exit_code = runCmd(cmd, script, stdout, stderr, opts.run_timeout_multiplier)

    # split the stdout into compilation/program output
    split_file(stdout, delimiter,
               in_testdir(name, 'comp.stdout'),
               in_testdir(name, 'run.stdout'))
    split_file(stderr, delimiter,
               in_testdir(name, 'comp.stderr'),
               in_testdir(name, 'run.stderr'))

    # check the exit code
    if exit_code != opts.exit_code:
        print('Wrong exit code for ' + name + '(' + way + ') (expected', opts.exit_code, ', actual', exit_code, ')')
        dump_stdout(name)
        dump_stderr(name)
        return failBecause('bad exit code')

    # ToDo: if the sub-shell was killed by ^C, then exit

    if not (opts.ignore_stderr or stderr_ok(name, way)):
        return failBecause('bad stderr')
    elif not (opts.ignore_stdout or stdout_ok(name, way)):
        return failBecause('bad stdout')
    else:
        return passed()

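# Split a GHCi transcript into compiler output (everything before the
# delimiter line) and program output (everything after it).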
def split_file(in_fn, delimiter, out1_fn, out2_fn):
    # See Note [Universal newlines].
    with io.open(in_fn, 'r', encoding='utf8', errors='replace', newline=None) as infile:
        with io.open(out1_fn, 'w', encoding='utf8', newline='') as out1:
            with io.open(out2_fn, 'w', encoding='utf8', newline='') as out2:
                line = infile.readline()
                while re.sub(r'^\s*', '', line) != delimiter and line != '':
                    out1.write(line)
                    line = infile.readline()

                line = infile.readline()
                while line != '':
                    out2.write(line)
                    line = infile.readline()

# -----------------------------------------------------------------------------
# Utils
def get_compiler_flags():
    opts = getTestOpts()

    flags = copy.copy(opts.compiler_always_flags)

    flags.append(opts.extra_hc_opts)

    if opts.outputdir is not None:
        flags.extend(["-outputdir", opts.outputdir])

    return flags

def stdout_ok(name, way):
    actual_stdout_file = add_suffix(name, 'run.stdout')
    expected_stdout_file = find_expected_file(name, 'stdout')

    extra_norm = join_normalisers(normalise_output, getTestOpts().extra_normaliser)

    check_stdout = getTestOpts().check_stdout
    if check_stdout:
        actual_stdout_path = in_testdir(actual_stdout_file)
        return check_stdout(actual_stdout_path, extra_norm)

    return compare_outputs(way, 'stdout', extra_norm,
                           expected_stdout_file, actual_stdout_file)

def dump_stdout( name ):
    with open(in_testdir(name, 'run.stdout'), encoding='utf8') as f:
        str = f.read().strip()
        if str:
            print("Stdout (", name, "):")
            print(str)

def stderr_ok(name, way):
    actual_stderr_file = add_suffix(name, 'run.stderr')
    expected_stderr_file = find_expected_file(name, 'stderr')

    return compare_outputs(way, 'stderr',
                           join_normalisers(normalise_errmsg, getTestOpts().extra_errmsg_normaliser),
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=normalise_whitespace)

def dump_stderr( name ):
    with open(in_testdir(name, 'run.stderr'), encoding='utf8') as f:
        str = f.read().strip()
        if str:
            print("Stderr (", name, "):")
            print(str)

def read_no_crs(file):
    str = ''
    try:
        # See Note [Universal newlines].
        with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
            str = h.read()
    except Exception:
        # On Windows, if the program fails very early, it seems the
        # files stdout/stderr are redirected to may not get created
        pass
    return str

def write_file(file, str):
    # See Note [Universal newlines].
    with io.open(file, 'w', encoding='utf8', newline='') as h:
        h.write(str)

# Note [Universal newlines]
#
# We don't want to write any Windows style line endings ever, because
# it would mean that `make accept` would touch every line of the file
# when switching between Linux and Windows.
#
# Furthermore, when reading a file, it is convenient to translate all
# Windows style endings to '\n', as it simplifies searching or massaging
# the content.
#
# Solution: use `io.open` instead of `open`
#  * when reading: use newline=None to translate '\r\n' to '\n'
#  * when writing: use newline='' to not translate '\n' to '\r\n'
#
# See https://docs.python.org/2/library/io.html#io.open.
#
# This should work with both python2 and python3, and with both mingw*
# and msys2 style Python.
#
# Do note that io.open returns unicode strings. So we have to specify
# the expected encoding. But there is at least one file which is not
# valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
# Another solution would be to open files in binary mode always, and
# operate on bytes.

def check_hp_ok(name):
    opts = getTestOpts()

    # do not qualify for hp2ps because we should be in the right directory
    hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())

    hp2psResult = runCmd(hp2psCmd)

    actual_ps_path = in_testdir(name, 'ps')

    if hp2psResult == 0:
        if os.path.exists(actual_ps_path):
            if gs_working:
                gsResult = runCmd(genGSCmd(actual_ps_path))
                if gsResult == 0:
                    return True
                else:
                    print("hp2ps output for " + name + " is not valid PostScript")
                    return False
            else:
                return True # assume postscript is valid without ghostscript
        else:
            print("hp2ps did not generate PostScript for " + name)
            return False
    else:
        print("hp2ps error when processing heap profile for " + name)
        return False

def check_prof_ok(name, way):
    expected_prof_file = find_expected_file(name, 'prof.sample')
    expected_prof_path = in_testdir(expected_prof_file)

    # Check actual prof file only if we have an expected prof file to
    # compare it with.
    if not os.path.exists(expected_prof_path):
        return True

    actual_prof_file = add_suffix(name, 'prof')
    actual_prof_path = in_testdir(actual_prof_file)

    if not os.path.exists(actual_prof_path):
        print(actual_prof_path + " does not exist")
        return False

    if os.path.getsize(actual_prof_path) == 0:
        print(actual_prof_path + " is empty")
        return False

    return compare_outputs(way, 'prof', normalise_prof,
                           expected_prof_file, actual_prof_file,
                           whitespace_normaliser=normalise_whitespace)

# Compare expected output to actual output, and optionally accept the
# new output. Returns true if output matched or was accepted, false
# otherwise. See Note [Output comparison] for the meaning of the
# normaliser and whitespace_normaliser parameters.
def compare_outputs(way, kind, normaliser, expected_file, actual_file,
                    whitespace_normaliser=lambda x: x):

    expected_path = in_srcdir(expected_file)
    actual_path = in_testdir(actual_file)

    if os.path.exists(expected_path):
        expected_str = normaliser(read_no_crs(expected_path))
        # Create the .normalised file in the testdir, not in the srcdir.
        expected_normalised_file = add_suffix(expected_file, 'normalised')
        expected_normalised_path = in_testdir(expected_normalised_file)
    else:
        expected_str = ''
        expected_normalised_path = '/dev/null'

    actual_raw = read_no_crs(actual_path)
    actual_str = normaliser(actual_raw)

    # See Note [Output comparison].
    if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
        return True
    else:
        if config.verbose >= 1 and _expect_pass(way):
            print('Actual ' + kind + ' output differs from expected:')

        if expected_normalised_path != '/dev/null':
            write_file(expected_normalised_path, expected_str)

        actual_normalised_path = add_suffix(actual_path, 'normalised')
        write_file(actual_normalised_path, actual_str)

        if config.verbose >= 1 and _expect_pass(way):
            # See Note [Output comparison].
            r = runCmd('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
                                                     actual_normalised_path),
                       print_output=True)

            # If for some reason there were no non-whitespace differences,
            # then do a full diff
            if r == 0:
                r = runCmd('diff -u "{0}" "{1}"'.format(expected_normalised_path,
                                                        actual_normalised_path),
                           print_output=True)

        if config.accept and (getTestOpts().expect == 'fail' or
                              way in getTestOpts().expect_fail_for):
            if_verbose(1, 'Test is expected to fail. Not accepting new output.')
            return False
        elif config.accept and actual_raw:
            if config.accept_platform:
                if_verbose(1, 'Accepting new output for platform "'
                              + config.platform + '".')
                expected_path += '-' + config.platform
            elif config.accept_os:
                if_verbose(1, 'Accepting new output for os "'
                              + config.os + '".')
                expected_path += '-' + config.os
            else:
                if_verbose(1, 'Accepting new output.')

            write_file(expected_path, actual_raw)
            return True
        elif config.accept:
            if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
            os.remove(expected_path)
            return True
        else:
            return False

1678 # Note [Output comparison]
1679 #
1680 # We do two types of output comparison:
1681 #
1682 # 1. To decide whether a test has failed. We apply a `normaliser` and an
1683 # optional `whitespace_normaliser` to the expected and the actual
1684 # output, before comparing the two.
1685 #
1686 # 2. To show as a diff to the user when the test indeed failed. We apply
1687 # the same `normaliser` function to the outputs, to make the diff as
1688 # small as possible (only showing the actual problem). But we don't
1689 # apply the `whitespace_normaliser` here, because it might completely
1690 # squash all whitespace, making the diff unreadable. Instead we rely
1691 # on the `diff` program to ignore whitespace changes as much as
1692 # possible (#10152).
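#
# A hedged illustration (strings invented): with
# whitespace_normaliser=normalise_whitespace, the outputs
# "x  =  1\ny = 2" and "x = 1\ny = 2" both normalise to "x = 1 y = 2",
# so step 1 reports a match. Had they differed, the diff in step 2 would
# be computed on the un-squashed strings, with 'diff -uw' left to hide
# whitespace-only changes.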
1693
1694 def normalise_whitespace( str ):
1695 # Merge contiguous whitespace characters into a single space.
1696 return ' '.join(str.split())
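# A doctest-style sketch:
#
#   >>> normalise_whitespace('foo\t bar\n  baz')
#   'foo bar baz'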
1697
1698 callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')
1699
1700 def normalise_callstacks(s):
1701 opts = getTestOpts()
1702 def repl(matches):
1703 location = matches.group(1)
1704 location = normalise_slashes_(location)
1705 return ', called at {0}:<line>:<column> in <package-id>:'.format(location)
1706 # Ignore line number differences in call stacks (#10834).
1707 s = re.sub(callSite_re, repl, s)
1708 # Ignore the change in how we identify implicit call-stacks
1709 s = s.replace('from ImplicitParams', 'from HasCallStack')
1710 if not opts.keep_prof_callstacks:
1711 # Don't output prof callstacks. Test output should be
1712 # independent from the WAY we run the test.
1713 s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
1714 return s
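# A hedged illustration (stack line invented): a fragment such as
#
#   undefined, called at Main.hs:5:11 in main:Main
#
# becomes
#
#   undefined, called at Main.hs:<line>:<column> in <package-id>:Main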
1715
1716 tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)
1717
1718 def normalise_type_reps(str):
1719 """ Normalise out fingerprints from Typeable TyCon representations """
1720 return re.sub(tyCon_re, 'TyCon FINGERPRINT FINGERPRINT ', str)
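# A hedged illustration (fingerprint words invented):
#
#   >>> normalise_type_reps('TyCon 1234## 5678## base GHC.Types Int')
#   'TyCon FINGERPRINT FINGERPRINT base GHC.Types Int'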
1721
1722 def normalise_errmsg( str ):
1723 """Normalise error-messages emitted via stderr"""
1724 # IBM AIX's `ld` is a bit chatty
1725 if opsys('aix'):
1726 str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
1727 # remove " error:" and lower-case " Warning:" to make patch for
1728 # trac issue #10021 smaller
1729 str = modify_lines(str, lambda l: re.sub(' error:', '', l))
1730 str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
1731 str = normalise_callstacks(str)
1732 str = normalise_type_reps(str)
1733
1734 # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows);
1735 # the colon is there because it appears in error messages. This
1736 # hacky solution is used in place of more sophisticated filename
1737 # mangling.
1738 str = re.sub('([^\\s])\\.exe', '\\1', str)
1739
1740 # normalise slashes, minimise Windows/Unix filename differences
1741 str = re.sub('\\\\', '/', str)
1742
1743 # The inplace GHCs are called ghc-stage[123] to avoid filename
1744 # collisions, so we need to normalise that to just "ghc".
1745 str = re.sub('ghc-stage[123]', 'ghc', str)
1746
1747 # Error messages sometimes mention the integer implementation package
1748 str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
1749
1750 # Error messages sometimes contain this blurb which can vary
1751 # spuriously depending upon build configuration (e.g. based on integer
1752 # backend)
1753 str = re.sub('...plus ([a-z]+|[0-9]+) instances involving out-of-scope types',
1754 '...plus N instances involving out-of-scope types', str)
1755
1756 # Also filter out bullet characters. This is because bullets are used to
1757 # separate error sections, and tests shouldn't be sensitive to how the
1758 # division happens.
1759 bullet = '•'.encode('utf8') if isinstance(str, bytes) else '•'
1760 str = str.replace(bullet, '')
1761
1762 # Windows only: a bug in hsc2hs (Trac #9775) prevents stable output
1763 # for the testsuite. For now we filter out these warning messages to
1764 # get clean output.
1765 if config.msys:
1766 str = re.sub('Failed to remove file (.*); error= (.*)$', '', str)
1767 str = re.sub(r'DeleteFile "(.+)": permission denied \(Access is denied\.\)(.*)$', '', str)
1768
1769 return str
1770
1771 # Normalise a .prof file so that we can reasonably compare it against
1772 # a sample. None of the actual profiling data survives normalisation;
1773 # only the shape of the profile and the number of entries do.
1774 def normalise_prof (str):
1775 # strip everything up to the line beginning "COST CENTRE"
1776 str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)
1777
1778 # strip results for CAFs, these tend to change unpredictably
1779 str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)
1780
1781 # XXX Ignore Main.main. Sometimes this appears under CAF, and
1782 # sometimes under MAIN.
1783 str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)
1784
1785 # We have something like this:
1786 #
1787 # MAIN MAIN <built-in> 53 0 0.0 0.2 0.0 100.0
1788 # CAF Main <entire-module> 105 0 0.0 0.3 0.0 62.5
1789 # readPrec Main Main_1.hs:7:13-16 109 1 0.0 0.6 0.0 0.6
1790 # readPrec Main Main_1.hs:4:13-16 107 1 0.0 0.6 0.0 0.6
1791 # main Main Main_1.hs:(10,1)-(20,20) 106 1 0.0 20.2 0.0 61.0
1792 # == Main Main_1.hs:7:25-26 114 1 0.0 0.0 0.0 0.0
1793 # == Main Main_1.hs:4:25-26 113 1 0.0 0.0 0.0 0.0
1794 # showsPrec Main Main_1.hs:7:19-22 112 2 0.0 1.2 0.0 1.2
1795 # showsPrec Main Main_1.hs:4:19-22 111 2 0.0 0.9 0.0 0.9
1796 # readPrec Main Main_1.hs:7:13-16 110 0 0.0 18.8 0.0 18.8
1797 # readPrec Main Main_1.hs:4:13-16 108 0 0.0 19.9 0.0 19.9
1798 #
1799 # then we remove all the specific profiling data, leaving only the cost
1800 # centre name, module, src, and entries, to end up with this: (modulo
1801 # whitespace between columns)
1802 #
1803 # MAIN MAIN <built-in> 0
1804 # readPrec Main Main_1.hs:7:13-16 1
1805 # readPrec Main Main_1.hs:4:13-16 1
1806 # == Main Main_1.hs:7:25-26 1
1807 # == Main Main_1.hs:4:25-26 1
1808 # showsPrec Main Main_1.hs:7:19-22 2
1809 # showsPrec Main Main_1.hs:4:19-22 2
1810 # readPrec Main Main_1.hs:7:13-16 0
1811 # readPrec Main Main_1.hs:4:13-16 0
1812
1813 # Match nine whitespace-separated fields and keep fields 1 (cost-centre),
1814 # 2 (module), 3 (src), and 5 (entries). SCC names can't have whitespace, so
1815 # this works fine.
1816 str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
1817 '\\1 \\2 \\3 \\5\n', str)
1818 return str
1819
1820 def normalise_slashes_( str ):
1821 str = re.sub('\\\\', '/', str)
1822 str = re.sub('//', '/', str)
1823 return str
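# A doctest-style sketch:
#
#   >>> normalise_slashes_(r'dir\sub//file')
#   'dir/sub/file'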
1824
1825 def normalise_exe_( str ):
1826 str = re.sub(r'\.exe', '', str)
1827 return str
1828
1829 def normalise_output( str ):
1830 # remove " error:" and lower-case " Warning:" to make patch for
1831 # trac issue #10021 smaller
1832 str = modify_lines(str, lambda l: re.sub(' error:', '', l))
1833 str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
1834 # Remove a .exe extension (for Windows)
1835 # This can occur in error messages generated by the program.
1836 str = re.sub('([^\\s])\\.exe', '\\1', str)
1837 str = normalise_callstacks(str)
1838 str = normalise_type_reps(str)
1839 return str
1840
1841 def normalise_asm( str ):
1842 lines = str.split('\n')
1843 # Only keep instructions and labels not starting with a dot.
1844 metadata = re.compile('^[ \t]*\\..*$')
1845 out = []
1846 for line in lines:
1847 # Drop metadata directives (e.g. ".type")
1848 if not metadata.match(line):
1849 line = re.sub('@plt', '', line)
1850 instr = line.lstrip().split()
1851 # Drop empty lines.
1852 if not instr:
1853 continue
1854 # Drop operands, except for call instructions.
1855 elif instr[0] == 'call':
1856 out.append(instr[0] + ' ' + instr[1])
1857 else:
1858 out.append(instr[0])
1859 out = '\n'.join(out)
1860 return out
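# A hedged illustration (assembly fragment invented):
#
#   input:  "\t.text\nfoo:\n\tmovq %rdi, %rax\n\tcall bar@plt\n"
#   output: "foo:\nmovq\ncall bar"
#
# i.e. metadata directives and operands are dropped, except that call
# targets are kept.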
1861
1862 def if_verbose( n, s ):
1863 if config.verbose >= n:
1864 print(s)
1865
1866 def dump_file(f):
1867 try:
1868 with io.open(f) as file:
1869 print(file.read())
1870 except Exception:
1871 print('')
1872
1873 def runCmd(cmd, stdin=None, stdout=None, stderr=None, timeout_multiplier=1.0, print_output=False):
1874 timeout_prog = strip_quotes(config.timeout_prog)
1875 timeout = str(int(ceil(config.timeout * timeout_multiplier)))
1876
1877 # Format cmd using config. Example: cmd='{hpc} report A.tix'
1878 cmd = cmd.format(**config.__dict__)
1879 if_verbose(3, cmd + ('< ' + os.path.basename(stdin) if stdin else ''))
1880
1881 stdin_file = io.open(stdin, 'rb') if stdin else None
1882 stdout_buffer = b''
1883 stderr_buffer = b''
1884
1885 hStdErr = subprocess.PIPE
1886 if stderr is subprocess.STDOUT:
1887 hStdErr = subprocess.STDOUT
1888
1889 try:
1890 # cmd is a complex command in Bourne-shell syntax,
1891 # e.g. (cd . && 'C:/users/simonpj/HEAD/inplace/bin/ghc-stage2' ...etc).
1892 # Hence it must ultimately be run by a Bourne shell; it's the timeout
1893 # program's job to invoke that shell.
1894
1895 r = subprocess.Popen([timeout_prog, timeout, cmd],
1896 stdin=stdin_file,
1897 stdout=subprocess.PIPE,
1898 stderr=hStdErr,
1899 env=ghc_env)
1900
1901 stdout_buffer, stderr_buffer = r.communicate()
1902 finally:
1903 if stdin_file:
1904 stdin_file.close()
1905 if config.verbose >= 1 and print_output:
1906 if stdout_buffer:
1907 sys.stdout.buffer.write(stdout_buffer)
1908 if stderr_buffer:
1909 sys.stderr.buffer.write(stderr_buffer)
1910
1911 if stdout:
1912 with io.open(stdout, 'wb') as f:
1913 f.write(stdout_buffer)
1914 if stderr:
1915 if stderr is not subprocess.STDOUT:
1916 with io.open(stderr, 'wb') as f:
1917 f.write(stderr_buffer)
1918
1919 if r.returncode == 98:
1920 # The python timeout program uses 98 to signal that ^C was pressed
1921 stopNow()
1922 if r.returncode == 99 and getTestOpts().exit_code != 99:
1923 # Only print a message when timeout killed the process unexpectedly.
1924 if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
1925 return r.returncode
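# A hedged usage sketch: '{hpc}' below is expanded from the testsuite
# config by runCmd itself (via the .format call above); the command is
# run under the timeout program and its exit code is returned.
#
#   exit_code = runCmd('{hpc} report A.tix', print_output=True)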
1926
1927 # -----------------------------------------------------------------------------
1928 # Check whether Ghostscript is available for validating the output of hp2ps.
1929
1930 def genGSCmd(psfile):
1931 return '{{gs}} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "{0}"'.format(psfile)
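# For example, genGSCmd('foo.ps') yields
#
#   {gs} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "foo.ps"
#
# where the doubled braces leave a literal '{gs}' for runCmd to expand
# from the config.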
1932
1933 def gsNotWorking():
1934 global gs_working
1935 print("GhostScript not available for hp2ps tests")
1936
1937 global gs_working
1938 gs_working = False
1939 if config.have_profiling:
1940 if config.gs != '':
1941 resultGood = runCmd(genGSCmd(config.top + '/config/good.ps'))
1942 if resultGood == 0:
1943 resultBad = runCmd(genGSCmd(config.top + '/config/bad.ps') +
1944 ' >/dev/null 2>&1')
1945 if resultBad != 0:
1946 print("GhostScript available for hp2ps tests")
1947 gs_working = True
1948 else:
1949 gsNotWorking()
1950 else:
1951 gsNotWorking()
1952 else:
1953 gsNotWorking()
1954
1955 def add_suffix( name, suffix ):
1956 if suffix == '':
1957 return name
1958 else:
1959 return name + '.' + suffix
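# For example: add_suffix('T123', 'stdout') == 'T123.stdout', while an
# empty suffix leaves the name unchanged: add_suffix('T123', '') == 'T123'.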
1960
1961 def add_hs_lhs_suffix(name):
1962 if getTestOpts().c_src:
1963 return add_suffix(name, 'c')
1964 elif getTestOpts().cmm_src:
1965 return add_suffix(name, 'cmm')
1966 elif getTestOpts().objc_src:
1967 return add_suffix(name, 'm')
1968 elif getTestOpts().objcpp_src:
1969 return add_suffix(name, 'mm')
1970 elif getTestOpts().literate:
1971 return add_suffix(name, 'lhs')
1972 else:
1973 return add_suffix(name, 'hs')
1974
1975 def replace_suffix( name, suffix ):
1976 base, suf = os.path.splitext(name)
1977 return base + '.' + suffix
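# For example: replace_suffix('Foo.hs', 'o') == 'Foo.o'.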
1978
1979 def in_testdir(name, suffix=''):
1980 return os.path.join(getTestOpts().testdir, add_suffix(name, suffix))
1981
1982 def in_srcdir(name, suffix=''):
1983 return os.path.join(getTestOpts().srcdir, add_suffix(name, suffix))
1984
1985 # Finding the sample output. The filename is of the form
1986 #
1987 # <test>.stdout[-ws-<wordsize>][-<platform>|-<os>]
1988 #
1989 def find_expected_file(name, suff):
1990 basename = add_suffix(name, suff)
1991
1992 files = [basename + ws + plat
1993 for plat in ['-' + config.platform, '-' + config.os, '']
1994 for ws in ['-ws-' + config.wordsize, '']]
1995
1996 for f in files:
1997 if os.path.exists(in_srcdir(f)):
1998 return f
1999
2000 return basename
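# For example, on a hypothetical x86_64 Linux configuration (platform
# 'x86_64-unknown-linux', os 'linux', wordsize '64'),
# find_expected_file('T123', 'stdout') probes, in order:
#
#   T123.stdout-ws-64-x86_64-unknown-linux
#   T123.stdout-x86_64-unknown-linux
#   T123.stdout-ws-64-linux
#   T123.stdout-linux
#   T123.stdout-ws-64
#   T123.stdout
#
# falling back to 'T123.stdout' if none exists in the source directory.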
2001
2002 if config.msys:
2003 import stat
2004 def cleanup():
2005 testdir = getTestOpts().testdir
2006 max_attempts = 5
2007 retries = max_attempts
2008 def on_error(function, path, excinfo):
2009 # At least one test (T11489) removes the write bit from a file it
2010 # produces. Windows refuses to delete read-only files with a
2011 # permission error. Try setting the write bit and try again.
2012 os.chmod(path, stat.S_IWRITE)
2013 function(path)
2014
2015 # On Windows we have to retry the delete a couple of times.
2016 # The reason for this is that a FileDelete command just marks a
2017 # file for deletion. The file is really only removed when the last
2018 # handle to the file is closed. Unfortunately there are a lot of
2019 # system services that can have a file temporarily opened using a shared
2020 # read-only lock, such as the built-in anti-virus and search indexer.
2021 #
2022 # We can't really guarantee that these are all off, so all we can do is
2023 # retry, waiting a bit, whenever the folder still exists after an rmtree.
2024 #
2025 # From what I've seen of the tests on the CI servers, this is relatively
2026 # rare, so overall we won't be retrying a lot. If after a reasonable amount
2027 # of time the folder is still locked, abort the current test by throwing an
2028 # exception, so that it doesn't fail with an even more cryptic error.
2029 #
2030 # See Trac #13162
2031 exception = None
2032 while retries > 0 and os.path.exists(testdir):
2033 time.sleep((max_attempts-retries)*6)
2034 try:
2035 shutil.rmtree(testdir, onerror=on_error, ignore_errors=False)
2036 except Exception as e:
2037 exception = e
2038 retries -= 1
2039
2040 if retries == 0 and os.path.exists(testdir):
2041 raise Exception("Unable to remove folder '%s': %s\nUnable to start current test."
2042 % (testdir, exception))
2043 else:
2044 def cleanup():
2045 testdir = getTestOpts().testdir
2046 if os.path.exists(testdir):
2047 shutil.rmtree(testdir, ignore_errors=False)
2048
2049
2050 # -----------------------------------------------------------------------------
2051 # Yield all the files ending in '.T' below the given root directories.
2052
2053 def findTFiles(roots):
2054 for root in roots:
2055 for path, dirs, files in os.walk(root, topdown=True):
2056 # Never pick up .T files in uncleaned .run directories.
2057 dirs[:] = [dir for dir in sorted(dirs)
2058 if not dir.endswith(testdir_suffix)]
2059 for filename in files:
2060 if filename.endswith('.T'):
2061 yield os.path.join(path, filename)
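# A hedged sketch (paths invented); note that findTFiles is a generator:
#
#   >>> list(findTFiles(['tests/codeGen']))
#   ['tests/codeGen/should_run/all.T', ...]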
2062
2063 # -----------------------------------------------------------------------------
2064 # Output a test summary to the specified file object
2065
2066 def summary(t, file, short=False, color=False):
2067
2068 file.write('\n')
2069 printUnexpectedTests(file,
2070 [t.unexpected_passes, t.unexpected_failures,
2071 t.unexpected_stat_failures, t.framework_failures])
2072
2073 if short:
2074 # Only print the list of unexpected tests above.
2075 return
2076
2077 colorize = lambda s: s
2078 if color:
2079 if len(t.unexpected_failures) > 0 or \
2080 len(t.unexpected_stat_failures) > 0 or \
2081 len(t.framework_failures) > 0:
2082 colorize = str_fail
2083 else:
2084 colorize = str_pass
2085
2086 file.write(colorize('SUMMARY') + ' for test run started at '
2087 + time.strftime("%c %Z", t.start_time) + '\n'
2088 + str(datetime.timedelta(seconds=
2089 round(time.time() - time.mktime(t.start_time)))).rjust(8)
2090 + ' spent to go through\n'
2091 + repr(t.total_tests).rjust(8)
2092 + ' total tests, which gave rise to\n'
2093 + repr(t.total_test_cases).rjust(8)
2094 + ' test cases, of which\n'
2095 + repr(t.n_tests_skipped).rjust(8)
2096 + ' were skipped\n'
2097 + '\n'
2098 + repr(len(t.missing_libs)).rjust(8)
2099 + ' had missing libraries\n'
2100 + repr(t.n_expected_passes).rjust(8)
2101 + ' expected passes\n'
2102 + repr(t.n_expected_failures).rjust(8)
2103 + ' expected failures\n'
2104 + '\n'
2105 + repr(len(t.framework_failures)).rjust(8)
2106 + ' caused framework failures\n'
2107 + repr(len(t.framework_warnings)).rjust(8)
2108 + ' caused framework warnings\n'
2109 + repr(len(t.unexpected_passes)).rjust(8)
2110 + ' unexpected passes\n'
2111 + repr(len(t.unexpected_failures)).rjust(8)
2112 + ' unexpected failures\n'
2113 + repr(len(t.unexpected_stat_failures)).rjust(8)
2114 + ' unexpected stat failures\n'
2115 + '\n')
2116
2117 if t.unexpected_passes:
2118 file.write('Unexpected passes:\n')
2119 printTestInfosSummary(file, t.unexpected_passes)
2120
2121 if t.unexpected_failures:
2122 file.write('Unexpected failures:\n')
2123 printTestInfosSummary(file, t.unexpected_failures)
2124
2125 if t.unexpected_stat_failures:
2126 file.write('Unexpected stat failures:\n')
2127 printTestInfosSummary(file, t.unexpected_stat_failures)
2128
2129 if t.framework_failures:
2130 file.write('Framework failures:\n')
2131 printTestInfosSummary(file, t.framework_failures)
2132
2133 if t.framework_warnings:
2134 file.write('Framework warnings:\n')
2135 printTestInfosSummary(file, t.framework_warnings)
2136
2137 if stopping():
2138 file.write('WARNING: Testsuite run was terminated early\n')
2139
2140 def printUnexpectedTests(file, testInfoss):
2141 unexpected = set(name for testInfos in testInfoss
2142 for (_, name, _, _) in testInfos
2143 if not name.endswith('.T'))
2144 if unexpected:
2145 file.write('Unexpected results from:\n')
2146 file.write('TEST="' + ' '.join(sorted(unexpected)) + '"\n')
2147 file.write('\n')
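# Produces output of the form (test names invented):
#
#   Unexpected results from:
#   TEST="T123 T456"
#
# presumably so the quoted list can be passed back via the TEST make
# variable to re-run just the offending tests.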
2148
2149 def printTestInfosSummary(file, testInfos):
2150 maxDirLen = max(len(directory) for (directory, _, _, _) in testInfos)
2151 for (directory, name, reason, way) in testInfos:
2152 directory = directory.ljust(maxDirLen)
2153 file.write(' {directory} {name} [{reason}] ({way})\n'.format(**locals()))
2154 file.write('\n')
2155
2156 def modify_lines(s, f):
2157 s = '\n'.join([f(l) for l in s.splitlines()])
2158 if s and s[-1] != '\n':
2159 # Prevent '\ No newline at end of file' warnings when diffing.
2160 s += '\n'
2161 return s
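# A doctest-style sketch:
#
#   >>> modify_lines('a\nb', lambda l: l.upper())
#   'A\nB\n'
#
# (the trailing newline is added deliberately; see the comment in the
# function body)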