Revert "testsuite: Fix broken_without_gmp"
[ghc.git] / testsuite / driver / testlib.py
1 # coding=utf8
2 #
3 # (c) Simon Marlow 2002
4 #
5
6 import io
7 import shutil
8 import os
9 import re
10 import traceback
11 import time
12 import datetime
13 import copy
14 import glob
15 import sys
16 from math import ceil, trunc
17 from pathlib import PurePath
18 import collections
19 import subprocess
20
21 from testglobals import config, ghc_env, default_testopts, brokens, t
22 from testutil import strip_quotes, lndir, link_or_copy_file, passed, failBecause, str_fail, str_pass
23 import perf_notes as Perf
24 from perf_notes import MetricChange
25 extra_src_files = {'T4198': ['exitminus1.c']} # TODO: See #12223
26
27 global pool_sema
28 if config.use_threads:
29 import threading
30 pool_sema = threading.BoundedSemaphore(value=config.threads)
31
32 global wantToStop
33 wantToStop = False
34
35 def stopNow():
36 global wantToStop
37 wantToStop = True
38
39 def stopping():
40 return wantToStop
41
42
43 # Options valid for the current test only (these get reset to
44 # testdir_testopts after each test).
45
46 global testopts_local
47 if config.use_threads:
48 testopts_local = threading.local()
49 else:
50 class TestOpts_Local:
51 pass
52 testopts_local = TestOpts_Local()
53
54 def getTestOpts():
55 return testopts_local.x
56
57 def setLocalTestOpts(opts):
58 global testopts_local
59 testopts_local.x=opts
60
61 def isCompilerStatsTest():
62 opts = getTestOpts()
63 return bool(opts.is_compiler_stats_test)
64
65 def isStatsTest():
66 opts = getTestOpts()
67 return bool(opts.stats_range_fields)
68
69
70 # This can be called at the top of a file of tests, to set default test options
71 # for the following tests.
72 def setTestOpts( f ):
73 global thisdir_settings
74 thisdir_settings = [thisdir_settings, f]
75
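# For illustration (hypothetical flags), a .T file can set directory-wide
# defaults before its test definitions:
#
#   setTestOpts(extra_hc_opts('-Wall'))
#   setTestOpts(omit_ways(['ghci']))
#
# Every test() call later in the same file then inherits these settings.
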
76 # -----------------------------------------------------------------------------
77 # Canned setup functions for common cases. eg. for a test you might say
78 #
79 # test('test001', normal, compile, [''])
80 #
81 # to run it without any options, but change it to
82 #
83 # test('test001', expect_fail, compile, [''])
84 #
85 # to expect failure for this test.
86 #
87 # type TestOpt = (name :: String, opts :: Object) -> IO ()
88
89 def normal( name, opts ):
    return
91
92 def skip( name, opts ):
93 opts.skip = True
94
95 def expect_fail( name, opts ):
96 # The compiler, testdriver, OS or platform is missing a certain
97 # feature, and we don't plan to or can't fix it now or in the
98 # future.
    opts.expect = 'fail'
100
101 def reqlib( lib ):
102 return lambda name, opts, l=lib: _reqlib (name, opts, l )
103
104 def stage1(name, opts):
105 # See Note [Why is there no stage1 setup function?]
106 framework_fail(name, 'stage1 setup function does not exist',
107 'add your test to testsuite/tests/stage1 instead')
108
109 # Note [Why is there no stage1 setup function?]
110 #
111 # Presumably a stage1 setup function would signal that the stage1
112 # compiler should be used to compile a test.
113 #
114 # Trouble is, the path to the compiler + the `ghc --info` settings for
115 # that compiler are currently passed in from the `make` part of the
116 # testsuite driver.
117 #
118 # Switching compilers in the Python part would be entirely too late, as
119 # all ghc_with_* settings would be wrong. See config/ghc for possible
120 # consequences (for example, config.run_ways would still be
121 # based on the default compiler, quite likely causing ./validate --slow
122 # to fail).
123 #
124 # It would be possible to let the Python part of the testsuite driver
125 # make the call to `ghc --info`, but doing so would require quite some
126 # work. Care has to be taken to not affect the run_command tests for
127 # example, as they also use the `ghc --info` settings:
128 # quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
129 #
130 # If you want a test to run using the stage1 compiler, add it to the
131 # testsuite/tests/stage1 directory. Validate runs the tests in that
132 # directory with `make stage=1`.
133
134 # Cache the results of looking to see if we have a library or not.
135 # This makes quite a difference, especially on Windows.
136 have_lib = {}
137
138 def _reqlib( name, opts, lib ):
139 if lib in have_lib:
140 got_it = have_lib[lib]
141 else:
142 cmd = strip_quotes(config.ghc_pkg)
143 p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
144 stdout=subprocess.PIPE,
145 stderr=subprocess.PIPE,
146 env=ghc_env)
147 # read from stdout and stderr to avoid blocking due to
148 # buffers filling
149 p.communicate()
150 r = p.wait()
151 got_it = r == 0
152 have_lib[lib] = got_it
153
154 if not got_it:
155 opts.expect = 'missing-lib'
156
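# For illustration (hypothetical test and package names), a test that needs
# an extra package is guarded like this:
#
#   test('T9999', reqlib('random'), compile_and_run, [''])
#
# If ghc-pkg cannot find the package, opts.expect is set to 'missing-lib'
# and the test is reported as missing a library rather than as failing.
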
157 def req_haddock( name, opts ):
158 if not config.haddock:
159 opts.expect = 'missing-lib'
160
161 def req_profiling( name, opts ):
162 '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
163 if not config.have_profiling:
164 opts.expect = 'fail'
165
166 def req_shared_libs( name, opts ):
167 if not config.have_shared_libs:
168 opts.expect = 'fail'
169
170 def req_interp( name, opts ):
171 if not config.have_interp:
172 opts.expect = 'fail'
173
174 def req_smp( name, opts ):
175 if not config.have_smp:
176 opts.expect = 'fail'
177
178 def ignore_stdout(name, opts):
179 opts.ignore_stdout = True
180
181 def ignore_stderr(name, opts):
182 opts.ignore_stderr = True
183
184 def combined_output( name, opts ):
185 opts.combined_output = True
186
187 # -----
188
189 def expect_fail_for( ways ):
190 return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )
191
192 def _expect_fail_for( name, opts, ways ):
193 opts.expect_fail_for = ways
194
def expect_broken( bug ):
    # This test is expected not to work due to the indicated Trac bug
    # number.
    return lambda name, opts, b=bug: _expect_broken (name, opts, b )

def _expect_broken( name, opts, bug ):
    record_broken(name, opts, bug)
    opts.expect = 'fail'
203
204 def expect_broken_for( bug, ways ):
205 return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )
206
207 def _expect_broken_for( name, opts, bug, ways ):
208 record_broken(name, opts, bug)
209 opts.expect_fail_for = ways
210
def record_broken(name, opts, bug):
    me = (bug, opts.testdir, name)
    if me not in brokens:
        brokens.append(me)
215
def broken_without_gmp(name, opts):
    # Many tests sadly break with integer-simple due to GHCi's ignorance of it.
    # Note that when() merely returns a setup function; it must be applied
    # to (name, opts) here for the expectation to take effect.
    when(config.integer_backend != "integer-gmp",
         expect_broken(16043))(name, opts)
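
# For illustration (hypothetical test name), it is used like any other
# setup function:
#
#   test('T9999', broken_without_gmp, compile_and_run, [''])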
220
221 def _expect_pass(way):
222 # Helper function. Not intended for use in .T files.
223 opts = getTestOpts()
224 return opts.expect == 'pass' and way not in opts.expect_fail_for
225
226 # -----
227
228 def omit_ways( ways ):
229 return lambda name, opts, w=ways: _omit_ways( name, opts, w )
230
231 def _omit_ways( name, opts, ways ):
232 opts.omit_ways = ways
233
234 # -----
235
236 def only_ways( ways ):
237 return lambda name, opts, w=ways: _only_ways( name, opts, w )
238
239 def _only_ways( name, opts, ways ):
240 opts.only_ways = ways
241
242 # -----
243
244 def extra_ways( ways ):
245 return lambda name, opts, w=ways: _extra_ways( name, opts, w )
246
247 def _extra_ways( name, opts, ways ):
248 opts.extra_ways = ways
249
250 # -----
251
252 def set_stdin( file ):
    return lambda name, opts, f=file: _set_stdin(name, opts, f)
254
255 def _set_stdin( name, opts, f ):
256 opts.stdin = f
257
258 # -----
259
260 def exit_code( val ):
    return lambda name, opts, v=val: _exit_code(name, opts, v)
262
263 def _exit_code( name, opts, v ):
264 opts.exit_code = v
265
def signal_exit_code( val ):
    if opsys('solaris2'):
        return exit_code( val )
    else:
        # When an application running on Linux receives a fatal error
        # signal, its exit code is encoded as 128 + the signal number.
        # See http://www.tldp.org/LDP/abs/html/exitcodes.html
        # Mac OS X appears to behave the same way; at least, the Mac OS X
        # builder's behaviour suggests so.
        return exit_code( val+128 )
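
# For illustration (hypothetical test name): a program expected to die with
# SIGABRT (signal 6) would be declared as
#
#   test('T9999', signal_exit_code(6), compile_and_run, [''])
#
# i.e. expected exit code 134 (128 + 6) on Linux, but 6 on Solaris.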
276
277 # -----
278
279 def compile_timeout_multiplier( val ):
280 return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)
281
282 def _compile_timeout_multiplier( name, opts, v ):
283 opts.compile_timeout_multiplier = v
284
285 def run_timeout_multiplier( val ):
286 return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)
287
288 def _run_timeout_multiplier( name, opts, v ):
289 opts.run_timeout_multiplier = v
290
291 # -----
292
293 def extra_run_opts( val ):
    return lambda name, opts, v=val: _extra_run_opts(name, opts, v)
295
296 def _extra_run_opts( name, opts, v ):
297 opts.extra_run_opts = v
298
299 # -----
300
301 def extra_hc_opts( val ):
    return lambda name, opts, v=val: _extra_hc_opts(name, opts, v)
303
304 def _extra_hc_opts( name, opts, v ):
305 opts.extra_hc_opts = v
306
307 # -----
308
309 def extra_clean( files ):
310 # TODO. Remove all calls to extra_clean.
311 return lambda _name, _opts: None
312
313 def extra_files(files):
314 return lambda name, opts: _extra_files(name, opts, files)
315
316 def _extra_files(name, opts, files):
317 opts.extra_files.extend(files)
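
# For illustration (hypothetical file names): files whose names do not start
# with the test's name are not copied automatically and must be listed:
#
#   test('T9999', extra_files(['Helper.hs', 'data.txt']),
#        multimod_compile_and_run, ['T9999', '-v0'])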
318
319 # -----
320
321 # Defaults to "test everything, and only break on extreme cases"
322 #
323 # The inputs to this function are slightly interesting:
324 # metric can be either:
325 # - 'all', in which case all 3 possible metrics are collected and compared.
326 # - The specific metric one wants to use in the test.
327 # - A list of the metrics one wants to use in the test.
328 #
329 # Deviation defaults to 20% because the goal is correctness over performance.
330 # The testsuite should avoid breaking when there is not an actual error.
331 # Instead, the testsuite should notify of regressions in a non-breaking manner.
332 #
333 # collect_compiler_stats is used when the metrics collected are about the compiler.
334 # collect_stats is used in the majority case when the metrics to be collected
335 # are about the performance of the runtime code generated by the compiler.
336 def collect_compiler_stats(metric='all',deviation=20):
337 return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m,d, True)
338
339 def collect_stats(metric='all', deviation=20):
340 return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d)
341
342 def testing_metrics():
343 return ['bytes allocated', 'peak_megabytes_allocated', 'max_bytes_used']
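
# For illustration (hypothetical test name): measure the allocations of the
# compiled program, allowing 5% drift against the recorded baseline:
#
#   test('T9999', collect_stats('bytes allocated', 5), compile_and_run, ['-O'])
#
# collect_compiler_stats('bytes allocated', 5) would instead measure the
# compiler's own allocations while building the test.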
344
345 # This is an internal function that is used only in the implementation.
346 # 'is_compiler_stats_test' is somewhat of an unfortunate name.
347 # If the boolean is set to true, it indicates that this test is one that
348 # measures the performance numbers of the compiler.
349 # As this is a fairly rare case in the testsuite, it defaults to false to
350 # indicate that it is a 'normal' performance test.
351 def _collect_stats(name, opts, metric, deviation, is_compiler_stats_test=False):
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        # failBecause only constructs a result value; report the bad name
        # through framework_fail, as test() does.
        framework_fail(name, 'bad_name', 'This test has an invalid name.')
354
355 tests = Perf.get_perf_stats('HEAD^')
356
357 # Might have multiple metrics being measured for a single test.
358 test = [t for t in tests if t.test == name]
359
360 if tests == [] or test == []:
361 # There are no prior metrics for this test.
362 if isinstance(metric, str):
363 if metric == 'all':
364 for field in testing_metrics():
365 opts.stats_range_fields[field] = None
366 else:
367 opts.stats_range_fields[metric] = None
368 if isinstance(metric, list):
369 for field in metric:
370 opts.stats_range_fields[field] = None
371
372 return
373
374 if is_compiler_stats_test:
375 opts.is_compiler_stats_test = True
376
377 # Compiler performance numbers change when debugging is on, making the results
378 # useless and confusing. Therefore, skip if debugging is on.
379 if config.compiler_debugged and is_compiler_stats_test:
        opts.skip = True
381
382 # get the average value of the given metric from test
383 def get_avg_val(metric_2):
384 metric_2_metrics = [float(t.value) for t in test if t.metric == metric_2]
385 return sum(metric_2_metrics) / len(metric_2_metrics)
386
387 # 'all' is a shorthand to test for bytes allocated, peak megabytes allocated, and max bytes used.
388 if isinstance(metric, str):
389 if metric == 'all':
390 for field in testing_metrics():
391 opts.stats_range_fields[field] = (get_avg_val(field), deviation)
392 return
393 else:
394 opts.stats_range_fields[metric] = (get_avg_val(metric), deviation)
395 return
396
397 if isinstance(metric, list):
398 for field in metric:
399 opts.stats_range_fields[field] = (get_avg_val(field), deviation)
400
401 # -----
402
403 def when(b, f):
404 # When list_brokens is on, we want to see all expect_broken calls,
405 # so we always do f
406 if b or config.list_broken:
407 return f
408 else:
409 return normal
410
411 def unless(b, f):
412 return when(not b, f)
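
# For illustration (hypothetical test name; way names are illustrative),
# these combine with the config predicates defined below:
#
#   test('T9999', [when(opsys('mingw32'), skip),
#                  unless(have_dynamic(), omit_ways(['dyn']))],
#        compile_and_run, [''])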
413
414 def doing_ghci():
415 return 'ghci' in config.run_ways
416
417 def ghc_dynamic():
418 return config.ghc_dynamic
419
420 def fast():
421 return config.speed == 2
422
423 def platform( plat ):
424 return config.platform == plat
425
426 def opsys( os ):
427 return config.os == os
428
429 def arch( arch ):
430 return config.arch == arch
431
432 def wordsize( ws ):
433 return config.wordsize == str(ws)
434
435 def msys( ):
436 return config.msys
437
438 def cygwin( ):
439 return config.cygwin
440
441 def have_vanilla( ):
442 return config.have_vanilla
443
444 def have_dynamic( ):
445 return config.have_dynamic
446
447 def have_profiling( ):
448 return config.have_profiling
449
450 def in_tree_compiler( ):
451 return config.in_tree_compiler
452
453 def unregisterised( ):
454 return config.unregisterised
455
456 def compiler_profiled( ):
457 return config.compiler_profiled
458
459 def compiler_debugged( ):
460 return config.compiler_debugged
461
462 def have_gdb( ):
463 return config.have_gdb
464
465 def have_readelf( ):
466 return config.have_readelf
467
468 # ---
469
470 def high_memory_usage(name, opts):
471 opts.alone = True
472
473 # If a test is for a multi-CPU race, then running the test alone
474 # increases the chance that we'll actually see it.
475 def multi_cpu_race(name, opts):
476 opts.alone = True
477
478 # ---
479 def literate( name, opts ):
480 opts.literate = True
481
482 def c_src( name, opts ):
483 opts.c_src = True
484
485 def objc_src( name, opts ):
486 opts.objc_src = True
487
488 def objcpp_src( name, opts ):
489 opts.objcpp_src = True
490
491 def cmm_src( name, opts ):
492 opts.cmm_src = True
493
494 def outputdir( odir ):
495 return lambda name, opts, d=odir: _outputdir(name, opts, d)
496
497 def _outputdir( name, opts, odir ):
    opts.outputdir = odir
499
500 # ----
501
502 def pre_cmd( cmd ):
503 return lambda name, opts, c=cmd: _pre_cmd(name, opts, cmd)
504
505 def _pre_cmd( name, opts, cmd ):
506 opts.pre_cmd = cmd
507
508 # ----
509
510 def cmd_prefix( prefix ):
511 return lambda name, opts, p=prefix: _cmd_prefix(name, opts, prefix)
512
513 def _cmd_prefix( name, opts, prefix ):
    opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd
515
516 # ----
517
518 def cmd_wrapper( fun ):
519 return lambda name, opts, f=fun: _cmd_wrapper(name, opts, fun)
520
521 def _cmd_wrapper( name, opts, fun ):
522 opts.cmd_wrapper = fun
523
524 # ----
525
526 def compile_cmd_prefix( prefix ):
527 return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, prefix)
528
529 def _compile_cmd_prefix( name, opts, prefix ):
530 opts.compile_cmd_prefix = prefix
531
532 # ----
533
534 def check_stdout( f ):
535 return lambda name, opts, f=f: _check_stdout(name, opts, f)
536
537 def _check_stdout( name, opts, f ):
538 opts.check_stdout = f
539
540 def no_check_hp(name, opts):
541 opts.check_hp = False
542
543 # ----
544
545 def filter_stdout_lines( regex ):
546 """ Filter lines of stdout with the given regular expression """
547 def f( name, opts ):
548 _normalise_fun(name, opts, lambda s: '\n'.join(re.findall(regex, s)))
549 return f
550
551 def normalise_slashes( name, opts ):
552 _normalise_fun(name, opts, normalise_slashes_)
553
554 def normalise_exe( name, opts ):
555 _normalise_fun(name, opts, normalise_exe_)
556
557 def normalise_fun( *fs ):
558 return lambda name, opts: _normalise_fun(name, opts, fs)
559
560 def _normalise_fun( name, opts, *fs ):
561 opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)
562
563 def normalise_errmsg_fun( *fs ):
564 return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)
565
566 def _normalise_errmsg_fun( name, opts, *fs ):
567 opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)
568
569 def check_errmsg(needle):
570 def norm(str):
571 if needle in str:
572 return "%s contained in -ddump-simpl\n" % needle
573 else:
574 return "%s not contained in -ddump-simpl\n" % needle
575 return normalise_errmsg_fun(norm)
576
577 def grep_errmsg(needle):
578 def norm(str):
579 return "".join(filter(lambda l: re.search(needle, l), str.splitlines(True)))
580 return normalise_errmsg_fun(norm)
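
# For illustration (hypothetical test name and needle): to assert that the
# simplifier output mentions 'foo', compare a one-line verdict against the
# expected stderr file:
#
#   test('T9999', check_errmsg('foo'), compile, ['-O -ddump-simpl'])
#
# grep_errmsg('foo') instead keeps only the stderr lines matching the regex.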
581
582 def normalise_whitespace_fun(f):
583 return lambda name, opts: _normalise_whitespace_fun(name, opts, f)
584
585 def _normalise_whitespace_fun(name, opts, f):
586 opts.whitespace_normaliser = f
587
588 def normalise_version_( *pkgs ):
589 def normalise_version__( str ):
590 return re.sub('(' + '|'.join(map(re.escape,pkgs)) + ')-[0-9.]+',
591 '\\1-<VERSION>', str)
592 return normalise_version__
593
594 def normalise_version( *pkgs ):
595 def normalise_version__( name, opts ):
596 _normalise_fun(name, opts, normalise_version_(*pkgs))
597 _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
598 return normalise_version__
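
# For illustration (hypothetical test name): output mentioning e.g.
# "base-4.12.0.0" can be made version-independent with
#
#   test('T9999', normalise_version('base'), compile_fail, [''])
#
# which rewrites it to "base-<VERSION>" in both the stdout and stderr
# comparisons.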
599
600 def normalise_drive_letter(name, opts):
601 # Windows only. Change D:\\ to C:\\.
602 _normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))
603
604 def keep_prof_callstacks(name, opts):
605 """Keep profiling callstacks.
606
607 Use together with `only_ways(prof_ways)`.
608 """
609 opts.keep_prof_callstacks = True
610
611 def join_normalisers(*a):
612 """
613 Compose functions, flattening sequences.
614
615 join_normalisers(f1,[f2,f3],f4)
616
617 is the same as
618
619 lambda x: f1(f2(f3(f4(x))))
620 """
621
622 def flatten(l):
623 """
624 Taken from http://stackoverflow.com/a/2158532/946226
625 """
626 for el in l:
627 if (isinstance(el, collections.Iterable)
628 and not isinstance(el, (bytes, str))):
629 for sub in flatten(el):
630 yield sub
631 else:
632 yield el
633
634 a = flatten(a)
635
636 fn = lambda x:x # identity function
637 for f in a:
638 assert callable(f)
639 fn = lambda x,f=f,fn=fn: fn(f(x))
640 return fn
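
# For illustration, composition is right-to-left over the flattened
# argument list:
#
#   norm = join_normalisers(normalise_slashes_, [normalise_whitespace])
#   # norm(s) == normalise_slashes_(normalise_whitespace(s))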
641
642 # ----
643 # Function for composing two opt-fns together
644
645 def executeSetups(fs, name, opts):
646 if type(fs) is list:
647 # If we have a list of setups, then execute each one
648 for f in fs:
649 executeSetups(f, name, opts)
650 else:
651 # fs is a single function, so just apply it
652 fs(name, opts)
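
# For illustration, setups may be arbitrarily nested lists of setup
# functions, applied left to right:
#
#   executeSetups([skip, [ignore_stdout, ignore_stderr]], name, opts)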
653
654 # -----------------------------------------------------------------------------
655 # The current directory of tests
656
657 def newTestDir(tempdir, dir):
658
659 global thisdir_settings
660 # reset the options for this test directory
661 def settings(name, opts, tempdir=tempdir, dir=dir):
662 return _newTestDir(name, opts, tempdir, dir)
663 thisdir_settings = settings
664
665 # Should be equal to entry in toplevel .gitignore.
666 testdir_suffix = '.run'
667
668 def _newTestDir(name, opts, tempdir, dir):
669 testdir = os.path.join('', *(p for p in PurePath(dir).parts if p != '..'))
670 opts.srcdir = os.path.join(os.getcwd(), dir)
671 opts.testdir = os.path.join(tempdir, testdir, name + testdir_suffix)
672 opts.compiler_always_flags = config.compiler_always_flags
673
674 # -----------------------------------------------------------------------------
675 # Actually doing tests
676
677 parallelTests = []
678 aloneTests = []
679 allTestNames = set([])
680
681 def runTest(watcher, opts, name, func, args):
682 if config.use_threads:
683 pool_sema.acquire()
684 t = threading.Thread(target=test_common_thread,
685 name=name,
686 args=(watcher, name, opts, func, args))
687 t.daemon = False
688 t.start()
689 else:
690 test_common_work(watcher, name, opts, func, args)
691
692 # name :: String
693 # setup :: [TestOpt] -> IO ()
694 def test(name, setup, func, args):
695 global aloneTests
696 global parallelTests
697 global allTestNames
698 global thisdir_settings
699 if name in allTestNames:
700 framework_fail(name, 'duplicate', 'There are multiple tests with this name')
701 if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
702 framework_fail(name, 'bad_name', 'This test has an invalid name')
703
704 if config.run_only_some_tests:
705 if name not in config.only:
706 return
707 else:
708 # Note [Mutating config.only]
709 # config.only is initially the set of tests requested by
710 # the user (via 'make TEST='). We then remove all tests that
711 # we've already seen (in .T files), so that we can later
712 # report on any tests we couldn't find and error out.
713 config.only.remove(name)
714
715 # Make a deep copy of the default_testopts, as we need our own copy
716 # of any dictionaries etc inside it. Otherwise, if one test modifies
717 # them, all tests will see the modified version!
718 myTestOpts = copy.deepcopy(default_testopts)
719
720 executeSetups([thisdir_settings, setup], name, myTestOpts)
721
722 thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
723 if myTestOpts.alone:
724 aloneTests.append(thisTest)
725 else:
726 parallelTests.append(thisTest)
727 allTestNames.add(name)
728
729 if config.use_threads:
730 def test_common_thread(watcher, name, opts, func, args):
731 try:
732 test_common_work(watcher, name, opts, func, args)
733 finally:
734 pool_sema.release()
735
736 def get_package_cache_timestamp():
737 if config.package_conf_cache_file == '':
738 return 0.0
739 else:
        try:
            return os.stat(config.package_conf_cache_file).st_mtime
        except OSError:
            return 0.0
744
745 do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o', '.out') # 12112
746
747 def test_common_work(watcher, name, opts, func, args):
748 try:
749 t.total_tests += 1
750 setLocalTestOpts(opts)
751
752 package_conf_cache_file_start_timestamp = get_package_cache_timestamp()
753
754 # All the ways we might run this test
755 if func == compile or func == multimod_compile:
756 all_ways = config.compile_ways
757 elif func == compile_and_run or func == multimod_compile_and_run:
758 all_ways = config.run_ways
759 elif func == ghci_script:
760 if 'ghci' in config.run_ways:
761 all_ways = ['ghci']
762 else:
763 all_ways = []
764 else:
765 all_ways = ['normal']
766
767 # A test itself can request extra ways by setting opts.extra_ways
768 all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]
769
770 t.total_test_cases += len(all_ways)
771
        ok_way = lambda way: \
            not getTestOpts().skip \
            and (getTestOpts().only_ways is None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and (not (config.only_perf_tests and not isStatsTest())) \
            and way not in getTestOpts().omit_ways
779
        # The ways we will actually run the test in (all_ways minus the
        # ways we are asked to skip)
        do_ways = list(filter (ok_way,all_ways))
782
783 # Only run all ways in slow mode.
784 # See Note [validate and testsuite speed] in toplevel Makefile.
785 if config.accept:
786 # Only ever run one way
787 do_ways = do_ways[:1]
788 elif config.speed > 0:
789 # However, if we EXPLICITLY asked for a way (with extra_ways)
790 # please test it!
791 explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
792 other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
793 do_ways = other_ways[:1] + explicit_ways
794
795 # Find all files in the source directory that this test
796 # depends on. Do this only once for all ways.
797 # Generously add all filenames that start with the name of
798 # the test to this set, as a convenience to test authors.
799 # They will have to use the `extra_files` setup function to
800 # specify all other files that their test depends on (but
801 # this seems to be necessary for only about 10% of all
802 # tests).
803 files = set(f for f in os.listdir(opts.srcdir)
804 if f.startswith(name) and not f == name and
805 not f.endswith(testdir_suffix) and
806 not os.path.splitext(f)[1] in do_not_copy)
807 for filename in (opts.extra_files + extra_src_files.get(name, [])):
808 if filename.startswith('/'):
809 framework_fail(name, 'whole-test',
810 'no absolute paths in extra_files please: ' + filename)
811
812 elif '*' in filename:
813 # Don't use wildcards in extra_files too much, as
814 # globbing is slow.
815 files.update((os.path.relpath(f, opts.srcdir)
816 for f in glob.iglob(in_srcdir(filename))))
817
818 elif filename:
819 files.add(filename)
820
821 else:
822 framework_fail(name, 'whole-test', 'extra_file is empty string')
823
824 # Run the required tests...
825 for way in do_ways:
826 if stopping():
827 break
828 try:
829 do_test(name, way, func, args, files)
830 except KeyboardInterrupt:
831 stopNow()
832 except Exception as e:
833 framework_fail(name, way, str(e))
834 traceback.print_exc()
835
836 t.n_tests_skipped += len(set(all_ways) - set(do_ways))
837
838 if config.cleanup and do_ways:
839 try:
840 cleanup()
841 except Exception as e:
842 framework_fail(name, 'runTest', 'Unhandled exception during cleanup: ' + str(e))
843
        package_conf_cache_file_end_timestamp = get_package_cache_timestamp()
845
846 if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
847 framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))
848
849 except Exception as e:
850 framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
851 finally:
852 watcher.notify()
853
854 def do_test(name, way, func, args, files):
855 opts = getTestOpts()
856
857 full_name = name + '(' + way + ')'
858
859 if_verbose(2, "=====> {0} {1} of {2} {3}".format(
860 full_name, t.total_tests, len(allTestNames),
861 [len(t.unexpected_passes),
862 len(t.unexpected_failures),
863 len(t.framework_failures)]))
864
865 # Clean up prior to the test, so that we can't spuriously conclude
866 # that it passed on the basis of old run outputs.
867 cleanup()
868 os.makedirs(opts.testdir)
869
870 # Link all source files for this test into a new directory in
871 # /tmp, and run the test in that directory. This makes it
872 # possible to run tests in parallel, without modification, that
873 # would otherwise (accidentally) write to the same output file.
874 # It also makes it easier to keep the testsuite clean.
875
876 for extra_file in files:
877 src = in_srcdir(extra_file)
878 dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
879 if os.path.isfile(src):
880 link_or_copy_file(src, dst)
881 elif os.path.isdir(src):
882 os.mkdir(dst)
883 lndir(src, dst)
884 else:
885 if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
886 # When using a ghc built without haddock support, .t
887 # files are rightfully missing. Don't
888 # framework_fail. Test will be skipped later.
889 pass
890 else:
891 framework_fail(name, way,
892 'extra_file does not exist: ' + extra_file)
893
894 if func.__name__ == 'run_command' or opts.pre_cmd:
895 # When running 'MAKE' make sure 'TOP' still points to the
896 # root of the testsuite.
897 src_makefile = in_srcdir('Makefile')
898 dst_makefile = in_testdir('Makefile')
899 if os.path.exists(src_makefile):
900 with io.open(src_makefile, 'r', encoding='utf8') as src:
901 makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
902 with io.open(dst_makefile, 'w', encoding='utf8') as dst:
903 dst.write(makefile)
904
905 if opts.pre_cmd:
906 exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, override_options(opts.pre_cmd)),
907 stderr = subprocess.STDOUT,
908 print_output = config.verbose >= 3)
909
910 # If user used expect_broken then don't record failures of pre_cmd
911 if exit_code != 0 and opts.expect not in ['fail']:
912 framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
913 if_verbose(1, '** pre_cmd was "{0}".'.format(override_options(opts.pre_cmd)))
914
915 result = func(*[name,way] + args)
916
917 if opts.expect not in ['pass', 'fail', 'missing-lib']:
918 framework_fail(name, way, 'bad expected ' + opts.expect)
919
920 try:
921 passFail = result['passFail']
922 except (KeyError, TypeError):
923 passFail = 'No passFail found'
924
925 directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
926
927 if passFail == 'pass':
928 if _expect_pass(way):
929 t.expected_passes.append((directory, name, way))
930 t.n_expected_passes += 1
931 else:
932 if_verbose(1, '*** unexpected pass for %s' % full_name)
933 t.unexpected_passes.append((directory, name, 'unexpected', way))
934 elif passFail == 'fail':
935 if _expect_pass(way):
936 reason = result['reason']
937 tag = result.get('tag')
938 if tag == 'stat':
939 if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
940 t.unexpected_stat_failures.append((directory, name, reason, way))
941 else:
942 if_verbose(1, '*** unexpected failure for %s' % full_name)
943 t.unexpected_failures.append((directory, name, reason, way))
944 else:
945 if opts.expect == 'missing-lib':
946 t.missing_libs.append((directory, name, 'missing-lib', way))
947 else:
948 t.n_expected_failures += 1
949 else:
950 framework_fail(name, way, 'bad result ' + passFail)
951
952 # Make is often invoked with -s, which means if it fails, we get
953 # no feedback at all. This is annoying. So let's remove the option
954 # if found and instead have the testsuite decide on what to do
955 # with the output.
956 def override_options(pre_cmd):
    if config.verbose >= 5 and bool(re.match(r'\$make', pre_cmd, re.I)):
958 return pre_cmd.replace('-s' , '') \
959 .replace('--silent', '') \
960 .replace('--quiet' , '')
961
962 return pre_cmd
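
# For illustration (hypothetical test name): at verbosity 5 and above,
#
#   override_options('$MAKE -s --no-print-directory T9999')
#
# returns '$MAKE  --no-print-directory T9999', so make's output is no
# longer suppressed and the testsuite can decide what to print.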
963
964 def framework_fail(name, way, reason):
965 opts = getTestOpts()
966 directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
967 full_name = name + '(' + way + ')'
968 if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
969 t.framework_failures.append((directory, name, way, reason))
970
971 def framework_warn(name, way, reason):
972 opts = getTestOpts()
973 directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
974 full_name = name + '(' + way + ')'
975 if_verbose(1, '*** framework warning for %s %s ' % (full_name, reason))
976 t.framework_warnings.append((directory, name, way, reason))
977
978 def badResult(result):
979 try:
980 if result['passFail'] == 'pass':
981 return False
982 return True
983 except (KeyError, TypeError):
984 return True
985
986 # -----------------------------------------------------------------------------
987 # Generic command tests
988
989 # A generic command test is expected to run and exit successfully.
990 #
991 # The expected exit code can be changed via exit_code() as normal, and
992 # the expected stdout/stderr are stored in <testname>.stdout and
# <testname>.stderr. The output of the command can be ignored
# altogether by using the setup functions ignore_stdout and
# ignore_stderr.
996
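# For illustration (hypothetical test name), a Makefile-driven test is
# typically written as
#
#   test('T9999', normal, run_command, ['$MAKE -s --no-print-directory T9999'])
#
# with a matching T9999 target in the directory's Makefile.
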
997 def run_command( name, way, cmd ):
998 return simple_run( name, '', override_options(cmd), '' )
999
1000 # -----------------------------------------------------------------------------
1001 # GHCi tests
1002
1003 def ghci_script( name, way, script):
1004 flags = ' '.join(get_compiler_flags())
1005 way_flags = ' '.join(config.way_flags[way])
1006
1007 # We pass HC and HC_OPTS as environment variables, so that the
1008 # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
1009 cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {way_flags} {flags}'
1010 ).format(flags=flags, way_flags=way_flags)
    # NB: put way_flags before flags so that flags in all.T can override others
1012
1013 getTestOpts().stdin = script
1014 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
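
# For illustration (hypothetical test name), a GHCi test names a script
# that is piped into the interpreter as stdin:
#
#   test('T9999', normal, ghci_script, ['T9999.script'])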
1015
1016 # -----------------------------------------------------------------------------
1017 # Compile-only tests
1018
1019 def compile( name, way, extra_hc_opts ):
1020 return do_compile( name, way, 0, '', [], extra_hc_opts )
1021
1022 def compile_fail( name, way, extra_hc_opts ):
1023 return do_compile( name, way, 1, '', [], extra_hc_opts )
1024
1025 def backpack_typecheck( name, way, extra_hc_opts ):
1026 return do_compile( name, way, 0, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )
1027
1028 def backpack_typecheck_fail( name, way, extra_hc_opts ):
1029 return do_compile( name, way, 1, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )
1030
1031 def backpack_compile( name, way, extra_hc_opts ):
1032 return do_compile( name, way, 0, '', [], extra_hc_opts, backpack=True )
1033
1034 def backpack_compile_fail( name, way, extra_hc_opts ):
1035 return do_compile( name, way, 1, '', [], extra_hc_opts, backpack=True )
1036
1037 def backpack_run( name, way, extra_hc_opts ):
1038 return compile_and_run__( name, way, '', [], extra_hc_opts, backpack=True )
1039
1040 def multimod_compile( name, way, top_mod, extra_hc_opts ):
1041 return do_compile( name, way, 0, top_mod, [], extra_hc_opts )
1042
1043 def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
1044 return do_compile( name, way, 1, top_mod, [], extra_hc_opts )
1045
1046 def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
1047 return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)
1048
1049 def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
1050 return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
1051
1052 def do_compile(name, way, should_fail, top_mod, extra_mods, extra_hc_opts, **kwargs):
1053 # print 'Compile only, extra args = ', extra_hc_opts
1054
1055 result = extras_build( way, extra_mods, extra_hc_opts )
1056 if badResult(result):
1057 return result
1058 extra_hc_opts = result['hc_opts']
1059
1060 result = simple_build(name, way, extra_hc_opts, should_fail, top_mod, 0, 1, **kwargs)
1061
1062 if badResult(result):
1063 return result
1064
1065 # the actual stderr should always match the expected, regardless
1066 # of whether we expected the compilation to fail or not (successful
1067 # compilations may generate warnings).
1068
1069 expected_stderr_file = find_expected_file(name, 'stderr')
1070 actual_stderr_file = add_suffix(name, 'comp.stderr')
1071
1072 if not compare_outputs(way, 'stderr',
1073 join_normalisers(getTestOpts().extra_errmsg_normaliser,
1074 normalise_errmsg),
1075 expected_stderr_file, actual_stderr_file,
1076 whitespace_normaliser=getattr(getTestOpts(),
1077 "whitespace_normaliser",
1078 normalise_whitespace)):
1079 return failBecause('stderr mismatch')
1080
1081 # no problems found, this test passed
1082 return passed()
1083
1084 def compile_cmp_asm( name, way, extra_hc_opts ):
    # print('Compile only, extra args = ', extra_hc_opts)
1086 result = simple_build(name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)
1087
1088 if badResult(result):
1089 return result
1090
1091 # the actual stderr should always match the expected, regardless
1092 # of whether we expected the compilation to fail or not (successful
1093 # compilations may generate warnings).
1094
1095 expected_asm_file = find_expected_file(name, 'asm')
1096 actual_asm_file = add_suffix(name, 's')
1097
1098 if not compare_outputs(way, 'asm',
1099 join_normalisers(normalise_errmsg, normalise_asm),
1100 expected_asm_file, actual_asm_file):
1101 return failBecause('asm mismatch')
1102
1103 # no problems found, this test passed
1104 return passed()
1105
1106 # -----------------------------------------------------------------------------
1107 # Compile-and-run tests
1108
1109 def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts, backpack=0 ):
1110 # print 'Compile and run, extra args = ', extra_hc_opts
1111
1112 result = extras_build( way, extra_mods, extra_hc_opts )
1113 if badResult(result):
1114 return result
1115 extra_hc_opts = result['hc_opts']
1116
1117 if way.startswith('ghci'): # interpreted...
1118 return interpreter_run(name, way, extra_hc_opts, top_mod)
1119 else: # compiled...
1120 result = simple_build(name, way, extra_hc_opts, 0, top_mod, 1, 1, backpack = backpack)
1121 if badResult(result):
1122 return result
1123
        cmd = './' + name
1125
1126 # we don't check the compiler's stderr for a compile-and-run test
1127 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1128
1129 def compile_and_run( name, way, extra_hc_opts ):
1130 return compile_and_run__( name, way, '', [], extra_hc_opts)
1131
1132 def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
1133 return compile_and_run__( name, way, top_mod, [], extra_hc_opts)
1134
1135 def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
1136 return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
1137
1138 def stats( name, way, stats_file ):
1139 opts = getTestOpts()
1140 return check_stats(name, way, stats_file, opts.stats_range_fields)
1141
1142 def metric_dict(name, way, metric, value):
1143 return Perf.PerfStat(
1144 test_env = config.test_env,
1145 test = name,
1146 way = way,
1147 metric = metric,
1148 value = value)
1149
1150 # -----------------------------------------------------------------------------
1151 # Check test stats. This prints the results for the user.
1152 # name: name of the test.
1153 # way: the way.
1154 # stats_file: the path of the stats_file containing the stats for the test.
# range_fields: the metrics to check; maps a metric name to
#   (expected value, allowed deviation), or to None when no baseline exists.
# Returns a pass/fail object. Passes if the stats are within the expected
# value ranges.
1158 def check_stats(name, way, stats_file, range_fields):
1159 result = passed()
1160 if range_fields:
        try:
            with open(in_testdir(stats_file)) as f:
                stats_file_contents = f.read()
        except IOError as e:
            return failBecause(str(e))
1167
1168 for (metric, range_val_dev) in range_fields.items():
            field_match = re.search(r'\("' + metric + r'", "([0-9]+)"\)', stats_file_contents)
            if field_match is None:
1171 print('Failed to find metric: ', metric)
1172 metric_result = failBecause('no such stats metric')
1173 else:
1174 actual_val = int(field_match.group(1))
1175
1176 # Store the metric so it can later be stored in a git note.
1177 perf_stat = metric_dict(name, way, metric, actual_val)
1178 change = None
1179
1180 # If this is the first time running the benchmark, then pass.
                if range_val_dev is None:
1182 metric_result = passed()
1183 change = MetricChange.NewMetric
1184 else:
1185 (expected_val, tolerance_dev) = range_val_dev
1186 (change, metric_result) = Perf.check_stats_change(
1187 perf_stat,
1188 expected_val,
1189 tolerance_dev,
1190 config.allowed_perf_changes,
1191 config.verbose >= 4)
1192 t.metrics.append((change, perf_stat))
1193
1194 # If any metric fails then the test fails.
1195 # Note, the remaining metrics are still run so that
1196 # a complete list of changes can be presented to the user.
1197 if metric_result['passFail'] == 'fail':
1198 result = metric_result
1199
1200 return result
1201
1202 # -----------------------------------------------------------------------------
1203 # Build a single-module program
1204
1205 def extras_build( way, extra_mods, extra_hc_opts ):
1206 for mod, opts in extra_mods:
1207 result = simple_build(mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0)
1208 if not (mod.endswith('.hs') or mod.endswith('.lhs')):
1209 extra_hc_opts += ' ' + replace_suffix(mod, 'o')
1210 if badResult(result):
1211 return result
1212
1213 return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1214
1215 def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, backpack = False):
1216 opts = getTestOpts()
1217
1218 # Redirect stdout and stderr to the same file
1219 stdout = in_testdir(name, 'comp.stderr')
1220 stderr = subprocess.STDOUT
1221
1222 if top_mod != '':
1223 srcname = top_mod
1224 elif addsuf:
1225 if backpack:
1226 srcname = add_suffix(name, 'bkp')
1227 else:
1228 srcname = add_hs_lhs_suffix(name)
1229 else:
1230 srcname = name
1231
1232 if top_mod != '':
1233 to_do = '--make '
1234 if link:
1235 to_do = to_do + '-o ' + name
1236 elif backpack:
1237 if link:
1238 to_do = '-o ' + name + ' '
1239 else:
1240 to_do = ''
1241 to_do = to_do + '--backpack '
1242 elif link:
1243 to_do = '-o ' + name
1244 else:
1245 to_do = '-c' # just compile
1246
1247 stats_file = name + '.comp.stats'
1248 if isCompilerStatsTest():
1249 extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1250 if backpack:
1251 extra_hc_opts += ' -outputdir ' + name + '.out'
1252
1253 # Required by GHC 7.3+, harmless for earlier versions:
1254 if (getTestOpts().c_src or
1255 getTestOpts().objc_src or
1256 getTestOpts().objcpp_src or
1257 getTestOpts().cmm_src):
1258 extra_hc_opts += ' -no-hs-main '
1259
1260 if getTestOpts().compile_cmd_prefix == '':
1261 cmd_prefix = ''
1262 else:
1263 cmd_prefix = getTestOpts().compile_cmd_prefix + ' '
1264
1265 flags = ' '.join(get_compiler_flags() + config.way_flags[way])
1266
1267 cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
1268 '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts}'
1269 ).format(**locals())
1270
1271 exit_code = runCmd(cmd, None, stdout, stderr, opts.compile_timeout_multiplier)
1272
1273 if exit_code != 0 and not should_fail:
1274 if config.verbose >= 1 and _expect_pass(way):
1275 print('Compile failed (exit code {0}) errors were:'.format(exit_code))
1276 actual_stderr_path = in_testdir(name, 'comp.stderr')
1277 dump_file(actual_stderr_path)
1278
1279 # ToDo: if the sub-shell was killed by ^C, then exit
1280
1281 if isCompilerStatsTest():
1282 statsResult = check_stats(name, way, stats_file, opts.stats_range_fields)
1283 if badResult(statsResult):
1284 return statsResult
1285
1286 if should_fail:
1287 if exit_code == 0:
1288 return failBecause('exit code 0')
1289 else:
1290 if exit_code != 0:
1291 return failBecause('exit code non-0')
1292
1293 return passed()
1294
1295 # -----------------------------------------------------------------------------
1296 # Run a program and check its output
1297 #
1298 # If testname.stdin exists, route input from that, else
1299 # from /dev/null. Route output to testname.run.stdout and
1300 # testname.run.stderr. Returns the exit code of the run.
1301
1302 def simple_run(name, way, prog, extra_run_opts):
1303 opts = getTestOpts()
1304
1305 # figure out what to use for stdin
1306 if opts.stdin:
1307 stdin = in_testdir(opts.stdin)
1308 elif os.path.exists(in_testdir(name, 'stdin')):
1309 stdin = in_testdir(name, 'stdin')
1310 else:
1311 stdin = None
1312
1313 stdout = in_testdir(name, 'run.stdout')
1314 if opts.combined_output:
1315 stderr = subprocess.STDOUT
1316 else:
1317 stderr = in_testdir(name, 'run.stderr')
1318
1319 my_rts_flags = rts_flags(way)
1320
1321 stats_file = name + '.stats'
1322 if isStatsTest() and not isCompilerStatsTest():
1323 stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1324 else:
1325 stats_args = ''
1326
1327 # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
1328 cmd = prog + stats_args + ' ' + my_rts_flags + ' ' + extra_run_opts
1329
    if opts.cmd_wrapper is not None:
        cmd = opts.cmd_wrapper(cmd)
1332
1333 cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())
1334
1335 # run the command
1336 exit_code = runCmd(cmd, stdin, stdout, stderr, opts.run_timeout_multiplier)
1337
1338 # check the exit code
1339 if exit_code != opts.exit_code:
1340 if config.verbose >= 1 and _expect_pass(way):
            print('Wrong exit code for ' + name + '(' + way + ') (expected', opts.exit_code, ', actual', exit_code, ')')
1342 dump_stdout(name)
1343 dump_stderr(name)
1344 return failBecause('bad exit code')
1345
1346 if not (opts.ignore_stderr or stderr_ok(name, way) or opts.combined_output):
1347 return failBecause('bad stderr')
1348 if not (opts.ignore_stdout or stdout_ok(name, way)):
1349 return failBecause('bad stdout')
1350
1351 check_hp = '-h' in my_rts_flags and opts.check_hp
1352 check_prof = '-p' in my_rts_flags
1353
1354 # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
1355 if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
1356 return failBecause('bad heap profile')
1357 if check_prof and not check_prof_ok(name, way):
1358 return failBecause('bad profile')
1359
1360 return check_stats(name, way, stats_file, opts.stats_range_fields)
1361
1362 def rts_flags(way):
1363 args = config.way_rts_flags.get(way, [])
1364 return '+RTS {0} -RTS'.format(' '.join(args)) if args else ''
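
# For illustration (hypothetical mapping): if config.way_rts_flags maps
# 'threaded2' to ['-N2'], then
#
#   rts_flags('threaded2') == '+RTS -N2 -RTS'
#
# and a way with no RTS flags yields the empty string.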
1365
1366 # -----------------------------------------------------------------------------
1367 # Run a program in the interpreter and check its output
1368
1369 def interpreter_run(name, way, extra_hc_opts, top_mod):
1370 opts = getTestOpts()
1371
1372 stdout = in_testdir(name, 'interp.stdout')
1373 stderr = in_testdir(name, 'interp.stderr')
1374 script = in_testdir(name, 'genscript')
1375
1376 if opts.combined_output:
1377 framework_fail(name, 'unsupported',
1378 'WAY=ghci and combined_output together is not supported')
1379
1380 if (top_mod == ''):
1381 srcname = add_hs_lhs_suffix(name)
1382 else:
1383 srcname = top_mod
1384
1385 delimiter = '===== program output begins here\n'
1386
1387 with io.open(script, 'w', encoding='utf8') as f:
1388 # set the prog name and command-line args to match the compiled
1389 # environment.
1390 f.write(':set prog ' + name + '\n')
1391 f.write(':set args ' + opts.extra_run_opts + '\n')
1392 # Add marker lines to the stdout and stderr output files, so we
1393 # can separate GHCi's output from the program's.
1394 f.write(':! echo ' + delimiter)
1395 f.write(':! echo 1>&2 ' + delimiter)
1396 # Set stdout to be line-buffered to match the compiled environment.
1397 f.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
1398 # wrapping in GHC.TopHandler.runIO ensures we get the same output
1399 # in the event of an exception as for the compiled program.
1400 f.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
1401
1402 stdin = in_testdir(opts.stdin if opts.stdin else add_suffix(name, 'stdin'))
1403 if os.path.exists(stdin):
1404 os.system('cat "{0}" >> "{1}"'.format(stdin, script))
1405
1406 flags = ' '.join(get_compiler_flags() + config.way_flags[way])
1407
1408 cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts}'
1409 ).format(**locals())
1410
    if opts.cmd_wrapper is not None:
        cmd = opts.cmd_wrapper(cmd)
1413
1414 cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())
1415
1416 exit_code = runCmd(cmd, script, stdout, stderr, opts.run_timeout_multiplier)
1417
1418 # split the stdout into compilation/program output
1419 split_file(stdout, delimiter,
1420 in_testdir(name, 'comp.stdout'),
1421 in_testdir(name, 'run.stdout'))
1422 split_file(stderr, delimiter,
1423 in_testdir(name, 'comp.stderr'),
1424 in_testdir(name, 'run.stderr'))
1425
1426 # check the exit code
1427 if exit_code != getTestOpts().exit_code:
1428 print('Wrong exit code for ' + name + '(' + way + ') (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
1429 dump_stdout(name)
1430 dump_stderr(name)
1431 return failBecause('bad exit code')
1432
1433 # ToDo: if the sub-shell was killed by ^C, then exit
1434
1435 if not (opts.ignore_stderr or stderr_ok(name, way)):
1436 return failBecause('bad stderr')
1437 elif not (opts.ignore_stdout or stdout_ok(name, way)):
1438 return failBecause('bad stdout')
1439 else:
1440 return passed()
1441
1442 def split_file(in_fn, delimiter, out1_fn, out2_fn):
1443 # See Note [Universal newlines].
1444 with io.open(in_fn, 'r', encoding='utf8', errors='replace', newline=None) as infile:
1445 with io.open(out1_fn, 'w', encoding='utf8', newline='') as out1:
1446 with io.open(out2_fn, 'w', encoding='utf8', newline='') as out2:
1447 line = infile.readline()
                while re.sub(r'^\s*', '', line) != delimiter and line != '':
1449 out1.write(line)
1450 line = infile.readline()
1451
1452 line = infile.readline()
1453 while line != '':
1454 out2.write(line)
1455 line = infile.readline()
1456
1457 # -----------------------------------------------------------------------------
1458 # Utils
1459 def get_compiler_flags():
1460 opts = getTestOpts()
1461
1462 flags = copy.copy(opts.compiler_always_flags)
1463
1464 flags.append(opts.extra_hc_opts)
1465
    if opts.outputdir is not None:
1467 flags.extend(["-outputdir", opts.outputdir])
1468
1469 return flags
1470
1471 def stdout_ok(name, way):
1472 actual_stdout_file = add_suffix(name, 'run.stdout')
1473 expected_stdout_file = find_expected_file(name, 'stdout')
1474
1475 extra_norm = join_normalisers(normalise_output, getTestOpts().extra_normaliser)
1476
1477 check_stdout = getTestOpts().check_stdout
1478 if check_stdout:
1479 actual_stdout_path = in_testdir(actual_stdout_file)
1480 return check_stdout(actual_stdout_path, extra_norm)
1481
1482 return compare_outputs(way, 'stdout', extra_norm,
1483 expected_stdout_file, actual_stdout_file)
1484
1485 def dump_stdout( name ):
1486 with open(in_testdir(name, 'run.stdout'), encoding='utf8') as f:
1487 str = f.read().strip()
1488 if str:
1489 print("Stdout (", name, "):")
1490 print(str)
1491
1492 def stderr_ok(name, way):
1493 actual_stderr_file = add_suffix(name, 'run.stderr')
1494 expected_stderr_file = find_expected_file(name, 'stderr')
1495
1496 return compare_outputs(way, 'stderr',
1497 join_normalisers(normalise_errmsg, getTestOpts().extra_errmsg_normaliser), \
1498 expected_stderr_file, actual_stderr_file,
1499 whitespace_normaliser=normalise_whitespace)
1500
1501 def dump_stderr( name ):
1502 with open(in_testdir(name, 'run.stderr'), encoding='utf8') as f:
1503 str = f.read().strip()
1504 if str:
1505 print("Stderr (", name, "):")
1506 print(str)
1507
1508 def read_no_crs(file):
1509 str = ''
1510 try:
1511 # See Note [Universal newlines].
1512 with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
1513 str = h.read()
1514 except Exception:
1515 # On Windows, if the program fails very early, it seems the
1516 # files stdout/stderr are redirected to may not get created
1517 pass
1518 return str
1519
1520 def write_file(file, str):
1521 # See Note [Universal newlines].
1522 with io.open(file, 'w', encoding='utf8', newline='') as h:
1523 h.write(str)
1524
1525 # Note [Universal newlines]
1526 #
1527 # We don't want to write any Windows style line endings ever, because
1528 # it would mean that `make accept` would touch every line of the file
1529 # when switching between Linux and Windows.
1530 #
1531 # Furthermore, when reading a file, it is convenient to translate all
1532 # Windows style endings to '\n', as it simplifies searching or massaging
1533 # the content.
1534 #
1535 # Solution: use `io.open` instead of `open`
1536 # * when reading: use newline=None to translate '\r\n' to '\n'
1537 # * when writing: use newline='' to not translate '\n' to '\r\n'
1538 #
1539 # See https://docs.python.org/2/library/io.html#io.open.
1540 #
1541 # This should work with both python2 and python3, and with both mingw*
1542 # as msys2 style Python.
1543 #
1544 # Do note that io.open returns unicode strings. So we have to specify
1545 # the expected encoding. But there is at least one file which is not
1546 # valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
1547 # Another solution would be to open files in binary mode always, and
1548 # operate on bytes.
1549
1550 def check_hp_ok(name):
1551 opts = getTestOpts()
1552
1553 # do not qualify for hp2ps because we should be in the right directory
1554 hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())
1555
1556 hp2psResult = runCmd(hp2psCmd)
1557
1558 actual_ps_path = in_testdir(name, 'ps')
1559
    if hp2psResult == 0:
        if os.path.exists(actual_ps_path):
            if gs_working:
                gsResult = runCmd(genGSCmd(actual_ps_path))
                if gsResult == 0:
                    return True
                else:
                    print("hp2ps output for " + name + " is not valid PostScript")
                    return False
            else:
                return True # assume postscript is valid without ghostscript
        else:
            print("hp2ps did not generate PostScript for " + name)
            return False
    else:
        print("hp2ps error when processing heap profile for " + name)
        return False
1575
1576 def check_prof_ok(name, way):
1577 expected_prof_file = find_expected_file(name, 'prof.sample')
1578 expected_prof_path = in_testdir(expected_prof_file)
1579
1580 # Check actual prof file only if we have an expected prof file to
1581 # compare it with.
1582 if not os.path.exists(expected_prof_path):
1583 return True
1584
1585 actual_prof_file = add_suffix(name, 'prof')
1586 actual_prof_path = in_testdir(actual_prof_file)
1587
    if not os.path.exists(actual_prof_path):
        print(actual_prof_path + " does not exist")
        return False

    if os.path.getsize(actual_prof_path) == 0:
        print(actual_prof_path + " is empty")
        return False
1595
1596 return compare_outputs(way, 'prof', normalise_prof,
1597 expected_prof_file, actual_prof_file,
1598 whitespace_normaliser=normalise_whitespace)
1599
1600 # Compare expected output to actual output, and optionally accept the
1601 # new output. Returns true if output matched or was accepted, false
1602 # otherwise. See Note [Output comparison] for the meaning of the
1603 # normaliser and whitespace_normaliser parameters.
1604 def compare_outputs(way, kind, normaliser, expected_file, actual_file,
1605 whitespace_normaliser=lambda x:x):
1606
1607 expected_path = in_srcdir(expected_file)
1608 actual_path = in_testdir(actual_file)
1609
1610 if os.path.exists(expected_path):
1611 expected_str = normaliser(read_no_crs(expected_path))
1612 # Create the .normalised file in the testdir, not in the srcdir.
1613 expected_normalised_file = add_suffix(expected_file, 'normalised')
1614 expected_normalised_path = in_testdir(expected_normalised_file)
1615 else:
1616 expected_str = ''
1617 expected_normalised_path = '/dev/null'
1618
1619 actual_raw = read_no_crs(actual_path)
1620 actual_str = normaliser(actual_raw)
1621
1622 # See Note [Output comparison].
1623 if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
1624 return True
1625 else:
1626 if config.verbose >= 1 and _expect_pass(way):
1627 print('Actual ' + kind + ' output differs from expected:')
1628
1629 if expected_normalised_path != '/dev/null':
1630 write_file(expected_normalised_path, expected_str)
1631
1632 actual_normalised_path = add_suffix(actual_path, 'normalised')
1633 write_file(actual_normalised_path, actual_str)
1634
1635 if config.verbose >= 1 and _expect_pass(way):
1636 # See Note [Output comparison].
1637 r = runCmd('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
1638 actual_normalised_path),
1639 print_output=True)
1640
1641 # If for some reason there were no non-whitespace differences,
1642 # then do a full diff
1643 if r == 0:
1644 r = runCmd('diff -u "{0}" "{1}"'.format(expected_normalised_path,
1645 actual_normalised_path),
1646 print_output=True)
1647
1648 if config.accept and (getTestOpts().expect == 'fail' or
1649 way in getTestOpts().expect_fail_for):
1650 if_verbose(1, 'Test is expected to fail. Not accepting new output.')
1651 return False
1652 elif config.accept and actual_raw:
1653 if config.accept_platform:
1654 if_verbose(1, 'Accepting new output for platform "'
1655 + config.platform + '".')
1656 expected_path += '-' + config.platform
1657 elif config.accept_os:
1658 if_verbose(1, 'Accepting new output for os "'
1659 + config.os + '".')
1660 expected_path += '-' + config.os
1661 else:
1662 if_verbose(1, 'Accepting new output.')
1663
1664 write_file(expected_path, actual_raw)
1665 return True
1666 elif config.accept:
1667 if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
1668 os.remove(expected_path)
1669 return True
1670 else:
1671 return False
1672
1673 # Note [Output comparison]
1674 #
1675 # We do two types of output comparison:
1676 #
1677 # 1. To decide whether a test has failed. We apply a `normaliser` and an
1678 # optional `whitespace_normaliser` to the expected and the actual
1679 # output, before comparing the two.
1680 #
1681 # 2. To show as a diff to the user when the test indeed failed. We apply
1682 # the same `normaliser` function to the outputs, to make the diff as
1683 # small as possible (only showing the actual problem). But we don't
1684 # apply the `whitespace_normaliser` here, because it might completely
1685 # squash all whitespace, making the diff unreadable. Instead we rely
1686 # on the `diff` program to ignore whitespace changes as much as
1687 # possible (#10152).
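#
# For example (illustrative): with whitespace_normaliser=normalise_whitespace,
# "a  b" and "a\nb" compare equal in step 1, while step 2 diffs the
# (normaliser-applied) outputs with their whitespace intact and relies on
# `diff -uw` to suppress whitespace-only differences.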
1688
1689 def normalise_whitespace( str ):
1690 # Merge contiguous whitespace characters into a single space.
1691 return ' '.join(str.split())
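
# e.g. normalise_whitespace('  foo\t\tbar \n baz ') == 'foo bar baz'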
1692
1693 callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')
1694
1695 def normalise_callstacks(s):
1696 opts = getTestOpts()
1697 def repl(matches):
1698 location = matches.group(1)
1699 location = normalise_slashes_(location)
1700 return ', called at {0}:<line>:<column> in <package-id>:'.format(location)
1701 # Ignore line number differences in call stacks (#10834).
1702 s = re.sub(callSite_re, repl, s)
1703 # Ignore the change in how we identify implicit call-stacks
1704 s = s.replace('from ImplicitParams', 'from HasCallStack')
1705 if not opts.keep_prof_callstacks:
1706 # Don't output prof callstacks. Test output should be
1707 # independent from the WAY we run the test.
1708 s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
1709 return s
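
# For example (illustrative), normalise_callstacks rewrites
#
#   error, called at Main.hs:5:8 in main:Main
#
# to
#
#   error, called at Main.hs:<line>:<column> in <package-id>:Main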
1710
1711 tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)
1712
1713 def normalise_type_reps(str):
1714 """ Normalise out fingerprints from Typeable TyCon representations """
1715 return re.sub(tyCon_re, 'TyCon FINGERPRINT FINGERPRINT ', str)
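
# e.g. (with made-up fingerprints) 'TyCon 123## 456## Ghci1 []' becomes
# 'TyCon FINGERPRINT FINGERPRINT Ghci1 []'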
1716
1717 def normalise_errmsg( str ):
1718 """Normalise error-messages emitted via stderr"""
1719 # IBM AIX's `ld` is a bit chatty
1720 if opsys('aix'):
1721 str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
1722 # remove " error:" and lower-case " Warning:" to make patch for
1723 # trac issue #10021 smaller
1724 str = modify_lines(str, lambda l: re.sub(' error:', '', l))
1725 str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
1726 str = normalise_callstacks(str)
1727 str = normalise_type_reps(str)
1728
    # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows).
    # The colon is there because it appears in error messages; this
    # hacky solution is used in place of more sophisticated filename
    # mangling.
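    # e.g. 'Foo.exe: No such file' becomes 'Foo: No such file'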
1733 str = re.sub('([^\\s])\\.exe', '\\1', str)
1734
1735 # normalise slashes, minimise Windows/Unix filename differences
1736 str = re.sub('\\\\', '/', str)
1737
1738 # The inplace ghc's are called ghc-stage[123] to avoid filename
1739 # collisions, so we need to normalise that to just "ghc"
1740 str = re.sub('ghc-stage[123]', 'ghc', str)
1741
    # Error messages sometimes contain the name of the integer
    # implementation package
1743 str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
1744
1745 # Error messages sometimes contain this blurb which can vary
1746 # spuriously depending upon build configuration (e.g. based on integer
1747 # backend)
1748 str = re.sub('...plus [a-z]+ instances involving out-of-scope types',
1749 '...plus N instances involving out-of-scope types', str)
1750
1751 # Also filter out bullet characters. This is because bullets are used to
    # separate error sections, and tests shouldn't be sensitive to how
    # the division happens.
1754 bullet = '•'.encode('utf8') if isinstance(str, bytes) else '•'
1755 str = str.replace(bullet, '')
1756
    # Windows only: this is a bug in hsc2hs, but it prevents stable output
    # for the testsuite. See Trac #9775. For now we filter out this warning
    # message to get clean output.
1760 if config.msys:
1761 str = re.sub('Failed to remove file (.*); error= (.*)$', '', str)
        str = re.sub(r'DeleteFile "(.+)": permission denied \(Access is denied\.\)(.*)$', '', str)
1763
1764 return str
1765
1766 # normalise a .prof file, so that we can reasonably compare it against
1767 # a sample. This doesn't compare any of the actual profiling data,
1768 # only the shape of the profile and the number of entries.
1769 def normalise_prof (str):
1770 # strip everything up to the line beginning "COST CENTRE"
1771 str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)
1772
1773 # strip results for CAFs, these tend to change unpredictably
1774 str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)
1775
1776 # XXX Ignore Main.main. Sometimes this appears under CAF, and
1777 # sometimes under MAIN.
1778 str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)
1779
1780 # We have something like this:
1781 #
1782 # MAIN MAIN <built-in> 53 0 0.0 0.2 0.0 100.0
1783 # CAF Main <entire-module> 105 0 0.0 0.3 0.0 62.5
1784 # readPrec Main Main_1.hs:7:13-16 109 1 0.0 0.6 0.0 0.6
1785 # readPrec Main Main_1.hs:4:13-16 107 1 0.0 0.6 0.0 0.6
1786 # main Main Main_1.hs:(10,1)-(20,20) 106 1 0.0 20.2 0.0 61.0
1787 # == Main Main_1.hs:7:25-26 114 1 0.0 0.0 0.0 0.0
1788 # == Main Main_1.hs:4:25-26 113 1 0.0 0.0 0.0 0.0
1789 # showsPrec Main Main_1.hs:7:19-22 112 2 0.0 1.2 0.0 1.2
1790 # showsPrec Main Main_1.hs:4:19-22 111 2 0.0 0.9 0.0 0.9
1791 # readPrec Main Main_1.hs:7:13-16 110 0 0.0 18.8 0.0 18.8
1792 # readPrec Main Main_1.hs:4:13-16 108 0 0.0 19.9 0.0 19.9
1793 #
1794 # then we remove all the specific profiling data, leaving only the cost
1795 # centre name, module, src, and entries, to end up with this: (modulo
1796 # whitespace between columns)
1797 #
1798 # MAIN MAIN <built-in> 0
1799 # readPrec Main Main_1.hs:7:13-16 1
1800 # readPrec Main Main_1.hs:4:13-16 1
1801 # == Main Main_1.hs:7:25-26 1
1802 # == Main Main_1.hs:4:25-26 1
1803 # showsPrec Main Main_1.hs:7:19-22 2
1804 # showsPrec Main Main_1.hs:4:19-22 2
1805 # readPrec Main Main_1.hs:7:13-16 0
1806 # readPrec Main Main_1.hs:4:13-16 0
1807
    # Match 9 whitespace-separated fields and keep columns 1 (cost-centre),
    # 2 (module), 3 (src), and 5 (entries). SCC names can't contain
    # whitespace, so this works fine.
1811 str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
1812 '\\1 \\2 \\3 \\5\n', str)
1813 return str
1814
1815 def normalise_slashes_( str ):
1816 str = re.sub('\\\\', '/', str)
1817 str = re.sub('//', '/', str)
1818 return str
1819
1820 def normalise_exe_( str ):
1821 str = re.sub('\.exe', '', str)
1822 return str
1823
1824 def normalise_output( str ):
1825 # remove " error:" and lower-case " Warning:" to make patch for
1826 # trac issue #10021 smaller
1827 str = modify_lines(str, lambda l: re.sub(' error:', '', l))
1828 str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
1829 # Remove a .exe extension (for Windows)
1830 # This can occur in error messages generated by the program.
1831 str = re.sub('([^\\s])\\.exe', '\\1', str)
1832 str = normalise_callstacks(str)
1833 str = normalise_type_reps(str)
1834 return str
1835
1836 def normalise_asm( str ):
1837 lines = str.split('\n')
1838 # Only keep instructions and labels not starting with a dot.
1839 metadata = re.compile('^[ \t]*\\..*$')
1840 out = []
1841 for line in lines:
1842 # Drop metadata directives (e.g. ".type")
1843 if not metadata.match(line):
1844 line = re.sub('@plt', '', line)
1845 instr = line.lstrip().split()
1846 # Drop empty lines.
1847 if not instr:
1848 continue
1849 # Drop operands, except for call instructions.
1850 elif instr[0] == 'call':
1851 out.append(instr[0] + ' ' + instr[1])
1852 else:
1853 out.append(instr[0])
1854 out = '\n'.join(out)
1855 return out
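
# For example (illustrative), the input
#
#   .text
#   movq %rax, %rbx
#   call malloc@plt
#
# is normalised to
#
#   movq
#   call malloc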
1856
1857 def if_verbose( n, s ):
1858 if config.verbose >= n:
1859 print(s)
1860
1861 def dump_file(f):
1862 try:
1863 with io.open(f) as file:
1864 print(file.read())
1865 except Exception:
1866 print('')
1867
1868 def runCmd(cmd, stdin=None, stdout=None, stderr=None, timeout_multiplier=1.0, print_output=False):
1869 timeout_prog = strip_quotes(config.timeout_prog)
1870 timeout = str(int(ceil(config.timeout * timeout_multiplier)))
1871
1872 # Format cmd using config. Example: cmd='{hpc} report A.tix'
1873 cmd = cmd.format(**config.__dict__)
1874 if_verbose(3, cmd + ('< ' + os.path.basename(stdin) if stdin else ''))
1875
1876 stdin_file = io.open(stdin, 'rb') if stdin else None
1877 stdout_buffer = b''
1878 stderr_buffer = b''
1879
1880 hStdErr = subprocess.PIPE
1881 if stderr is subprocess.STDOUT:
1882 hStdErr = subprocess.STDOUT
1883
1884 try:
        # cmd is a complex command in Bourne-shell syntax,
        # e.g. (cd . && 'C:/users/simonpj/HEAD/inplace/bin/ghc-stage2' ...etc).
        # Hence it must ultimately be run by a Bourne shell. It's the timeout
        # program's job to invoke the Bourne shell.
1889
1890 r = subprocess.Popen([timeout_prog, timeout, cmd],
1891 stdin=stdin_file,
1892 stdout=subprocess.PIPE,
1893 stderr=hStdErr,
1894 env=ghc_env)
1895
1896 stdout_buffer, stderr_buffer = r.communicate()
1897 finally:
1898 if stdin_file:
1899 stdin_file.close()
1900 if config.verbose >= 1 and print_output:
1901 if stdout_buffer:
1902 sys.stdout.buffer.write(stdout_buffer)
1903 if stderr_buffer:
1904 sys.stderr.buffer.write(stderr_buffer)
1905
1906 if stdout:
1907 with io.open(stdout, 'wb') as f:
1908 f.write(stdout_buffer)
1909 if stderr:
1910 if stderr is not subprocess.STDOUT:
1911 with io.open(stderr, 'wb') as f:
1912 f.write(stderr_buffer)
1913
1914 if r.returncode == 98:
1915 # The python timeout program uses 98 to signal that ^C was pressed
1916 stopNow()
1917 if r.returncode == 99 and getTestOpts().exit_code != 99:
1918 # Only print a message when timeout killed the process unexpectedly.
1919 if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
1920 return r.returncode
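
# A typical call (illustrative; `opts` and `name` stand for a test's options
# and name), capturing the process output in files and allowing twice the
# usual timeout:
#
#   exit_code = runCmd('cd "{0}" && ./{1}'.format(opts.testdir, name),
#                      stdout=in_testdir(name, 'run.stdout'),
#                      stderr=in_testdir(name, 'run.stderr'),
#                      timeout_multiplier=2.0)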
1921
1922 # -----------------------------------------------------------------------------
1923 # checking if ghostscript is available for checking the output of hp2ps
1924
1925 def genGSCmd(psfile):
1926 return '{{gs}} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "{0}"'.format(psfile)
1927
def gsNotWorking():
    global gs_working
    print("GhostScript not available for hp2ps tests")

global gs_working
gs_working = False
if config.have_profiling:
    if config.gs != '':
        resultGood = runCmd(genGSCmd(config.top + '/config/good.ps'))
        if resultGood == 0:
            resultBad = runCmd(genGSCmd(config.top + '/config/bad.ps') +
                               ' >/dev/null 2>&1')
            if resultBad != 0:
                print("GhostScript available for hp2ps tests")
                gs_working = True
            else:
                gsNotWorking()
        else:
            gsNotWorking()
    else:
        gsNotWorking()
1949
1950 def add_suffix( name, suffix ):
1951 if suffix == '':
1952 return name
1953 else:
1954 return name + '.' + suffix
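
# e.g. add_suffix('T1', 'stdout') == 'T1.stdout'; an empty suffix leaves the
# name unchanged.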
1955
1956 def add_hs_lhs_suffix(name):
1957 if getTestOpts().c_src:
1958 return add_suffix(name, 'c')
1959 elif getTestOpts().cmm_src:
1960 return add_suffix(name, 'cmm')
1961 elif getTestOpts().objc_src:
1962 return add_suffix(name, 'm')
1963 elif getTestOpts().objcpp_src:
1964 return add_suffix(name, 'mm')
1965 elif getTestOpts().literate:
1966 return add_suffix(name, 'lhs')
1967 else:
1968 return add_suffix(name, 'hs')
1969
def replace_suffix( name, suffix ):
    base, _ = os.path.splitext(name)
    return base + '.' + suffix
1973
1974 def in_testdir(name, suffix=''):
1975 return os.path.join(getTestOpts().testdir, add_suffix(name, suffix))
1976
1977 def in_srcdir(name, suffix=''):
1978 return os.path.join(getTestOpts().srcdir, add_suffix(name, suffix))
1979
1980 # Finding the sample output. The filename is of the form
1981 #
1982 # <test>.stdout[-ws-<wordsize>][-<platform>|-<os>]
1983 #
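# For example (with hypothetical config values platform=x86_64-unknown-linux,
# os=linux, wordsize=64), the candidates for ('T1', 'stdout') are tried in
# this order:
#
#   T1.stdout-ws-64-x86_64-unknown-linux
#   T1.stdout-x86_64-unknown-linux
#   T1.stdout-ws-64-linux
#   T1.stdout-linux
#   T1.stdout-ws-64
#   T1.stdout
#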
1984 def find_expected_file(name, suff):
1985 basename = add_suffix(name, suff)
1986
1987 files = [basename + ws + plat
1988 for plat in ['-' + config.platform, '-' + config.os, '']
1989 for ws in ['-ws-' + config.wordsize, '']]
1990
1991 for f in files:
1992 if os.path.exists(in_srcdir(f)):
1993 return f
1994
1995 return basename
1996
1997 if config.msys:
1998 import stat
1999 def cleanup():
2000 testdir = getTestOpts().testdir
2001 max_attempts = 5
2002 retries = max_attempts
2003 def on_error(function, path, excinfo):
2004 # At least one test (T11489) removes the write bit from a file it
2005 # produces. Windows refuses to delete read-only files with a
2006 # permission error. Try setting the write bit and try again.
2007 os.chmod(path, stat.S_IWRITE)
2008 function(path)
2009
        # On Windows we have to retry the delete a couple of times.
        # The reason for this is that a FileDelete command just marks a
        # file for deletion. The file is really only removed when the last
        # handle to it is closed. Unfortunately, a lot of system services
        # (such as the built-in AV scanner and the search indexer) can hold
        # a file temporarily open with a shared read-only lock.
        #
        # We can't really guarantee that these are all off, so whenever the
        # folder still exists after an rmtree, we wait a bit and try again.
        #
        # From what I've seen of the tests on the CI server, this is
        # relatively rare, so overall we won't be retrying a lot. If the
        # folder is still locked after a reasonable amount of time, we abort
        # the current test by throwing an exception, so that it doesn't fail
        # with an even more cryptic error.
        #
        # See Trac #13162
2026 exception = None
2027 while retries > 0 and os.path.exists(testdir):
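            # Linear backoff: sleep 0, 6, 12, ... seconds as retries are
            # used up (retries counts down from max_attempts).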
2028 time.sleep((max_attempts-retries)*6)
2029 try:
2030 shutil.rmtree(testdir, onerror=on_error, ignore_errors=False)
2031 except Exception as e:
2032 exception = e
2033 retries -= 1
2034
2035 if retries == 0 and os.path.exists(testdir):
2036 raise Exception("Unable to remove folder '%s': %s\nUnable to start current test."
2037 % (testdir, exception))
2038 else:
2039 def cleanup():
2040 testdir = getTestOpts().testdir
2041 if os.path.exists(testdir):
2042 shutil.rmtree(testdir, ignore_errors=False)
2043
2044
2045 # -----------------------------------------------------------------------------
2046 # Return a list of all the files ending in '.T' below directories roots.
2047
2048 def findTFiles(roots):
2049 for root in roots:
2050 for path, dirs, files in os.walk(root, topdown=True):
2051 # Never pick up .T files in uncleaned .run directories.
2052 dirs[:] = [dir for dir in sorted(dirs)
2053 if not dir.endswith(testdir_suffix)]
2054 for filename in files:
2055 if filename.endswith('.T'):
2056 yield os.path.join(path, filename)
2057
2058 # -----------------------------------------------------------------------------
2059 # Output a test summary to the specified file object
2060
2061 def summary(t, file, short=False, color=False):
2062
2063 file.write('\n')
2064 printUnexpectedTests(file,
2065 [t.unexpected_passes, t.unexpected_failures,
2066 t.unexpected_stat_failures, t.framework_failures])
2067
2068 if short:
2069 # Only print the list of unexpected tests above.
2070 return
2071
2072 colorize = lambda s: s
2073 if color:
2074 if len(t.unexpected_failures) > 0 or \
2075 len(t.unexpected_stat_failures) > 0 or \
2076 len(t.framework_failures) > 0:
2077 colorize = str_fail
2078 else:
2079 colorize = str_pass
2080
2081 file.write(colorize('SUMMARY') + ' for test run started at '
2082 + time.strftime("%c %Z", t.start_time) + '\n'
2083 + str(datetime.timedelta(seconds=
2084 round(time.time() - time.mktime(t.start_time)))).rjust(8)
2085 + ' spent to go through\n'
2086 + repr(t.total_tests).rjust(8)
2087 + ' total tests, which gave rise to\n'
2088 + repr(t.total_test_cases).rjust(8)
2089 + ' test cases, of which\n'
2090 + repr(t.n_tests_skipped).rjust(8)
2091 + ' were skipped\n'
2092 + '\n'
2093 + repr(len(t.missing_libs)).rjust(8)
2094 + ' had missing libraries\n'
2095 + repr(t.n_expected_passes).rjust(8)
2096 + ' expected passes\n'
2097 + repr(t.n_expected_failures).rjust(8)
2098 + ' expected failures\n'
2099 + '\n'
2100 + repr(len(t.framework_failures)).rjust(8)
2101 + ' caused framework failures\n'
2102 + repr(len(t.framework_warnings)).rjust(8)
2103 + ' caused framework warnings\n'
2104 + repr(len(t.unexpected_passes)).rjust(8)
2105 + ' unexpected passes\n'
2106 + repr(len(t.unexpected_failures)).rjust(8)
2107 + ' unexpected failures\n'
2108 + repr(len(t.unexpected_stat_failures)).rjust(8)
2109 + ' unexpected stat failures\n'
2110 + '\n')
2111
2112 if t.unexpected_passes:
2113 file.write('Unexpected passes:\n')
2114 printTestInfosSummary(file, t.unexpected_passes)
2115
2116 if t.unexpected_failures:
2117 file.write('Unexpected failures:\n')
2118 printTestInfosSummary(file, t.unexpected_failures)
2119
2120 if t.unexpected_stat_failures:
2121 file.write('Unexpected stat failures:\n')
2122 printTestInfosSummary(file, t.unexpected_stat_failures)
2123
2124 if t.framework_failures:
2125 file.write('Framework failures:\n')
2126 printTestInfosSummary(file, t.framework_failures)
2127
2128 if t.framework_warnings:
2129 file.write('Framework warnings:\n')
2130 printTestInfosSummary(file, t.framework_warnings)
2131
2132 if stopping():
2133 file.write('WARNING: Testsuite run was terminated early\n')
2134
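# The TEST="..." line emitted below is in a form that can be fed back to the
# testsuite driver to re-run just those tests (e.g. make test TEST="...").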
2135 def printUnexpectedTests(file, testInfoss):
2136 unexpected = set(name for testInfos in testInfoss
2137 for (_, name, _, _) in testInfos
2138 if not name.endswith('.T'))
2139 if unexpected:
2140 file.write('Unexpected results from:\n')
2141 file.write('TEST="' + ' '.join(sorted(unexpected)) + '"\n')
2142 file.write('\n')
2143
2144 def printTestInfosSummary(file, testInfos):
2145 maxDirLen = max(len(directory) for (directory, _, _, _) in testInfos)
2146 for (directory, name, reason, way) in testInfos:
2147 directory = directory.ljust(maxDirLen)
2148 file.write(' {directory} {name} [{reason}] ({way})\n'.format(**locals()))
2149 file.write('\n')
2150
2151 def modify_lines(s, f):
2152 s = '\n'.join([f(l) for l in s.splitlines()])
2153 if s and s[-1] != '\n':
2154 # Prevent '\ No newline at end of file' warnings when diffing.
2155 s += '\n'
2156 return s
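
# e.g. modify_lines('foo\nbar', lambda l: l.upper()) == 'FOO\nBAR\n'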