# coding=utf8
#
# (c) Simon Marlow 2002
#

import io
import shutil
import os
import re
import traceback
import time
import datetime
import copy
import glob
import sys
from math import ceil, trunc
from pathlib import PurePath
import collections
import subprocess

from testglobals import config, ghc_env, default_testopts, brokens, t
from testutil import strip_quotes, lndir, link_or_copy_file, passed, failBecause, str_fail, str_pass
import perf_notes as Perf
from perf_notes import MetricChange
extra_src_files = {'T4198': ['exitminus1.c']} # TODO: See #12223

global pool_sema
if config.use_threads:
    import threading
    pool_sema = threading.BoundedSemaphore(value=config.threads)

global wantToStop
wantToStop = False

def stopNow():
    global wantToStop
    wantToStop = True

def stopping():
    return wantToStop


# Options valid for the current test only (these get reset to
# testdir_testopts after each test).

global testopts_local
if config.use_threads:
    testopts_local = threading.local()
else:
    class TestOpts_Local:
        pass
    testopts_local = TestOpts_Local()

def getTestOpts():
    return testopts_local.x

def setLocalTestOpts(opts):
    global testopts_local
    testopts_local.x = opts

def isCompilerStatsTest():
    opts = getTestOpts()
    return bool(opts.is_compiler_stats_test)

def isStatsTest():
    opts = getTestOpts()
    return bool(opts.stats_range_fields)


# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
    global thisdir_settings
    thisdir_settings = [thisdir_settings, f]

# -----------------------------------------------------------------------------
# Canned setup functions for common cases. E.g. for a test you might say
#
#      test('test001', normal, compile, [''])
#
# to run it without any options, but change it to
#
#      test('test001', expect_fail, compile, [''])
#
# to expect failure for this test.
#
# type TestOpt = (name :: String, opts :: Object) -> IO ()

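# Several setup functions can be combined by passing them as a list; each
# one receives the test name and its options. An illustrative (hypothetical
# test name) combination:
#
#      test('test002',
#           [extra_files(['Helper.hs']), when(opsys('mingw32'), skip)],
#           multimod_compile, ['Main', '-v0'])
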
def normal( name, opts ):
    return

def skip( name, opts ):
    opts.skip = True

def expect_fail( name, opts ):
    # The compiler, testdriver, OS or platform is missing a certain
    # feature, and we don't plan to or can't fix it now or in the
    # future.
    opts.expect = 'fail'

def reqlib( lib ):
    return lambda name, opts, l=lib: _reqlib (name, opts, l )

def stage1(name, opts):
    # See Note [Why is there no stage1 setup function?]
    framework_fail(name, 'stage1 setup function does not exist',
                   'add your test to testsuite/tests/stage1 instead')

# Note [Why is there no stage1 setup function?]
#
# Presumably a stage1 setup function would signal that the stage1
# compiler should be used to compile a test.
#
# Trouble is, the path to the compiler + the `ghc --info` settings for
# that compiler are currently passed in from the `make` part of the
# testsuite driver.
#
# Switching compilers in the Python part would be entirely too late, as
# all ghc_with_* settings would be wrong. See config/ghc for possible
# consequences (for example, config.run_ways would still be
# based on the default compiler, quite likely causing ./validate --slow
# to fail).
#
# It would be possible to let the Python part of the testsuite driver
# make the call to `ghc --info`, but doing so would require quite some
# work. Care has to be taken to not affect the run_command tests for
# example, as they also use the `ghc --info` settings:
# quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
#
# If you want a test to run using the stage1 compiler, add it to the
# testsuite/tests/stage1 directory. Validate runs the tests in that
# directory with `make stage=1`.

# Cache the results of looking to see if we have a library or not.
# This makes quite a difference, especially on Windows.
have_lib_cache = {}

def have_library(lib):
    """ Test whether the given library is available """
    if lib in have_lib_cache:
        got_it = have_lib_cache[lib]
    else:
        cmd = strip_quotes(config.ghc_pkg)
        p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             env=ghc_env)
        # read from stdout and stderr to avoid blocking due to
        # buffers filling
        p.communicate()
        r = p.wait()
        got_it = r == 0
        have_lib_cache[lib] = got_it

    return got_it

def _reqlib( name, opts, lib ):
    if not have_library(lib):
        opts.expect = 'missing-lib'

def req_haddock( name, opts ):
    if not config.haddock:
        opts.expect = 'missing-lib'

def req_profiling( name, opts ):
    '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
    if not config.have_profiling:
        opts.expect = 'fail'

def req_shared_libs( name, opts ):
    if not config.have_shared_libs:
        opts.expect = 'fail'

def req_interp( name, opts ):
    if not config.have_interp:
        opts.expect = 'fail'

def req_smp( name, opts ):
    if not config.have_smp:
        opts.expect = 'fail'

def ignore_stdout(name, opts):
    opts.ignore_stdout = True

def ignore_stderr(name, opts):
    opts.ignore_stderr = True

def combined_output( name, opts ):
    opts.combined_output = True

# -----

def expect_fail_for( ways ):
    return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )

def _expect_fail_for( name, opts, ways ):
    opts.expect_fail_for = ways

def expect_broken( bug ):
    # This test is expected not to work due to the indicated Trac bug
    # number.
    return lambda name, opts, b=bug: _expect_broken (name, opts, b )

def _expect_broken( name, opts, bug ):
    record_broken(name, opts, bug)
    opts.expect = 'fail'

def expect_broken_for( bug, ways ):
    return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )

def _expect_broken_for( name, opts, bug, ways ):
    record_broken(name, opts, bug)
    opts.expect_fail_for = ways

def record_broken(name, opts, bug):
    me = (bug, opts.testdir, name)
    if me not in brokens:
        brokens.append(me)

def _expect_pass(way):
    # Helper function. Not intended for use in .T files.
    opts = getTestOpts()
    return opts.expect == 'pass' and way not in opts.expect_fail_for

# -----

def omit_ways( ways ):
    return lambda name, opts, w=ways: _omit_ways( name, opts, w )

def _omit_ways( name, opts, ways ):
    opts.omit_ways = ways

# -----

def only_ways( ways ):
    return lambda name, opts, w=ways: _only_ways( name, opts, w )

def _only_ways( name, opts, ways ):
    opts.only_ways = ways

# -----

def extra_ways( ways ):
    return lambda name, opts, w=ways: _extra_ways( name, opts, w )

def _extra_ways( name, opts, ways ):
    opts.extra_ways = ways

# -----

def set_stdin( file ):
    return lambda name, opts, f=file: _set_stdin(name, opts, f)

def _set_stdin( name, opts, f ):
    opts.stdin = f

# -----

def exit_code( val ):
    return lambda name, opts, v=val: _exit_code(name, opts, v)

def _exit_code( name, opts, v ):
    opts.exit_code = v

def signal_exit_code( val ):
    if opsys('solaris2'):
        return exit_code( val )
    else:
        # When an application running on Linux receives a fatal error
        # signal, its exit code is encoded as 128 + the signal value.
        # See http://www.tldp.org/LDP/abs/html/exitcodes.html
        # Mac OS X presumably behaves the same way; at least the
        # Mac OS X builder's behaviour suggests so.
        return exit_code( val+128 )

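# For example (hypothetical test name), a program that dies with SIGSEGV
# (signal 11) on Linux exits with code 128 + 11 = 139, so one would write:
#
#      test('T1234', signal_exit_code(11), compile_and_run, [''])
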
# -----

def compile_timeout_multiplier( val ):
    return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)

def _compile_timeout_multiplier( name, opts, v ):
    opts.compile_timeout_multiplier = v

def run_timeout_multiplier( val ):
    return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)

def _run_timeout_multiplier( name, opts, v ):
    opts.run_timeout_multiplier = v

# -----

def extra_run_opts( val ):
    return lambda name, opts, v=val: _extra_run_opts(name, opts, v)

def _extra_run_opts( name, opts, v ):
    opts.extra_run_opts = v

# -----

def extra_hc_opts( val ):
    return lambda name, opts, v=val: _extra_hc_opts(name, opts, v)

def _extra_hc_opts( name, opts, v ):
    opts.extra_hc_opts = v

# -----

def extra_clean( files ):
    # TODO. Remove all calls to extra_clean.
    return lambda _name, _opts: None

def extra_files(files):
    return lambda name, opts: _extra_files(name, opts, files)

def _extra_files(name, opts, files):
    opts.extra_files.extend(files)

# -----

# Defaults to "test everything, and only break on extreme cases"
#
# The inputs to this function are slightly interesting:
# metric can be either:
#     - 'all', in which case all 3 possible metrics are collected and compared.
#     - The specific metric one wants to use in the test.
#     - A list of the metrics one wants to use in the test.
#
# Deviation defaults to 20% because the goal is correctness over performance.
# The testsuite should avoid breaking when there is not an actual error.
# Instead, the testsuite should notify of regressions in a non-breaking manner.
#
# collect_compiler_stats is used when the metrics collected are about the compiler.
# collect_stats is used in the majority case when the metrics to be collected
# are about the performance of the runtime code generated by the compiler.
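#
# For example (hypothetical test names), to check the compiler's allocations
# with a 5% tolerance, or all runtime metrics of the generated program with
# the default 20% tolerance:
#
#      test('T1234', collect_compiler_stats('bytes allocated', 5), compile, [''])
#      test('T5678', collect_stats(), compile_and_run, [''])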
def collect_compiler_stats(metric='all', deviation=20):
    return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d, True)

def collect_stats(metric='all', deviation=20):
    return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d)

def testing_metrics():
    return ['bytes allocated', 'peak_megabytes_allocated', 'max_bytes_used']

# This is an internal function that is used only in the implementation.
# 'is_compiler_stats_test' is somewhat of an unfortunate name.
# If the boolean is set to true, it indicates that this test is one that
# measures the performance numbers of the compiler.
# As this is a fairly rare case in the testsuite, it defaults to false to
# indicate that it is a 'normal' performance test.
def _collect_stats(name, opts, metric, deviation, is_compiler_stats_test=False):
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    tests = Perf.get_perf_stats('HEAD^')

    # Might have multiple metrics being measured for a single test.
    test = [t for t in tests if t.test == name]

    if tests == [] or test == []:
        # There are no prior metrics for this test.
        if isinstance(metric, str):
            if metric == 'all':
                for field in testing_metrics():
                    opts.stats_range_fields[field] = None
            else:
                opts.stats_range_fields[metric] = None
        if isinstance(metric, list):
            for field in metric:
                opts.stats_range_fields[field] = None

        return

    if is_compiler_stats_test:
        opts.is_compiler_stats_test = True

    # Compiler performance numbers change when debugging is on, making the results
    # useless and confusing. Therefore, skip if debugging is on.
    if config.compiler_debugged and is_compiler_stats_test:
        opts.skip = 1

    # get the average value of the given metric from test
    def get_avg_val(metric_2):
        metric_2_metrics = [float(t.value) for t in test if t.metric == metric_2]
        return sum(metric_2_metrics) / len(metric_2_metrics)

    # 'all' is a shorthand to test for bytes allocated, peak megabytes allocated, and max bytes used.
    if isinstance(metric, str):
        if metric == 'all':
            for field in testing_metrics():
                opts.stats_range_fields[field] = (get_avg_val(field), deviation)
            return
        else:
            opts.stats_range_fields[metric] = (get_avg_val(metric), deviation)
            return

    if isinstance(metric, list):
        for field in metric:
            opts.stats_range_fields[field] = (get_avg_val(field), deviation)

# -----

def when(b, f):
    # When list_broken is on, we want to see all expect_broken calls,
    # so we always do f
    if b or config.list_broken:
        return f
    else:
        return normal

def unless(b, f):
    return when(not b, f)

def doing_ghci():
    return 'ghci' in config.run_ways

def ghc_dynamic():
    return config.ghc_dynamic

def fast():
    return config.speed == 2

def platform( plat ):
    return config.platform == plat

def opsys( os ):
    return config.os == os

def arch( arch ):
    return config.arch == arch

def wordsize( ws ):
    return config.wordsize == str(ws)

def msys( ):
    return config.msys

def cygwin( ):
    return config.cygwin

def have_vanilla( ):
    return config.have_vanilla

def have_dynamic( ):
    return config.have_dynamic

def have_profiling( ):
    return config.have_profiling

def in_tree_compiler( ):
    return config.in_tree_compiler

def unregisterised( ):
    return config.unregisterised

def compiler_profiled( ):
    return config.compiler_profiled

def compiler_debugged( ):
    return config.compiler_debugged

def have_gdb( ):
    return config.have_gdb

def have_readelf( ):
    return config.have_readelf

# Many tests sadly break with integer-simple due to GHCi's ignorance of it.
broken_without_gmp = unless(have_library('integer-gmp'), expect_broken(16043))

# ---

def high_memory_usage(name, opts):
    opts.alone = True

# If a test is for a multi-CPU race, then running the test alone
# increases the chance that we'll actually see it.
def multi_cpu_race(name, opts):
    opts.alone = True

# ---
def literate( name, opts ):
    opts.literate = True

def c_src( name, opts ):
    opts.c_src = True

def objc_src( name, opts ):
    opts.objc_src = True

def objcpp_src( name, opts ):
    opts.objcpp_src = True

def cmm_src( name, opts ):
    opts.cmm_src = True

def outputdir( odir ):
    return lambda name, opts, d=odir: _outputdir(name, opts, d)

def _outputdir( name, opts, odir ):
    opts.outputdir = odir

# ----

def pre_cmd( cmd ):
    return lambda name, opts, c=cmd: _pre_cmd(name, opts, c)

def _pre_cmd( name, opts, cmd ):
    opts.pre_cmd = cmd

# ----

def cmd_prefix( prefix ):
    return lambda name, opts, p=prefix: _cmd_prefix(name, opts, p)

def _cmd_prefix( name, opts, prefix ):
    opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd

# ----

def cmd_wrapper( fun ):
    return lambda name, opts, f=fun: _cmd_wrapper(name, opts, f)

def _cmd_wrapper( name, opts, fun ):
    opts.cmd_wrapper = fun

# ----

def compile_cmd_prefix( prefix ):
    return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, p)

def _compile_cmd_prefix( name, opts, prefix ):
    opts.compile_cmd_prefix = prefix

# ----

def check_stdout( f ):
    return lambda name, opts, f=f: _check_stdout(name, opts, f)

def _check_stdout( name, opts, f ):
    opts.check_stdout = f

def no_check_hp(name, opts):
    opts.check_hp = False

# ----

def filter_stdout_lines( regex ):
    """ Filter lines of stdout with the given regular expression """
    def f( name, opts ):
        _normalise_fun(name, opts, lambda s: '\n'.join(re.findall(regex, s)))
    return f

def normalise_slashes( name, opts ):
    _normalise_fun(name, opts, normalise_slashes_)

def normalise_exe( name, opts ):
    _normalise_fun(name, opts, normalise_exe_)

def normalise_fun( *fs ):
    return lambda name, opts: _normalise_fun(name, opts, fs)

def _normalise_fun( name, opts, *fs ):
    opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)

def normalise_errmsg_fun( *fs ):
    return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)

def _normalise_errmsg_fun( name, opts, *fs ):
    opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)

def check_errmsg(needle):
    def norm(str):
        if needle in str:
            return "%s contained in -ddump-simpl\n" % needle
        else:
            return "%s not contained in -ddump-simpl\n" % needle
    return normalise_errmsg_fun(norm)

def grep_errmsg(needle):
    def norm(str):
        return "".join(filter(lambda l: re.search(needle, l), str.splitlines(True)))
    return normalise_errmsg_fun(norm)

def normalise_whitespace_fun(f):
    return lambda name, opts: _normalise_whitespace_fun(name, opts, f)

def _normalise_whitespace_fun(name, opts, f):
    opts.whitespace_normaliser = f

def normalise_version_( *pkgs ):
    def normalise_version__( str ):
        return re.sub('(' + '|'.join(map(re.escape,pkgs)) + ')-[0-9.]+',
                      '\\1-<VERSION>', str)
    return normalise_version__

def normalise_version( *pkgs ):
    def normalise_version__( name, opts ):
        _normalise_fun(name, opts, normalise_version_(*pkgs))
        _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
    return normalise_version__

def normalise_drive_letter(name, opts):
    # Windows only. Change D:\\ to C:\\.
    _normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))

def keep_prof_callstacks(name, opts):
    """Keep profiling callstacks.

    Use together with `only_ways(prof_ways)`.
    """
    opts.keep_prof_callstacks = True

def join_normalisers(*a):
    """
    Compose functions, flattening sequences.

        join_normalisers(f1,[f2,f3],f4)

    is the same as

        lambda x: f1(f2(f3(f4(x))))
    """

    def flatten(l):
        """
        Taken from http://stackoverflow.com/a/2158532/946226
        """
        for el in l:
            if (isinstance(el, collections.Iterable)
                and not isinstance(el, (bytes, str))):
                for sub in flatten(el):
                    yield sub
            else:
                yield el

    a = flatten(a)

    fn = lambda x:x # identity function
    for f in a:
        assert callable(f)
        fn = lambda x,f=f,fn=fn: fn(f(x))
    return fn

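# A small illustration (not part of the driver): nested lists are flattened
# and the functions compose right-to-left over the input, so
#
#      join_normalisers(lambda s: s.strip(), [lambda s: s.lower()])('  Hi ')
#
# first lowercases to '  hi ' and then strips, evaluating to 'hi'.
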
# ----
# Function for composing two opt-fns together

def executeSetups(fs, name, opts):
    if type(fs) is list:
        # If we have a list of setups, then execute each one
        for f in fs:
            executeSetups(f, name, opts)
    else:
        # fs is a single function, so just apply it
        fs(name, opts)

# -----------------------------------------------------------------------------
# The current directory of tests

def newTestDir(tempdir, dir):

    global thisdir_settings
    # reset the options for this test directory
    def settings(name, opts, tempdir=tempdir, dir=dir):
        return _newTestDir(name, opts, tempdir, dir)
    thisdir_settings = settings

# Should be equal to entry in toplevel .gitignore.
testdir_suffix = '.run'

def _newTestDir(name, opts, tempdir, dir):
    testdir = os.path.join('', *(p for p in PurePath(dir).parts if p != '..'))
    opts.srcdir = os.path.join(os.getcwd(), dir)
    opts.testdir = os.path.join(tempdir, testdir, name + testdir_suffix)
    opts.compiler_always_flags = config.compiler_always_flags

# -----------------------------------------------------------------------------
# Actually doing tests

parallelTests = []
aloneTests = []
allTestNames = set([])

def runTest(watcher, opts, name, func, args):
    if config.use_threads:
        pool_sema.acquire()
        t = threading.Thread(target=test_common_thread,
                             name=name,
                             args=(watcher, name, opts, func, args))
        t.daemon = False
        t.start()
    else:
        test_common_work(watcher, name, opts, func, args)

# name  :: String
# setup :: [TestOpt] -> IO ()
def test(name, setup, func, args):
    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    if config.run_only_some_tests:
        if name not in config.only:
            return
        else:
            # Note [Mutating config.only]
            # config.only is initially the set of tests requested by
            # the user (via 'make TEST='). We then remove all tests that
            # we've already seen (in .T files), so that we can later
            # report on any tests we couldn't find and error out.
            config.only.remove(name)

    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
    myTestOpts = copy.deepcopy(default_testopts)

    executeSetups([thisdir_settings, setup], name, myTestOpts)

    thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
    else:
        parallelTests.append(thisTest)
    allTestNames.add(name)

if config.use_threads:
    def test_common_thread(watcher, name, opts, func, args):
        try:
            test_common_work(watcher, name, opts, func, args)
        finally:
            pool_sema.release()

def get_package_cache_timestamp():
    if config.package_conf_cache_file == '':
        return 0.0
    else:
        try:
            return os.stat(config.package_conf_cache_file).st_mtime
        except OSError: # the cache file may not exist yet
            return 0.0

do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o', '.out') # See #12112

def test_common_work(watcher, name, opts, func, args):
    try:
        t.total_tests += 1
        setLocalTestOpts(opts)

        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()

        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']

        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]

        t.total_test_cases += len(all_ways)

        ok_way = lambda way: \
            not getTestOpts().skip \
            and (getTestOpts().only_ways is None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and (not (config.only_perf_tests and not isStatsTest())) \
            and way not in getTestOpts().omit_ways

        # The ways we are going to run, after filtering out the ways we
        # are asked to skip
        do_ways = list(filter (ok_way,all_ways))

        # Only run all ways in slow mode.
        # See Note [validate and testsuite speed] in toplevel Makefile.
        if config.accept:
            # Only ever run one way
            do_ways = do_ways[:1]
        elif config.speed > 0:
            # However, if we EXPLICITLY asked for a way (with extra_ways)
            # please test it!
            explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
            other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
            do_ways = other_ways[:1] + explicit_ways

        # Find all files in the source directory that this test
        # depends on. Do this only once for all ways.
        # Generously add all filenames that start with the name of
        # the test to this set, as a convenience to test authors.
        # They will have to use the `extra_files` setup function to
        # specify all other files that their test depends on (but
        # this seems to be necessary for only about 10% of all
        # tests).
        files = set(f for f in os.listdir(opts.srcdir)
                    if f.startswith(name) and f != name and
                       not f.endswith(testdir_suffix) and
                       not os.path.splitext(f)[1] in do_not_copy)
        for filename in (opts.extra_files + extra_src_files.get(name, [])):
            if filename.startswith('/'):
                framework_fail(name, 'whole-test',
                    'no absolute paths in extra_files please: ' + filename)

            elif '*' in filename:
                # Don't use wildcards in extra_files too much, as
                # globbing is slow.
                files.update((os.path.relpath(f, opts.srcdir)
                              for f in glob.iglob(in_srcdir(filename))))

            elif filename:
                files.add(filename)

            else:
                framework_fail(name, 'whole-test', 'extra_file is empty string')

        # Run the required tests...
        for way in do_ways:
            if stopping():
                break
            try:
                do_test(name, way, func, args, files)
            except KeyboardInterrupt:
                stopNow()
            except Exception as e:
                framework_fail(name, way, str(e))
                traceback.print_exc()

        t.n_tests_skipped += len(set(all_ways) - set(do_ways))

        if config.cleanup and do_ways:
            try:
                cleanup()
            except Exception as e:
                framework_fail(name, 'runTest', 'Unhandled exception during cleanup: ' + str(e))

        package_conf_cache_file_end_timestamp = get_package_cache_timestamp()

        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))

    except Exception as e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
    finally:
        watcher.notify()

def do_test(name, way, func, args, files):
    opts = getTestOpts()

    full_name = name + '(' + way + ')'

    if_verbose(2, "=====> {0} {1} of {2} {3}".format(
        full_name, t.total_tests, len(allTestNames),
        [len(t.unexpected_passes),
         len(t.unexpected_failures),
         len(t.framework_failures)]))

    # Clean up prior to the test, so that we can't spuriously conclude
    # that it passed on the basis of old run outputs.
    cleanup()
    os.makedirs(opts.testdir)

    # Link all source files for this test into a new directory in
    # /tmp, and run the test in that directory. This makes it
    # possible to run tests in parallel, without modification, that
    # would otherwise (accidentally) write to the same output file.
    # It also makes it easier to keep the testsuite clean.

    for extra_file in files:
        src = in_srcdir(extra_file)
        dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
        if os.path.isfile(src):
            link_or_copy_file(src, dst)
        elif os.path.isdir(src):
            os.mkdir(dst)
            lndir(src, dst)
        else:
            if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
                # When using a ghc built without haddock support, .t
                # files are rightfully missing. Don't
                # framework_fail. Test will be skipped later.
                pass
            else:
                framework_fail(name, way,
                    'extra_file does not exist: ' + extra_file)

    if func.__name__ == 'run_command' or opts.pre_cmd:
        # When running 'MAKE' make sure 'TOP' still points to the
        # root of the testsuite.
        src_makefile = in_srcdir('Makefile')
        dst_makefile = in_testdir('Makefile')
        if os.path.exists(src_makefile):
            with io.open(src_makefile, 'r', encoding='utf8') as src:
                makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
            with io.open(dst_makefile, 'w', encoding='utf8') as dst:
                dst.write(makefile)

    if opts.pre_cmd:
        exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, override_options(opts.pre_cmd)),
                           stderr = subprocess.STDOUT,
                           print_output = config.verbose >= 3)

        # If user used expect_broken then don't record failures of pre_cmd
        if exit_code != 0 and opts.expect not in ['fail']:
            framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
            if_verbose(1, '** pre_cmd was "{0}".'.format(override_options(opts.pre_cmd)))

    result = func(*[name,way] + args)

    if opts.expect not in ['pass', 'fail', 'missing-lib']:
        framework_fail(name, way, 'bad expected ' + opts.expect)

    try:
        passFail = result['passFail']
    except (KeyError, TypeError):
        passFail = 'No passFail found'

    directory = re.sub(r'^\.[/\\]', '', opts.testdir)

    if passFail == 'pass':
        if _expect_pass(way):
            t.expected_passes.append((directory, name, way))
            t.n_expected_passes += 1
        else:
            if_verbose(1, '*** unexpected pass for %s' % full_name)
            t.unexpected_passes.append((directory, name, 'unexpected', way))
    elif passFail == 'fail':
        if _expect_pass(way):
            reason = result['reason']
            tag = result.get('tag')
            if tag == 'stat':
                if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
                t.unexpected_stat_failures.append((directory, name, reason, way))
            else:
                if_verbose(1, '*** unexpected failure for %s' % full_name)
                t.unexpected_failures.append((directory, name, reason, way))
        else:
            if opts.expect == 'missing-lib':
                t.missing_libs.append((directory, name, 'missing-lib', way))
            else:
                t.n_expected_failures += 1
    else:
        framework_fail(name, way, 'bad result ' + passFail)

# Make is often invoked with -s, which means if it fails, we get
# no feedback at all. This is annoying. So let's remove the option
# if found and instead have the testsuite decide on what to do
# with the output.
def override_options(pre_cmd):
    if config.verbose >= 5 and bool(re.match(r'\$make', pre_cmd, re.I)):
        return pre_cmd.replace('-s', '') \
                      .replace('--silent', '') \
                      .replace('--quiet', '')

    return pre_cmd

def framework_fail(name, way, reason):
    opts = getTestOpts()
    directory = re.sub(r'^\.[/\\]', '', opts.testdir)
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
    t.framework_failures.append((directory, name, way, reason))

def framework_warn(name, way, reason):
    opts = getTestOpts()
    directory = re.sub(r'^\.[/\\]', '', opts.testdir)
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework warning for %s %s ' % (full_name, reason))
    t.framework_warnings.append((directory, name, way, reason))

def badResult(result):
    try:
        if result['passFail'] == 'pass':
            return False
        return True
    except (KeyError, TypeError):
        return True

# -----------------------------------------------------------------------------
# Generic command tests

# A generic command test is expected to run and exit successfully.
#
# The expected exit code can be changed via exit_code() as normal, and
# the expected stdout/stderr are stored in <testname>.stdout and
# <testname>.stderr. The output of the command can be ignored
# altogether by using the setup functions ignore_stdout and
# ignore_stderr.

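# For example (hypothetical test name), a Makefile-driven test would be
# registered in its all.T as:
#
#      test('T1234', normal, run_command, ['$MAKE -s --no-print-directory T1234'])
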
def run_command( name, way, cmd ):
    return simple_run( name, '', override_options(cmd), '' )

# -----------------------------------------------------------------------------
# GHCi tests

def ghci_script( name, way, script):
    flags = ' '.join(get_compiler_flags())
    way_flags = ' '.join(config.way_flags[way])

    # We pass HC and HC_OPTS as environment variables, so that the
    # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
    cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {way_flags} {flags}'
          ).format(flags=flags, way_flags=way_flags)
    # NB: put way_flags before flags, so that flags in all.T can override others

    getTestOpts().stdin = script
    return simple_run( name, way, cmd, getTestOpts().extra_run_opts )

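# A .script file can therefore shell out to the same compiler that GHCi was
# started with; an illustrative (hypothetical) script line:
#
#      :! $HC $HC_OPTS -c Helper.hs
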
# -----------------------------------------------------------------------------
# Compile-only tests

def compile( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], extra_hc_opts )

def compile_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], extra_hc_opts )

def backpack_typecheck( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )

def backpack_typecheck_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )

def backpack_compile( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], extra_hc_opts, backpack=True )

def backpack_compile_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], extra_hc_opts, backpack=True )

def backpack_run( name, way, extra_hc_opts ):
    return compile_and_run__( name, way, '', [], extra_hc_opts, backpack=True )

def multimod_compile( name, way, top_mod, extra_hc_opts ):
    return do_compile( name, way, 0, top_mod, [], extra_hc_opts )

def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
    return do_compile( name, way, 1, top_mod, [], extra_hc_opts )

def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
    return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)

def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
    return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)

def do_compile(name, way, should_fail, top_mod, extra_mods, extra_hc_opts, **kwargs):
    # print 'Compile only, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    result = simple_build(name, way, extra_hc_opts, should_fail, top_mod, 0, 1, **kwargs)

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    expected_stderr_file = find_expected_file(name, 'stderr')
    actual_stderr_file = add_suffix(name, 'comp.stderr')

    if not compare_outputs(way, 'stderr',
                           join_normalisers(getTestOpts().extra_errmsg_normaliser,
                                            normalise_errmsg),
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=getattr(getTestOpts(),
                                                         "whitespace_normaliser",
                                                         normalise_whitespace)):
        return failBecause('stderr mismatch')

    # no problems found, this test passed
    return passed()

def compile_cmp_asm( name, way, extra_hc_opts ):
    print('Compile only, extra args = ', extra_hc_opts)
    result = simple_build(name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)

    if badResult(result):
        return result

    # the actual assembly output should always match the expected output

    expected_asm_file = find_expected_file(name, 'asm')
    actual_asm_file = add_suffix(name, 's')

    if not compare_outputs(way, 'asm',
                           join_normalisers(normalise_errmsg, normalise_asm),
                           expected_asm_file, actual_asm_file):
        return failBecause('asm mismatch')

    # no problems found, this test passed
    return passed()

# -----------------------------------------------------------------------------
# Compile-and-run tests

def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts, backpack=0 ):
    # print 'Compile and run, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    if way.startswith('ghci'): # interpreted...
        return interpreter_run(name, way, extra_hc_opts, top_mod)
    else: # compiled...
        result = simple_build(name, way, extra_hc_opts, 0, top_mod, 1, 1, backpack = backpack)
        if badResult(result):
            return result

        cmd = './' + name

        # we don't check the compiler's stderr for a compile-and-run test
        return simple_run( name, way, cmd, getTestOpts().extra_run_opts )

def compile_and_run( name, way, extra_hc_opts ):
    return compile_and_run__( name, way, '', [], extra_hc_opts)

def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
    return compile_and_run__( name, way, top_mod, [], extra_hc_opts)

def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
    return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)

def stats( name, way, stats_file ):
    opts = getTestOpts()
    return check_stats(name, way, stats_file, opts.stats_range_fields)

def metric_dict(name, way, metric, value):
    return Perf.PerfStat(
        test_env = config.test_env,
        test     = name,
        way      = way,
        metric   = metric,
        value    = value)

# -----------------------------------------------------------------------------
# Check test stats. This prints the results for the user.
# name: name of the test.
# way: the way.
# stats_file: the path of the stats_file containing the stats for the test.
# range_fields: the metrics to check, each mapped to its expected value and
#     allowed deviation (or to None when there is no prior measurement).
# Returns a pass/fail object. Passes if the stats are within the expected
# value ranges.
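#
# For reference, the stats file written by '+RTS -t<file> --machine-readable'
# contains entries of the form the regex below searches for, e.g.
# ("bytes allocated", "99264") (illustrative value).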
def check_stats(name, way, stats_file, range_fields):
    result = passed()
    if range_fields:
        try:
            f = open(in_testdir(stats_file))
        except IOError as e:
            return failBecause(str(e))
        stats_file_contents = f.read()
        f.close()

        for (metric, range_val_dev) in range_fields.items():
            field_match = re.search(r'\("' + metric + r'", "([0-9]+)"\)', stats_file_contents)
            if field_match is None:
                print('Failed to find metric: ', metric)
                metric_result = failBecause('no such stats metric')
            else:
                actual_val = int(field_match.group(1))

                # Store the metric so it can later be stored in a git note.
                perf_stat = metric_dict(name, way, metric, actual_val)
                change = None

                # If this is the first time running the benchmark, then pass.
                if range_val_dev is None:
                    metric_result = passed()
                    change = MetricChange.NewMetric
                else:
                    (expected_val, tolerance_dev) = range_val_dev
                    (change, metric_result) = Perf.check_stats_change(
                        perf_stat,
                        expected_val,
                        tolerance_dev,
                        config.allowed_perf_changes,
                        config.verbose >= 4)
                t.metrics.append((change, perf_stat))

            # If any metric fails then the test fails.
            # Note, the remaining metrics are still run so that
            # a complete list of changes can be presented to the user.
            if metric_result['passFail'] == 'fail':
                result = metric_result

    return result

# -----------------------------------------------------------------------------
# Build a single-module program

def extras_build( way, extra_mods, extra_hc_opts ):
    for mod, opts in extra_mods:
        result = simple_build(mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0)
        if not (mod.endswith('.hs') or mod.endswith('.lhs')):
            extra_hc_opts += ' ' + replace_suffix(mod, 'o')
        if badResult(result):
            return result

    return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}

def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, backpack = False):
    opts = getTestOpts()

    # Redirect stdout and stderr to the same file
    stdout = in_testdir(name, 'comp.stderr')
    stderr = subprocess.STDOUT

    if top_mod != '':
        srcname = top_mod
    elif addsuf:
        if backpack:
            srcname = add_suffix(name, 'bkp')
        else:
            srcname = add_hs_lhs_suffix(name)
    else:
        srcname = name

    if top_mod != '':
        to_do = '--make '
        if link:
            to_do = to_do + '-o ' + name
    elif backpack:
        if link:
            to_do = '-o ' + name + ' '
        else:
            to_do = ''
        to_do = to_do + '--backpack '
    elif link:
        to_do = '-o ' + name
    else:
        to_do = '-c' # just compile

    stats_file = name + '.comp.stats'
    if isCompilerStatsTest():
        extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    if backpack:
        extra_hc_opts += ' -outputdir ' + name + '.out'

    # Required by GHC 7.3+, harmless for earlier versions:
    if (getTestOpts().c_src or
        getTestOpts().objc_src or
        getTestOpts().objcpp_src or
        getTestOpts().cmm_src):
        extra_hc_opts += ' -no-hs-main '

    if getTestOpts().compile_cmd_prefix == '':
        cmd_prefix = ''
    else:
        cmd_prefix = getTestOpts().compile_cmd_prefix + ' '

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
           '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())

    exit_code = runCmd(cmd, None, stdout, stderr, opts.compile_timeout_multiplier)

    if exit_code != 0 and not should_fail:
        if config.verbose >= 1 and _expect_pass(way):
            print('Compile failed (exit code {0}) errors were:'.format(exit_code))
            actual_stderr_path = in_testdir(name, 'comp.stderr')
            dump_file(actual_stderr_path)

    # ToDo: if the sub-shell was killed by ^C, then exit

    if isCompilerStatsTest():
        statsResult = check_stats(name, way, stats_file, opts.stats_range_fields)
        if badResult(statsResult):
            return statsResult

    if should_fail:
        if exit_code == 0:
            return failBecause('exit code 0')
    else:
        if exit_code != 0:
            return failBecause('exit code non-0')

    return passed()

# -----------------------------------------------------------------------------
# Run a program and check its output
#
# If testname.stdin exists, route input from that, else
# from /dev/null. Route output to testname.run.stdout and
# testname.run.stderr. Returns the exit code of the run.

def simple_run(name, way, prog, extra_run_opts):
    opts = getTestOpts()

    # figure out what to use for stdin
    if opts.stdin:
        stdin = in_testdir(opts.stdin)
    elif os.path.exists(in_testdir(name, 'stdin')):
        stdin = in_testdir(name, 'stdin')
    else:
        stdin = None

    stdout = in_testdir(name, 'run.stdout')
    if opts.combined_output:
        stderr = subprocess.STDOUT
    else:
        stderr = in_testdir(name, 'run.stderr')

    my_rts_flags = rts_flags(way)

    stats_file = name + '.stats'
    if isStatsTest() and not isCompilerStatsTest():
        stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    else:
        stats_args = ''

    # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
    cmd = prog + stats_args + ' ' + my_rts_flags + ' ' + extra_run_opts

    if opts.cmd_wrapper is not None:
        cmd = opts.cmd_wrapper(cmd)

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    # run the command
    exit_code = runCmd(cmd, stdin, stdout, stderr, opts.run_timeout_multiplier)

    # check the exit code
    if exit_code != opts.exit_code:
        if config.verbose >= 1 and _expect_pass(way):
            print('Wrong exit code for ' + name + '(' + way + ') (expected', opts.exit_code, ', actual', exit_code, ')')
            dump_stdout(name)
            dump_stderr(name)
        return failBecause('bad exit code')

    if not (opts.ignore_stderr or stderr_ok(name, way) or opts.combined_output):
        return failBecause('bad stderr')
    if not (opts.ignore_stdout or stdout_ok(name, way)):
        return failBecause('bad stdout')

    check_hp = '-h' in my_rts_flags and opts.check_hp
    check_prof = '-p' in my_rts_flags

    # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
    if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
        return failBecause('bad heap profile')
    if check_prof and not check_prof_ok(name, way):
        return failBecause('bad profile')

    return check_stats(name, way, stats_file, opts.stats_range_fields)

def rts_flags(way):
    args = config.way_rts_flags.get(way, [])
    return '+RTS {0} -RTS'.format(' '.join(args)) if args else ''
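
# For illustration: if config.way_rts_flags['threaded2'] were ['-N2']
# (hypothetical value), rts_flags('threaded2') would return '+RTS -N2 -RTS';
# a way with no RTS flags yields the empty string.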

# -----------------------------------------------------------------------------
# Run a program in the interpreter and check its output

def interpreter_run(name, way, extra_hc_opts, top_mod):
    opts = getTestOpts()

    stdout = in_testdir(name, 'interp.stdout')
    stderr = in_testdir(name, 'interp.stderr')
    script = in_testdir(name, 'genscript')

    if opts.combined_output:
        framework_fail(name, 'unsupported',
                       'WAY=ghci and combined_output together is not supported')

    if top_mod == '':
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = top_mod

    delimiter = '===== program output begins here\n'

    with io.open(script, 'w', encoding='utf8') as f:
        # set the prog name and command-line args to match the compiled
        # environment.
        f.write(':set prog ' + name + '\n')
        f.write(':set args ' + opts.extra_run_opts + '\n')
        # Add marker lines to the stdout and stderr output files, so we
        # can separate GHCi's output from the program's.
        f.write(':! echo ' + delimiter)
        f.write(':! echo 1>&2 ' + delimiter)
        # Set stdout to be line-buffered to match the compiled environment.
        f.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
        # wrapping in GHC.TopHandler.runIO ensures we get the same output
        # in the event of an exception as for the compiled program.
        f.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')

    stdin = in_testdir(opts.stdin if opts.stdin else add_suffix(name, 'stdin'))
    if os.path.exists(stdin):
        os.system('cat "{0}" >> "{1}"'.format(stdin, script))

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())

    if opts.cmd_wrapper is not None:
        cmd = opts.cmd_wrapper(cmd)

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    exit_code = runCmd(cmd, script, stdout, stderr, opts.run_timeout_multiplier)

    # split the stdout into compilation/program output
    split_file(stdout, delimiter,
               in_testdir(name, 'comp.stdout'),
               in_testdir(name, 'run.stdout'))
    split_file(stderr, delimiter,
               in_testdir(name, 'comp.stderr'),
               in_testdir(name, 'run.stderr'))

    # check the exit code
    if exit_code != getTestOpts().exit_code:
        print('Wrong exit code for ' + name + '(' + way + ') (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
        dump_stdout(name)
        dump_stderr(name)
        return failBecause('bad exit code')

    # ToDo: if the sub-shell was killed by ^C, then exit

    if not (opts.ignore_stderr or stderr_ok(name, way)):
        return failBecause('bad stderr')
    elif not (opts.ignore_stdout or stdout_ok(name, way)):
        return failBecause('bad stdout')
    else:
        return passed()

def split_file(in_fn, delimiter, out1_fn, out2_fn):
    # See Note [Universal newlines].
    with io.open(in_fn, 'r', encoding='utf8', errors='replace', newline=None) as infile:
        with io.open(out1_fn, 'w', encoding='utf8', newline='') as out1:
            with io.open(out2_fn, 'w', encoding='utf8', newline='') as out2:
                line = infile.readline()
                while re.sub(r'^\s*', '', line) != delimiter and line != '':
                    out1.write(line)
                    line = infile.readline()

                line = infile.readline()
                while line != '':
                    out2.write(line)
                    line = infile.readline()

# -----------------------------------------------------------------------------
# Utils
def get_compiler_flags():
    opts = getTestOpts()

    flags = copy.copy(opts.compiler_always_flags)

    flags.append(opts.extra_hc_opts)

    if opts.outputdir is not None:
        flags.extend(["-outputdir", opts.outputdir])

    return flags

def stdout_ok(name, way):
    actual_stdout_file = add_suffix(name, 'run.stdout')
    expected_stdout_file = find_expected_file(name, 'stdout')

    extra_norm = join_normalisers(normalise_output, getTestOpts().extra_normaliser)

    check_stdout = getTestOpts().check_stdout
    if check_stdout:
        actual_stdout_path = in_testdir(actual_stdout_file)
        return check_stdout(actual_stdout_path, extra_norm)

    return compare_outputs(way, 'stdout', extra_norm,
                           expected_stdout_file, actual_stdout_file)

def dump_stdout( name ):
    with open(in_testdir(name, 'run.stdout'), encoding='utf8') as f:
        str = f.read().strip()
        if str:
            print("Stdout (", name, "):")
            print(str)

def stderr_ok(name, way):
    actual_stderr_file = add_suffix(name, 'run.stderr')
    expected_stderr_file = find_expected_file(name, 'stderr')

    return compare_outputs(way, 'stderr',
                           join_normalisers(normalise_errmsg, getTestOpts().extra_errmsg_normaliser),
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=normalise_whitespace)

def dump_stderr( name ):
    with open(in_testdir(name, 'run.stderr'), encoding='utf8') as f:
        str = f.read().strip()
        if str:
            print("Stderr (", name, "):")
            print(str)

def read_no_crs(file):
    str = ''
    try:
        # See Note [Universal newlines].
        with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
            str = h.read()
    except Exception:
        # On Windows, if the program fails very early, it seems the
        # files stdout/stderr are redirected to may not get created
        pass
    return str

def write_file(file, str):
    # See Note [Universal newlines].
    with io.open(file, 'w', encoding='utf8', newline='') as h:
        h.write(str)

# Note [Universal newlines]
#
# We don't want to write any Windows style line endings ever, because
# it would mean that `make accept` would touch every line of the file
# when switching between Linux and Windows.
#
# Furthermore, when reading a file, it is convenient to translate all
# Windows style endings to '\n', as it simplifies searching or massaging
# the content.
#
# Solution: use `io.open` instead of `open`
#  * when reading: use newline=None to translate '\r\n' to '\n'
#  * when writing: use newline='' to not translate '\n' to '\r\n'
#
# See https://docs.python.org/2/library/io.html#io.open.
#
# This should work with both python2 and python3, and with both mingw*
# and msys2 style Python.
#
# Do note that io.open returns unicode strings. So we have to specify
# the expected encoding. But there is at least one file which is not
# valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
# Another solution would be to open files in binary mode always, and
# operate on bytes.

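# A minimal illustration of the convention described in the Note above
# (hypothetical filename, not part of the driver):
#
#     with io.open('T1234.stdout', 'r', encoding='utf8', newline=None) as h:
#         s = h.read()   # any '\r\n' has been translated to '\n'
#     with io.open('T1234.stdout', 'w', encoding='utf8', newline='') as h:
#         h.write(s)     # '\n' is written out untranslated
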
def check_hp_ok(name):
    opts = getTestOpts()

    # do not qualify for hp2ps because we should be in the right directory
    hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())

    hp2psResult = runCmd(hp2psCmd)

    actual_ps_path = in_testdir(name, 'ps')

    if hp2psResult == 0:
        if os.path.exists(actual_ps_path):
            if gs_working:
                gsResult = runCmd(genGSCmd(actual_ps_path))
                if gsResult == 0:
                    return True
                else:
                    print("hp2ps output for " + name + " is not valid PostScript")
            else:
                return True # assume postscript is valid without ghostscript
        else:
            print("hp2ps did not generate PostScript for " + name)
            return False
    else:
        print("hp2ps error when processing heap profile for " + name)
        return False

def check_prof_ok(name, way):
    expected_prof_file = find_expected_file(name, 'prof.sample')
    expected_prof_path = in_testdir(expected_prof_file)

    # Check actual prof file only if we have an expected prof file to
    # compare it with.
    if not os.path.exists(expected_prof_path):
        return True

    actual_prof_file = add_suffix(name, 'prof')
    actual_prof_path = in_testdir(actual_prof_file)

    if not os.path.exists(actual_prof_path):
        print(actual_prof_path + " does not exist")
        return False

    if os.path.getsize(actual_prof_path) == 0:
        print(actual_prof_path + " is empty")
        return False

    return compare_outputs(way, 'prof', normalise_prof,
                           expected_prof_file, actual_prof_file,
                           whitespace_normaliser=normalise_whitespace)

1602 # Compare expected output to actual output, and optionally accept the
1603 # new output. Returns true if output matched or was accepted, false
1604 # otherwise. See Note [Output comparison] for the meaning of the
1605 # normaliser and whitespace_normaliser parameters.
1606 def compare_outputs(way, kind, normaliser, expected_file, actual_file,
1607 whitespace_normaliser=lambda x:x):
1608
1609 expected_path = in_srcdir(expected_file)
1610 actual_path = in_testdir(actual_file)
1611
1612 if os.path.exists(expected_path):
1613 expected_str = normaliser(read_no_crs(expected_path))
1614 # Create the .normalised file in the testdir, not in the srcdir.
1615 expected_normalised_file = add_suffix(expected_file, 'normalised')
1616 expected_normalised_path = in_testdir(expected_normalised_file)
1617 else:
1618 expected_str = ''
1619 expected_normalised_path = '/dev/null'
1620
1621 actual_raw = read_no_crs(actual_path)
1622 actual_str = normaliser(actual_raw)
1623
1624 # See Note [Output comparison].
1625 if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
1626 return True
1627 else:
1628 if config.verbose >= 1 and _expect_pass(way):
1629 print('Actual ' + kind + ' output differs from expected:')
1630
1631 if expected_normalised_path != '/dev/null':
1632 write_file(expected_normalised_path, expected_str)
1633
1634 actual_normalised_path = add_suffix(actual_path, 'normalised')
1635 write_file(actual_normalised_path, actual_str)
1636
1637 if config.verbose >= 1 and _expect_pass(way):
1638 # See Note [Output comparison].
1639 r = runCmd('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
1640 actual_normalised_path),
1641 print_output=True)
1642
1643 # If for some reason there were no non-whitespace differences,
1644 # then do a full diff
1645 if r == 0:
1646 r = runCmd('diff -u "{0}" "{1}"'.format(expected_normalised_path,
1647 actual_normalised_path),
1648 print_output=True)
1649
1650 if config.accept and (getTestOpts().expect == 'fail' or
1651 way in getTestOpts().expect_fail_for):
1652 if_verbose(1, 'Test is expected to fail. Not accepting new output.')
1653 return False
1654 elif config.accept and actual_raw:
1655 if config.accept_platform:
1656 if_verbose(1, 'Accepting new output for platform "'
1657 + config.platform + '".')
1658 expected_path += '-' + config.platform
1659 elif config.accept_os:
1660 if_verbose(1, 'Accepting new output for os "'
1661 + config.os + '".')
1662 expected_path += '-' + config.os
1663 else:
1664 if_verbose(1, 'Accepting new output.')
1665
1666 write_file(expected_path, actual_raw)
1667 return True
1668 elif config.accept:
1669 if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
1670 os.remove(expected_path)
1671 return True
1672 else:
1673 return False

# Note [Output comparison]
#
# We do two types of output comparison:
#
# 1. To decide whether a test has failed. We apply a `normaliser` and an
#    optional `whitespace_normaliser` to the expected and the actual
#    output, before comparing the two.
#
# 2. To show as a diff to the user when the test indeed failed. We apply
#    the same `normaliser` function to the outputs, to make the diff as
#    small as possible (only showing the actual problem). But we don't
#    apply the `whitespace_normaliser` here, because it might completely
#    squash all whitespace, making the diff unreadable. Instead we rely
#    on the `diff` program to ignore whitespace changes as much as
#    possible (#10152).

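# For instance, the .prof comparison above pairs `normalise_prof` with
# `whitespace_normaliser=normalise_whitespace`: column alignment in a
# .prof file carries no meaning, so it is ignored for comparison (1),
# but squashing it for the diff (2) would make the profile unreadable,
# so there we leave whitespace handling to `diff -uw` instead.
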
def normalise_whitespace( str ):
    # Merge contiguous whitespace characters into a single space.
    return ' '.join(str.split())
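
# An illustrative sketch of the effect (not part of the driver):
#
#   >>> normalise_whitespace('Hello \t world\n  !')
#   'Hello world !'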

callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')

def normalise_callstacks(s):
    opts = getTestOpts()
    def repl(matches):
        location = matches.group(1)
        location = normalise_slashes_(location)
        return ', called at {0}:<line>:<column> in <package-id>:'.format(location)
    # Ignore line number differences in call stacks (#10834).
    s = re.sub(callSite_re, repl, s)
    # Ignore the change in how we identify implicit call-stacks.
    s = s.replace('from ImplicitParams', 'from HasCallStack')
    if not opts.keep_prof_callstacks:
        # Don't output prof callstacks. Test output should be
        # independent of the WAY we run the test.
        s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
    return s
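
# A sketch of the effect, on made-up input (normalise_callstacks needs
# the test options to be in scope, so this is not a literal doctest):
#
#   'error, called at Main.hs:5:9 in main:Main'
#     becomes
#   'error, called at Main.hs:<line>:<column> in <package-id>:Main'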

tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)

def normalise_type_reps(str):
    """ Normalise out fingerprints from Typeable TyCon representations """
    return re.sub(tyCon_re, 'TyCon FINGERPRINT FINGERPRINT ', str)
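
# For example (fingerprint values are made up):
#
#   >>> normalise_type_reps('TyCon 1234## 5678## "base" "GHC.Types" "Int"')
#   'TyCon FINGERPRINT FINGERPRINT "base" "GHC.Types" "Int"'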

def normalise_errmsg( str ):
    """Normalise error-messages emitted via stderr"""
    # IBM AIX's `ld` is a bit chatty
    if opsys('aix'):
        str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
    # remove " error:" and lower-case " Warning:" to make the patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    str = normalise_callstacks(str)
    str = normalise_type_reps(str)

    # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows).
    # The colon is there because it appears in error messages; this
    # hacky solution is used in place of more sophisticated filename
    # mangling.
    str = re.sub('([^\\s])\\.exe', '\\1', str)

    # normalise slashes to minimise Windows/Unix filename differences
    str = re.sub('\\\\', '/', str)

    # The inplace ghc's are called ghc-stage[123] to avoid filename
    # collisions, so we need to normalise that to just "ghc"
    str = re.sub('ghc-stage[123]', 'ghc', str)

    # Error messages sometimes contain the name of the integer
    # implementation package
    str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)

    # Error messages sometimes contain this blurb, which can vary
    # spuriously depending upon the build configuration (e.g. based on the
    # integer backend)
    str = re.sub(r'\.\.\.plus ([a-z]+|[0-9]+) instances involving out-of-scope types',
                 '...plus N instances involving out-of-scope types', str)

    # Also filter out bullet characters. This is because bullets are used to
    # separate error sections, and tests shouldn't be sensitive to how the
    # division happens.
    bullet = '•'.encode('utf8') if isinstance(str, bytes) else '•'
    str = str.replace(bullet, '')

    # Windows only: this is a bug in hsc2hs, but it is preventing stable
    # output for the testsuite. See Trac #9775. For now we filter out this
    # warning message to get clean output.
    if config.msys:
        str = re.sub('Failed to remove file (.*); error= (.*)$', '', str)
        str = re.sub(r'DeleteFile "(.+)": permission denied \(Access is denied\.\)(.*)$', '', str)

    return str
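
# Two of the rewrites above, sketched on made-up input:
#
#   'ghc-stage2.exe: panic!'  becomes  'ghc: panic!'
#   'integer-gmp-1.0.1.0'     becomes  'integer-<IMPL>-<VERSION>'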

# normalise a .prof file, so that we can reasonably compare it against
# a sample. This doesn't compare any of the actual profiling data,
# only the shape of the profile and the number of entries.
def normalise_prof (str):
    # strip everything up to the line beginning "COST CENTRE"
    str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)

    # strip results for CAFs, these tend to change unpredictably
    str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)

    # XXX Ignore Main.main. Sometimes this appears under CAF, and
    # sometimes under MAIN.
    str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)

    # We have something like this:
    #
    # MAIN       MAIN  <built-in>                 53   0  0.0   0.2  0.0 100.0
    # CAF        Main  <entire-module>           105   0  0.0   0.3  0.0  62.5
    # readPrec   Main  Main_1.hs:7:13-16         109   1  0.0   0.6  0.0   0.6
    # readPrec   Main  Main_1.hs:4:13-16         107   1  0.0   0.6  0.0   0.6
    # main       Main  Main_1.hs:(10,1)-(20,20)  106   1  0.0  20.2  0.0  61.0
    # ==         Main  Main_1.hs:7:25-26         114   1  0.0   0.0  0.0   0.0
    # ==         Main  Main_1.hs:4:25-26         113   1  0.0   0.0  0.0   0.0
    # showsPrec  Main  Main_1.hs:7:19-22         112   2  0.0   1.2  0.0   1.2
    # showsPrec  Main  Main_1.hs:4:19-22         111   2  0.0   0.9  0.0   0.9
    # readPrec   Main  Main_1.hs:7:13-16         110   0  0.0  18.8  0.0  18.8
    # readPrec   Main  Main_1.hs:4:13-16         108   0  0.0  19.9  0.0  19.9
    #
    # then we remove all the specific profiling data, leaving only the cost
    # centre name, module, src, and entries, to end up with this (modulo
    # whitespace between columns):
    #
    # MAIN       MAIN  <built-in>         0
    # readPrec   Main  Main_1.hs:7:13-16  1
    # readPrec   Main  Main_1.hs:4:13-16  1
    # ==         Main  Main_1.hs:7:25-26  1
    # ==         Main  Main_1.hs:4:25-26  1
    # showsPrec  Main  Main_1.hs:7:19-22  2
    # showsPrec  Main  Main_1.hs:4:19-22  2
    # readPrec   Main  Main_1.hs:7:13-16  0
    # readPrec   Main  Main_1.hs:4:13-16  0

    # Split into 9 whitespace-separated groups; take columns 1 (cost-centre),
    # 2 (module), 3 (src), and 5 (entries). SCC names can't have whitespace,
    # so this works fine.
    str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
                 '\\1 \\2 \\3 \\5\n', str)
    return str

def normalise_slashes_( str ):
    str = re.sub('\\\\', '/', str)
    str = re.sub('//', '/', str)
    return str

def normalise_exe_( str ):
    str = re.sub(r'\.exe', '', str)
    return str

def normalise_output( str ):
    # remove " error:" and lower-case " Warning:" to make the patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    # Remove a .exe extension (for Windows).
    # This can occur in error messages generated by the program.
    str = re.sub('([^\\s])\\.exe', '\\1', str)
    str = normalise_callstacks(str)
    str = normalise_type_reps(str)
    return str

def normalise_asm( str ):
    lines = str.split('\n')
    # Only keep instructions and labels not starting with a dot.
    metadata = re.compile('^[ \t]*\\..*$')
    out = []
    for line in lines:
        # Drop metadata directives (e.g. ".type")
        if not metadata.match(line):
            line = re.sub('@plt', '', line)
            instr = line.lstrip().split()
            # Drop empty lines.
            if not instr:
                continue
            # Drop operands, except for call instructions.
            elif instr[0] == 'call':
                out.append(instr[0] + ' ' + instr[1])
            else:
                out.append(instr[0])
    out = '\n'.join(out)
    return out
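
# A sketch of the transformation on a made-up fragment:
#
#       .globl foo          |    foo:
#   foo:                    |    movq
#       movq %rdi, %rax     |    call bar
#       call bar@plt        |    ret
#       ret                 |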

def if_verbose( n, s ):
    if config.verbose >= n:
        print(s)

def dump_file(f):
    try:
        with io.open(f) as file:
            print(file.read())
    except Exception:
        print('')

def runCmd(cmd, stdin=None, stdout=None, stderr=None, timeout_multiplier=1.0, print_output=False):
    timeout_prog = strip_quotes(config.timeout_prog)
    timeout = str(int(ceil(config.timeout * timeout_multiplier)))

    # Format cmd using config. Example: cmd='{hpc} report A.tix'
    cmd = cmd.format(**config.__dict__)
    if_verbose(3, cmd + ('< ' + os.path.basename(stdin) if stdin else ''))

    stdin_file = io.open(stdin, 'rb') if stdin else None
    stdout_buffer = b''
    stderr_buffer = b''

    hStdErr = subprocess.PIPE
    if stderr is subprocess.STDOUT:
        hStdErr = subprocess.STDOUT

    try:
        # cmd is a complex command in Bourne-shell syntax,
        # e.g. (cd . && 'C:/users/simonpj/HEAD/inplace/bin/ghc-stage2' ...etc).
        # Hence it must ultimately be run by a Bourne shell. It's timeout's
        # job to invoke the Bourne shell.

        r = subprocess.Popen([timeout_prog, timeout, cmd],
                             stdin=stdin_file,
                             stdout=subprocess.PIPE,
                             stderr=hStdErr,
                             env=ghc_env)

        stdout_buffer, stderr_buffer = r.communicate()
    finally:
        if stdin_file:
            stdin_file.close()
        if config.verbose >= 1 and print_output:
            if stdout_buffer:
                sys.stdout.buffer.write(stdout_buffer)
            if stderr_buffer:
                sys.stderr.buffer.write(stderr_buffer)

        if stdout:
            with io.open(stdout, 'wb') as f:
                f.write(stdout_buffer)
        if stderr:
            if stderr is not subprocess.STDOUT:
                with io.open(stderr, 'wb') as f:
                    f.write(stderr_buffer)

    if r.returncode == 98:
        # The python timeout program uses 98 to signal that ^C was pressed.
        stopNow()
    if r.returncode == 99 and getTestOpts().exit_code != 99:
        # Only print a message when timeout killed the process unexpectedly.
        if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
    return r.returncode
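
# Typical usage, following the '{hpc}' example in the comment above
# (filenames are made up; the '{hpc}' placeholder is filled in from
# config before the command runs):
#
#   exitcode = runCmd('{hpc} report A.tix', stdout='hpc.out')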

# -----------------------------------------------------------------------------
# Check whether Ghostscript is available for checking the output of hp2ps

def genGSCmd(psfile):
    return '{{gs}} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "{0}"'.format(psfile)
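
# Note the doubled braces: after the .format() call above the command
# still contains a literal '{gs}', which runCmd later substitutes from
# config. For example:
#
#   >>> genGSCmd('good.ps')
#   '{gs} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "good.ps"'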

def gsNotWorking():
    global gs_working
    print("GhostScript not available for hp2ps tests")

gs_working = False
if config.have_profiling:
    if config.gs != '':
        resultGood = runCmd(genGSCmd(config.top + '/config/good.ps'))
        if resultGood == 0:
            resultBad = runCmd(genGSCmd(config.top + '/config/bad.ps') +
                               ' >/dev/null 2>&1')
            if resultBad != 0:
                print("GhostScript available for hp2ps tests")
                gs_working = True
            else:
                gsNotWorking()
        else:
            gsNotWorking()
    else:
        gsNotWorking()

def add_suffix( name, suffix ):
    if suffix == '':
        return name
    else:
        return name + '.' + suffix
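
# For example:
#
#   >>> add_suffix('T4198', 'stdout')
#   'T4198.stdout'
#   >>> add_suffix('T4198', '')
#   'T4198'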

def add_hs_lhs_suffix(name):
    if getTestOpts().c_src:
        return add_suffix(name, 'c')
    elif getTestOpts().cmm_src:
        return add_suffix(name, 'cmm')
    elif getTestOpts().objc_src:
        return add_suffix(name, 'm')
    elif getTestOpts().objcpp_src:
        return add_suffix(name, 'mm')
    elif getTestOpts().literate:
        return add_suffix(name, 'lhs')
    else:
        return add_suffix(name, 'hs')

def replace_suffix( name, suffix ):
    base, _ = os.path.splitext(name)
    return base + '.' + suffix
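
# For example (any existing extension is dropped first):
#
#   >>> replace_suffix('Main.hs', 'o')
#   'Main.o'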

def in_testdir(name, suffix=''):
    return os.path.join(getTestOpts().testdir, add_suffix(name, suffix))

def in_srcdir(name, suffix=''):
    return os.path.join(getTestOpts().srcdir, add_suffix(name, suffix))

# Finding the sample output. The filename is of the form
#
#   <test>.stdout[-ws-<wordsize>][-<platform>|-<os>]
#
def find_expected_file(name, suff):
    basename = add_suffix(name, suff)

    files = [basename + ws + plat
             for plat in ['-' + config.platform, '-' + config.os, '']
             for ws in ['-ws-' + config.wordsize, '']]

    for f in files:
        if os.path.exists(in_srcdir(f)):
            return f

    return basename
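
# For example, with suff='stdout' on a hypothetical 64-bit Linux
# configuration (platform 'x86_64-unknown-linux'), the candidates for
# test 'T123' are tried in this order:
#
#   T123.stdout-ws-64-x86_64-unknown-linux
#   T123.stdout-x86_64-unknown-linux
#   T123.stdout-ws-64-linux
#   T123.stdout-linux
#   T123.stdout-ws-64
#   T123.stdout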

if config.msys:
    import stat
    def cleanup():
        testdir = getTestOpts().testdir
        max_attempts = 5
        retries = max_attempts
        def on_error(function, path, excinfo):
            # At least one test (T11489) removes the write bit from a file it
            # produces. Windows refuses to delete read-only files with a
            # permission error. Try setting the write bit and try again.
            os.chmod(path, stat.S_IWRITE)
            function(path)

        # On Windows we have to retry the delete a couple of times.
        # The reason for this is that a FileDelete command just marks a
        # file for deletion. The file is really only removed when the last
        # handle to the file is closed. Unfortunately there are a lot of
        # system services that can have a file temporarily opened using a
        # shared read-only lock, such as the built-in AV and search indexer.
        #
        # We can't really guarantee that these are all off, so whenever the
        # folder still exists after an rmtree we try again after waiting a
        # bit.
        #
        # Based on what I've seen from the tests on the CI servers, this is
        # relatively rare, so overall we won't be retrying a lot. If the
        # folder is still locked after a reasonable amount of time, abort
        # the current test by throwing an exception, so that it doesn't
        # fail later with an even more cryptic error.
        #
        # See Trac #13162
        exception = None
        while retries > 0 and os.path.exists(testdir):
            time.sleep((max_attempts-retries)*6)
            try:
                shutil.rmtree(testdir, onerror=on_error, ignore_errors=False)
            except Exception as e:
                exception = e
            retries -= 1

        if retries == 0 and os.path.exists(testdir):
            raise Exception("Unable to remove folder '%s': %s\nUnable to start current test."
                            % (testdir, exception))
else:
    def cleanup():
        testdir = getTestOpts().testdir
        if os.path.exists(testdir):
            shutil.rmtree(testdir, ignore_errors=False)


# -----------------------------------------------------------------------------
# Yield all the files ending in '.T' below the given root directories.

def findTFiles(roots):
    for root in roots:
        for path, dirs, files in os.walk(root, topdown=True):
            # Never pick up .T files in uncleaned .run directories.
            dirs[:] = [dir for dir in sorted(dirs)
                       if not dir.endswith(testdir_suffix)]
            for filename in files:
                if filename.endswith('.T'):
                    yield os.path.join(path, filename)
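
# Typical usage (the root directory is made up):
#
#   for tfile in findTFiles(['tests/typecheck']):
#       print(tfile)    # e.g. tests/typecheck/should_run/all.T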

# -----------------------------------------------------------------------------
# Output a test summary to the specified file object

def summary(t, file, short=False, color=False):

    file.write('\n')
    printUnexpectedTests(file,
        [t.unexpected_passes, t.unexpected_failures,
         t.unexpected_stat_failures, t.framework_failures])

    if short:
        # Only print the list of unexpected tests above.
        return

    colorize = lambda s: s
    if color:
        if len(t.unexpected_failures) > 0 or \
           len(t.unexpected_stat_failures) > 0 or \
           len(t.framework_failures) > 0:
            colorize = str_fail
        else:
            colorize = str_pass

    file.write(colorize('SUMMARY') + ' for test run started at '
               + time.strftime("%c %Z", t.start_time) + '\n'
               + str(datetime.timedelta(seconds=
                     round(time.time() - time.mktime(t.start_time)))).rjust(8)
               + ' spent to go through\n'
               + repr(t.total_tests).rjust(8)
               + ' total tests, which gave rise to\n'
               + repr(t.total_test_cases).rjust(8)
               + ' test cases, of which\n'
               + repr(t.n_tests_skipped).rjust(8)
               + ' were skipped\n'
               + '\n'
               + repr(len(t.missing_libs)).rjust(8)
               + ' had missing libraries\n'
               + repr(t.n_expected_passes).rjust(8)
               + ' expected passes\n'
               + repr(t.n_expected_failures).rjust(8)
               + ' expected failures\n'
               + '\n'
               + repr(len(t.framework_failures)).rjust(8)
               + ' caused framework failures\n'
               + repr(len(t.framework_warnings)).rjust(8)
               + ' caused framework warnings\n'
               + repr(len(t.unexpected_passes)).rjust(8)
               + ' unexpected passes\n'
               + repr(len(t.unexpected_failures)).rjust(8)
               + ' unexpected failures\n'
               + repr(len(t.unexpected_stat_failures)).rjust(8)
               + ' unexpected stat failures\n'
               + '\n')

    if t.unexpected_passes:
        file.write('Unexpected passes:\n')
        printTestInfosSummary(file, t.unexpected_passes)

    if t.unexpected_failures:
        file.write('Unexpected failures:\n')
        printTestInfosSummary(file, t.unexpected_failures)

    if t.unexpected_stat_failures:
        file.write('Unexpected stat failures:\n')
        printTestInfosSummary(file, t.unexpected_stat_failures)

    if t.framework_failures:
        file.write('Framework failures:\n')
        printTestInfosSummary(file, t.framework_failures)

    if t.framework_warnings:
        file.write('Framework warnings:\n')
        printTestInfosSummary(file, t.framework_warnings)

    if stopping():
        file.write('WARNING: Testsuite run was terminated early\n')

def printUnexpectedTests(file, testInfoss):
    unexpected = set(name for testInfos in testInfoss
                          for (_, name, _, _) in testInfos
                          if not name.endswith('.T'))
    if unexpected:
        file.write('Unexpected results from:\n')
        file.write('TEST="' + ' '.join(sorted(unexpected)) + '"\n')
        file.write('\n')

def printTestInfosSummary(file, testInfos):
    maxDirLen = max(len(directory) for (directory, _, _, _) in testInfos)
    for (directory, name, reason, way) in testInfos:
        directory = directory.ljust(maxDirLen)
        file.write(' {directory} {name} [{reason}] ({way})\n'.format(**locals()))
    file.write('\n')

def modify_lines(s, f):
    s = '\n'.join([f(l) for l in s.splitlines()])
    if s and s[-1] != '\n':
        # Prevent '\ No newline at end of file' warnings when diffing.
        s += '\n'
    return s
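
# A sketch of modify_lines' behaviour (illustrative input):
#
#   >>> modify_lines('foo\nbar', lambda l: l.upper())
#   'FOO\nBAR\n'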