Introduce NCG config flag and add helper
[ghc.git] / testsuite / driver / testlib.py
1 # coding=utf8
2 #
3 # (c) Simon Marlow 2002
4 #
5
6 import io
7 import shutil
8 import os
9 import re
10 import traceback
11 import time
12 import datetime
13 import copy
14 import glob
15 import sys
16 from math import ceil, trunc
17 from pathlib import PurePath
18 import collections.abc
19 import subprocess
20
21 from testglobals import config, ghc_env, default_testopts, brokens, t
22 from testutil import strip_quotes, lndir, link_or_copy_file, passed, failBecause, str_fail, str_pass
23 import perf_notes as Perf
24 from perf_notes import MetricChange
25 extra_src_files = {'T4198': ['exitminus1.c']} # TODO: See #12223
26
27 global pool_sema
28 if config.use_threads:
29 import threading
30 pool_sema = threading.BoundedSemaphore(value=config.threads)
31
32 global wantToStop
33 wantToStop = False
34
35 def stopNow():
36 global wantToStop
37 wantToStop = True
38
39 def stopping():
40 return wantToStop
41
42
43 # Options valid for the current test only (these get reset to
44 # testdir_testopts after each test).
45
46 global testopts_local
47 if config.use_threads:
48 testopts_local = threading.local()
49 else:
50 class TestOpts_Local:
51 pass
52 testopts_local = TestOpts_Local()
53
54 def getTestOpts():
55 return testopts_local.x
56
57 def setLocalTestOpts(opts):
58 global testopts_local
59 testopts_local.x=opts
60
61 def isCompilerStatsTest():
62 opts = getTestOpts()
63 return bool(opts.is_compiler_stats_test)
64
65 def isStatsTest():
66 opts = getTestOpts()
67 return bool(opts.stats_range_fields)
68
69
70 # This can be called at the top of a file of tests, to set default test options
71 # for the following tests.
72 def setTestOpts( f ):
73 global thisdir_settings
74 thisdir_settings = [thisdir_settings, f]
75
76 # -----------------------------------------------------------------------------
77 # Canned setup functions for common cases. eg. for a test you might say
78 #
79 # test('test001', normal, compile, [''])
80 #
81 # to run it without any options, but change it to
82 #
83 # test('test001', expect_fail, compile, [''])
84 #
85 # to expect failure for this test.
86 #
87 # type TestOpt = (name :: String, opts :: Object) -> IO ()
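#
# Setup functions compose when given as a list, e.g. (illustrative only;
# 'test002' is a hypothetical test name):
#
#   test('test002', [omit_ways(['ghci']), extra_run_opts('+RTS -N2 -RTS')],
#        compile_and_run, [''])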
88
89 def normal( name, opts ):
90 return
91
92 def skip( name, opts ):
93 opts.skip = True
94
95 def expect_fail( name, opts ):
96 # The compiler, testdriver, OS or platform is missing a certain
97 # feature, and we don't plan to or can't fix it now or in the
98 # future.
99 opts.expect = 'fail'
100
101 def reqlib( lib ):
102 return lambda name, opts, l=lib: _reqlib (name, opts, l )
103
104 def stage1(name, opts):
105 # See Note [Why is there no stage1 setup function?]
106 framework_fail(name, 'stage1 setup function does not exist',
107 'add your test to testsuite/tests/stage1 instead')
108
109 # Note [Why is there no stage1 setup function?]
110 #
111 # Presumably a stage1 setup function would signal that the stage1
112 # compiler should be used to compile a test.
113 #
114 # Trouble is, the path to the compiler + the `ghc --info` settings for
115 # that compiler are currently passed in from the `make` part of the
116 # testsuite driver.
117 #
118 # Switching compilers in the Python part would be entirely too late, as
119 # all ghc_with_* settings would be wrong. See config/ghc for possible
120 # consequences (for example, config.run_ways would still be
121 # based on the default compiler, quite likely causing ./validate --slow
122 # to fail).
123 #
124 # It would be possible to let the Python part of the testsuite driver
125 # make the call to `ghc --info`, but doing so would require quite some
126 # work. Care has to be taken to not affect the run_command tests for
127 # example, as they also use the `ghc --info` settings:
128 # quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
129 #
130 # If you want a test to run using the stage1 compiler, add it to the
131 # testsuite/tests/stage1 directory. Validate runs the tests in that
132 # directory with `make stage=1`.
133
134 # Cache the results of looking to see if we have a library or not.
135 # This makes quite a difference, especially on Windows.
136 have_lib_cache = {}
137
138 def have_library(lib):
139 """ Test whether the given library is available """
140 if lib in have_lib_cache:
141 got_it = have_lib_cache[lib]
142 else:
143 cmd = strip_quotes(config.ghc_pkg)
144 p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
145 stdout=subprocess.PIPE,
146 stderr=subprocess.PIPE,
147 env=ghc_env)
148 # read from stdout and stderr to avoid blocking due to
149 # buffers filling
150 p.communicate()
151 r = p.wait()
152 got_it = r == 0
153 have_lib_cache[lib] = got_it
154
155 return got_it
156
157 def _reqlib( name, opts, lib ):
158 if not have_library(lib):
159 opts.expect = 'missing-lib'
160
161 def req_haddock( name, opts ):
162 if not config.haddock:
163 opts.expect = 'missing-lib'
164
165 def req_profiling( name, opts ):
166 '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
167 if not config.have_profiling:
168 opts.expect = 'fail'
169
170 def req_shared_libs( name, opts ):
171 if not config.have_shared_libs:
172 opts.expect = 'fail'
173
174 def req_interp( name, opts ):
175 if not config.have_interp:
176 opts.expect = 'fail'
177
178 def req_smp( name, opts ):
179 if not config.have_smp:
180 opts.expect = 'fail'
181
182 def ignore_stdout(name, opts):
183 opts.ignore_stdout = True
184
185 def ignore_stderr(name, opts):
186 opts.ignore_stderr = True
187
188 def combined_output( name, opts ):
189 opts.combined_output = True
190
191 # -----
192
193 def expect_fail_for( ways ):
194 return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )
195
196 def _expect_fail_for( name, opts, ways ):
197 opts.expect_fail_for = ways
198
199 def expect_broken( bug ):
200 # This test is expected not to work due to the indicated trac bug
201 # number.
202 return lambda name, opts, b=bug: _expect_broken (name, opts, b )
203
204 def _expect_broken( name, opts, bug ):
205 record_broken(name, opts, bug)
206 opts.expect = 'fail'
207
208 def expect_broken_for( bug, ways ):
209 return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )
210
211 def _expect_broken_for( name, opts, bug, ways ):
212 record_broken(name, opts, bug)
213 opts.expect_fail_for = ways
214
215 def record_broken(name, opts, bug):
216 me = (bug, opts.testdir, name)
217 if me not in brokens:
218 brokens.append(me)
219
220 def _expect_pass(way):
221 # Helper function. Not intended for use in .T files.
222 opts = getTestOpts()
223 return opts.expect == 'pass' and way not in opts.expect_fail_for
224
225 # -----
226
227 def omit_ways( ways ):
228 return lambda name, opts, w=ways: _omit_ways( name, opts, w )
229
230 def _omit_ways( name, opts, ways ):
231 opts.omit_ways = ways
232
233 # -----
234
235 def only_ways( ways ):
236 return lambda name, opts, w=ways: _only_ways( name, opts, w )
237
238 def _only_ways( name, opts, ways ):
239 opts.only_ways = ways
240
241 # -----
242
243 def extra_ways( ways ):
244 return lambda name, opts, w=ways: _extra_ways( name, opts, w )
245
246 def _extra_ways( name, opts, ways ):
247 opts.extra_ways = ways
248
249 # -----
250
251 def set_stdin( file ):
252 return lambda name, opts, f=file: _set_stdin(name, opts, f)
253
254 def _set_stdin( name, opts, f ):
255 opts.stdin = f
256
257 # -----
258
259 def exit_code( val ):
260 return lambda name, opts, v=val: _exit_code(name, opts, v)
261
262 def _exit_code( name, opts, v ):
263 opts.exit_code = v
264
265 def signal_exit_code( val ):
266 if opsys('solaris2'):
267 return exit_code( val )
268 else:
269 # When an application running on Linux receives a fatal error
270 # signal, its exit code is encoded as 128 + the signal value. See
271 # http://www.tldp.org/LDP/abs/html/exitcodes.html
272 # Mac OS X appears to behave the same way; at least the behaviour
273 # of the Mac OS X builder suggests so.
274 return exit_code( val+128 )
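# For example (illustrative): signal_exit_code(6) makes the test expect
# exit code 134 (128 + SIGABRT) on Linux.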
275
276 # -----
277
278 def compile_timeout_multiplier( val ):
279 return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)
280
281 def _compile_timeout_multiplier( name, opts, v ):
282 opts.compile_timeout_multiplier = v
283
284 def run_timeout_multiplier( val ):
285 return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)
286
287 def _run_timeout_multiplier( name, opts, v ):
288 opts.run_timeout_multiplier = v
289
290 # -----
291
292 def extra_run_opts( val ):
293 return lambda name, opts, v=val: _extra_run_opts(name, opts, v)
294
295 def _extra_run_opts( name, opts, v ):
296 opts.extra_run_opts = v
297
298 # -----
299
300 def extra_hc_opts( val ):
301 return lambda name, opts, v=val: _extra_hc_opts(name, opts, v)
302
303 def _extra_hc_opts( name, opts, v ):
304 opts.extra_hc_opts = v
305
306 # -----
307
308 def extra_clean( files ):
309 # TODO. Remove all calls to extra_clean.
310 return lambda _name, _opts: None
311
312 def extra_files(files):
313 return lambda name, opts: _extra_files(name, opts, files)
314
315 def _extra_files(name, opts, files):
316 opts.extra_files.extend(files)
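# Illustrative .T usage (hypothetical test and file names): declare the
# extra sources a test depends on, so they are copied into its run
# directory.
#
#   test('T100', extra_files(['Helper.hs', 'helper.c']), compile, [''])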
317
318 # -----
319
320 # Defaults to "test everything, and only break on extreme cases"
321 #
322 # The inputs to this function are slightly interesting:
323 # metric can be either:
324 # - 'all', in which case all 3 possible metrics are collected and compared.
325 # - The specific metric one wants to use in the test.
326 # - A list of the metrics one wants to use in the test.
327 #
328 # Deviation defaults to 20% because the goal is correctness over performance.
329 # The testsuite should avoid breaking when there is not an actual error.
330 # Instead, the testsuite should notify of regressions in a non-breaking manner.
331 #
332 # collect_compiler_stats is used when the metrics collected are about the compiler.
333 # collect_stats is used in the majority case when the metrics to be collected
334 # are about the performance of the runtime code generated by the compiler.
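#
# For example, deviation=20 against a previous 'bytes allocated' value of
# 1,000,000 accepts (roughly) anything between 800,000 and 1,200,000.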
335 def collect_compiler_stats(metric='all',deviation=20):
336 return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m,d, True)
337
338 def collect_stats(metric='all', deviation=20):
339 return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d)
340
341 def testing_metrics():
342 return ['bytes allocated', 'peak_megabytes_allocated', 'max_bytes_used']
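# Illustrative .T usage (hypothetical test names):
#
#   test('T12345', collect_stats('bytes allocated', 5), compile_and_run, [''])
#   test('T67890', collect_compiler_stats('all', 10), compile, [''])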
343
344 # This is an internal function that is used only in the implementation.
345 # 'is_compiler_stats_test' is somewhat of an unfortunate name.
346 # If the boolean is set to true, it indicates that this test is one that
347 # measures the performance numbers of the compiler.
348 # As this is a fairly rare case in the testsuite, it defaults to false to
349 # indicate that it is a 'normal' performance test.
350 def _collect_stats(name, opts, metric, deviation, is_compiler_stats_test=False):
351 if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
352 failBecause('This test has an invalid name.')
353
354 tests = Perf.get_perf_stats('HEAD^')
355
356 # Might have multiple metrics being measured for a single test.
357 test = [t for t in tests if t.test == name]
358
359 if tests == [] or test == []:
360 # There are no prior metrics for this test.
361 if isinstance(metric, str):
362 if metric == 'all':
363 for field in testing_metrics():
364 opts.stats_range_fields[field] = None
365 else:
366 opts.stats_range_fields[metric] = None
367 if isinstance(metric, list):
368 for field in metric:
369 opts.stats_range_fields[field] = None
370
371 return
372
373 if is_compiler_stats_test:
374 opts.is_compiler_stats_test = True
375
376 # Compiler performance numbers change when debugging is on, making the results
377 # useless and confusing. Therefore, skip if debugging is on.
378 if config.compiler_debugged and is_compiler_stats_test:
379 opts.skip = 1
380
381 # get the average value of the given metric from test
382 def get_avg_val(metric_2):
383 metric_2_metrics = [float(t.value) for t in test if t.metric == metric_2]
384 return sum(metric_2_metrics) / len(metric_2_metrics)
385
386 # 'all' is a shorthand to test for bytes allocated, peak megabytes allocated, and max bytes used.
387 if isinstance(metric, str):
388 if metric == 'all':
389 for field in testing_metrics():
390 opts.stats_range_fields[field] = (get_avg_val(field), deviation)
391 return
392 else:
393 opts.stats_range_fields[metric] = (get_avg_val(metric), deviation)
394 return
395
396 if isinstance(metric, list):
397 for field in metric:
398 opts.stats_range_fields[field] = (get_avg_val(field), deviation)
399
400 # -----
401
402 def when(b, f):
403 # When list_broken is on, we want to see all expect_broken calls,
404 # so we always do f
405 if b or config.list_broken:
406 return f
407 else:
408 return normal
409
410 def unless(b, f):
411 return when(not b, f)
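# Illustrative uses of these combinators in a .T file (hypothetical test
# names):
#
#   test('T1', when(opsys('mingw32'), skip), compile_and_run, [''])
#   test('T2', unless(have_dynamic(), skip), compile, [''])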
412
413 def doing_ghci():
414 return 'ghci' in config.run_ways
415
416 def ghc_dynamic():
417 return config.ghc_dynamic
418
419 def fast():
420 return config.speed == 2
421
422 def platform( plat ):
423 return config.platform == plat
424
425 def opsys( os ):
426 return config.os == os
427
428 def arch( arch ):
429 return config.arch == arch
430
431 def wordsize( ws ):
432 return config.wordsize == str(ws)
433
434 def msys( ):
435 return config.msys
436
437 def cygwin( ):
438 return config.cygwin
439
440 def have_vanilla( ):
441 return config.have_vanilla
442
443 def have_ncg( ):
444 return config.have_ncg
445
446 def have_dynamic( ):
447 return config.have_dynamic
448
449 def have_profiling( ):
450 return config.have_profiling
451
452 def in_tree_compiler( ):
453 return config.in_tree_compiler
454
455 def unregisterised( ):
456 return config.unregisterised
457
458 def compiler_profiled( ):
459 return config.compiler_profiled
460
461 def compiler_debugged( ):
462 return config.compiler_debugged
463
464 def have_gdb( ):
465 return config.have_gdb
466
467 def have_readelf( ):
468 return config.have_readelf
469
470 # Many tests sadly break with integer-simple due to GHCi's ignorance of it.
471 broken_without_gmp = unless(have_library('integer-gmp'), expect_broken(16043))
472
473 # ---
474
475 def high_memory_usage(name, opts):
476 opts.alone = True
477
478 # If a test is for a multi-CPU race, then running the test alone
479 # increases the chance that we'll actually see it.
480 def multi_cpu_race(name, opts):
481 opts.alone = True
482
483 # ---
484 def literate( name, opts ):
485 opts.literate = True
486
487 def c_src( name, opts ):
488 opts.c_src = True
489
490 def objc_src( name, opts ):
491 opts.objc_src = True
492
493 def objcpp_src( name, opts ):
494 opts.objcpp_src = True
495
496 def cmm_src( name, opts ):
497 opts.cmm_src = True
498
499 def outputdir( odir ):
500 return lambda name, opts, d=odir: _outputdir(name, opts, d)
501
502 def _outputdir( name, opts, odir ):
503 opts.outputdir = odir
504
505 # ----
506
507 def pre_cmd( cmd ):
508 return lambda name, opts, c=cmd: _pre_cmd(name, opts, c)
509
510 def _pre_cmd( name, opts, cmd ):
511 opts.pre_cmd = cmd
512
513 # ----
514
515 def cmd_prefix( prefix ):
516 return lambda name, opts, p=prefix: _cmd_prefix(name, opts, p)
517
518 def _cmd_prefix( name, opts, prefix ):
519 opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd
520
521 # ----
522
523 def cmd_wrapper( fun ):
524 return lambda name, opts, f=fun: _cmd_wrapper(name, opts, f)
525
526 def _cmd_wrapper( name, opts, fun ):
527 opts.cmd_wrapper = fun
528
529 # ----
530
531 def compile_cmd_prefix( prefix ):
532 return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, p)
533
534 def _compile_cmd_prefix( name, opts, prefix ):
535 opts.compile_cmd_prefix = prefix
536
537 # ----
538
539 def check_stdout( f ):
540 return lambda name, opts, f=f: _check_stdout(name, opts, f)
541
542 def _check_stdout( name, opts, f ):
543 opts.check_stdout = f
544
545 def no_check_hp(name, opts):
546 opts.check_hp = False
547
548 # ----
549
550 def filter_stdout_lines( regex ):
551 """ Filter lines of stdout with the given regular expression """
552 def f( name, opts ):
553 _normalise_fun(name, opts, lambda s: '\n'.join(re.findall(regex, s)))
554 return f
555
556 def normalise_slashes( name, opts ):
557 _normalise_fun(name, opts, normalise_slashes_)
558
559 def normalise_exe( name, opts ):
560 _normalise_fun(name, opts, normalise_exe_)
561
562 def normalise_fun( *fs ):
563 return lambda name, opts: _normalise_fun(name, opts, fs)
564
565 def _normalise_fun( name, opts, *fs ):
566 opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)
567
568 def normalise_errmsg_fun( *fs ):
569 return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)
570
571 def _normalise_errmsg_fun( name, opts, *fs ):
572 opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)
573
574 def check_errmsg(needle):
575 def norm(str):
576 if needle in str:
577 return "%s contained in -ddump-simpl\n" % needle
578 else:
579 return "%s not contained in -ddump-simpl\n" % needle
580 return normalise_errmsg_fun(norm)
581
582 def grep_errmsg(needle):
583 def norm(str):
584 return "".join(filter(lambda l: re.search(needle, l), str.splitlines(True)))
585 return normalise_errmsg_fun(norm)
586
587 def normalise_whitespace_fun(f):
588 return lambda name, opts: _normalise_whitespace_fun(name, opts, f)
589
590 def _normalise_whitespace_fun(name, opts, f):
591 opts.whitespace_normaliser = f
592
593 def normalise_version_( *pkgs ):
594 def normalise_version__( str ):
595 return re.sub('(' + '|'.join(map(re.escape,pkgs)) + ')-[0-9.]+',
596 '\\1-<VERSION>', str)
597 return normalise_version__
598
599 def normalise_version( *pkgs ):
600 def normalise_version__( name, opts ):
601 _normalise_fun(name, opts, normalise_version_(*pkgs))
602 _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
603 return normalise_version__
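# For example (illustrative):
#
#   normalise_version_('base')('Loaded package base-4.12.0.0')
#       == 'Loaded package base-<VERSION>'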
604
605 def normalise_drive_letter(name, opts):
606 # Windows only. Change D:\\ to C:\\.
607 _normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))
608
609 def keep_prof_callstacks(name, opts):
610 """Keep profiling callstacks.
611
612 Use together with `only_ways(prof_ways)`.
613 """
614 opts.keep_prof_callstacks = True
615
616 def join_normalisers(*a):
617 """
618 Compose functions, flattening sequences.
619
620 join_normalisers(f1,[f2,f3],f4)
621
622 is the same as
623
624 lambda x: f1(f2(f3(f4(x))))
625 """
626
627 def flatten(l):
628 """
629 Taken from http://stackoverflow.com/a/2158532/946226
630 """
631 for el in l:
632 if (isinstance(el, collections.abc.Iterable)
633 and not isinstance(el, (bytes, str))):
634 for sub in flatten(el):
635 yield sub
636 else:
637 yield el
638
639 a = flatten(a)
640
641 fn = lambda x:x # identity function
642 for f in a:
643 assert callable(f)
644 fn = lambda x,f=f,fn=fn: fn(f(x))
645 return fn
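# A quick illustration (hypothetical normalisers): the last-listed
# function is applied first, and nested sequences are flattened.
#
#   f = join_normalisers(lambda s: s.upper(), [lambda s: s.strip()])
#   f('  hi  ') == 'HI'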
646
647 # ----
648 # Function for composing two opt-fns together
649
650 def executeSetups(fs, name, opts):
651 if type(fs) is list:
652 # If we have a list of setups, then execute each one
653 for f in fs:
654 executeSetups(f, name, opts)
655 else:
656 # fs is a single function, so just apply it
657 fs(name, opts)
658
659 # -----------------------------------------------------------------------------
660 # The current directory of tests
661
662 def newTestDir(tempdir, dir):
663
664 global thisdir_settings
665 # reset the options for this test directory
666 def settings(name, opts, tempdir=tempdir, dir=dir):
667 return _newTestDir(name, opts, tempdir, dir)
668 thisdir_settings = settings
669
670 # Should be equal to entry in toplevel .gitignore.
671 testdir_suffix = '.run'
672
673 def _newTestDir(name, opts, tempdir, dir):
674 testdir = os.path.join('', *(p for p in PurePath(dir).parts if p != '..'))
675 opts.srcdir = os.path.join(os.getcwd(), dir)
676 opts.testdir = os.path.join(tempdir, testdir, name + testdir_suffix)
677 opts.compiler_always_flags = config.compiler_always_flags
678
679 # -----------------------------------------------------------------------------
680 # Actually doing tests
681
682 parallelTests = []
683 aloneTests = []
684 allTestNames = set([])
685
686 def runTest(watcher, opts, name, func, args):
687 if config.use_threads:
688 pool_sema.acquire()
689 t = threading.Thread(target=test_common_thread,
690 name=name,
691 args=(watcher, name, opts, func, args))
692 t.daemon = False
693 t.start()
694 else:
695 test_common_work(watcher, name, opts, func, args)
696
697 # name :: String
698 # setup :: [TestOpt] -> IO ()
699 def test(name, setup, func, args):
700 global aloneTests
701 global parallelTests
702 global allTestNames
703 global thisdir_settings
704 if name in allTestNames:
705 framework_fail(name, 'duplicate', 'There are multiple tests with this name')
706 if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
707 framework_fail(name, 'bad_name', 'This test has an invalid name')
708
709 if config.run_only_some_tests:
710 if name not in config.only:
711 return
712 else:
713 # Note [Mutating config.only]
714 # config.only is initially the set of tests requested by
715 # the user (via 'make TEST='). We then remove all tests that
716 # we've already seen (in .T files), so that we can later
717 # report on any tests we couldn't find and error out.
718 config.only.remove(name)
719
720 # Make a deep copy of the default_testopts, as we need our own copy
721 # of any dictionaries etc inside it. Otherwise, if one test modifies
722 # them, all tests will see the modified version!
723 myTestOpts = copy.deepcopy(default_testopts)
724
725 executeSetups([thisdir_settings, setup], name, myTestOpts)
726
727 thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
728 if myTestOpts.alone:
729 aloneTests.append(thisTest)
730 else:
731 parallelTests.append(thisTest)
732 allTestNames.add(name)
733
734 if config.use_threads:
735 def test_common_thread(watcher, name, opts, func, args):
736 try:
737 test_common_work(watcher, name, opts, func, args)
738 finally:
739 pool_sema.release()
740
741 def get_package_cache_timestamp():
742 if config.package_conf_cache_file == '':
743 return 0.0
744 else:
745 try:
746 return os.stat(config.package_conf_cache_file).st_mtime
747 except OSError:
748 return 0.0
749
750 do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o', '.out') # 12112
751
752 def test_common_work(watcher, name, opts, func, args):
753 try:
754 t.total_tests += 1
755 setLocalTestOpts(opts)
756
757 package_conf_cache_file_start_timestamp = get_package_cache_timestamp()
758
759 # All the ways we might run this test
760 if func == compile or func == multimod_compile:
761 all_ways = config.compile_ways
762 elif func == compile_and_run or func == multimod_compile_and_run:
763 all_ways = config.run_ways
764 elif func == ghci_script:
765 if 'ghci' in config.run_ways:
766 all_ways = ['ghci']
767 else:
768 all_ways = []
769 else:
770 all_ways = ['normal']
771
772 # A test itself can request extra ways by setting opts.extra_ways
773 all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]
774
775 t.total_test_cases += len(all_ways)
776
777 ok_way = lambda way: \
778 not getTestOpts().skip \
779 and (getTestOpts().only_ways is None or way in getTestOpts().only_ways) \
780 and (config.cmdline_ways == [] or way in config.cmdline_ways) \
781 and (not (config.skip_perf_tests and isStatsTest())) \
782 and (not (config.only_perf_tests and not isStatsTest())) \
783 and way not in getTestOpts().omit_ways
784
785 # The ways we will actually run this test in (i.e. not skipped)
786 do_ways = list(filter (ok_way,all_ways))
787
788 # Only run all ways in slow mode.
789 # See Note [validate and testsuite speed] in toplevel Makefile.
790 if config.accept:
791 # Only ever run one way
792 do_ways = do_ways[:1]
793 elif config.speed > 0:
794 # However, if we EXPLICITLY asked for a way (with extra_ways)
795 # please test it!
796 explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
797 other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
798 do_ways = other_ways[:1] + explicit_ways
799
800 # Find all files in the source directory that this test
801 # depends on. Do this only once for all ways.
802 # Generously add all filenames that start with the name of
803 # the test to this set, as a convenience to test authors.
804 # They will have to use the `extra_files` setup function to
805 # specify all other files that their test depends on (but
806 # this seems to be necessary for only about 10% of all
807 # tests).
808 files = set(f for f in os.listdir(opts.srcdir)
809 if f.startswith(name) and not f == name and
810 not f.endswith(testdir_suffix) and
811 not os.path.splitext(f)[1] in do_not_copy)
812 for filename in (opts.extra_files + extra_src_files.get(name, [])):
813 if filename.startswith('/'):
814 framework_fail(name, 'whole-test',
815 'no absolute paths in extra_files please: ' + filename)
816
817 elif '*' in filename:
818 # Don't use wildcards in extra_files too much, as
819 # globbing is slow.
820 files.update((os.path.relpath(f, opts.srcdir)
821 for f in glob.iglob(in_srcdir(filename))))
822
823 elif filename:
824 files.add(filename)
825
826 else:
827 framework_fail(name, 'whole-test', 'extra_file is empty string')
828
829 # Run the required tests...
830 for way in do_ways:
831 if stopping():
832 break
833 try:
834 do_test(name, way, func, args, files)
835 except KeyboardInterrupt:
836 stopNow()
837 except Exception as e:
838 framework_fail(name, way, str(e))
839 traceback.print_exc()
840
841 t.n_tests_skipped += len(set(all_ways) - set(do_ways))
842
843 if config.cleanup and do_ways:
844 try:
845 cleanup()
846 except Exception as e:
847 framework_fail(name, 'runTest', 'Unhandled exception during cleanup: ' + str(e))
848
849 package_conf_cache_file_end_timestamp = get_package_cache_timestamp()
850
851 if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
852 framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))
853
854 except Exception as e:
855 framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
856 finally:
857 watcher.notify()
858
859 def do_test(name, way, func, args, files):
860 opts = getTestOpts()
861
862 full_name = name + '(' + way + ')'
863
864 if_verbose(2, "=====> {0} {1} of {2} {3}".format(
865 full_name, t.total_tests, len(allTestNames),
866 [len(t.unexpected_passes),
867 len(t.unexpected_failures),
868 len(t.framework_failures)]))
869
870 # Clean up prior to the test, so that we can't spuriously conclude
871 # that it passed on the basis of old run outputs.
872 cleanup()
873 os.makedirs(opts.testdir)
874
875 # Link all source files for this test into a new directory in
876 # /tmp, and run the test in that directory. This makes it
877 # possible to run tests in parallel, without modification, that
878 # would otherwise (accidentally) write to the same output file.
879 # It also makes it easier to keep the testsuite clean.
880
881 for extra_file in files:
882 src = in_srcdir(extra_file)
883 dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
884 if os.path.isfile(src):
885 link_or_copy_file(src, dst)
886 elif os.path.isdir(src):
887 os.mkdir(dst)
888 lndir(src, dst)
889 else:
890 if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
891 # When using a ghc built without haddock support, .t
892 # files are rightfully missing. Don't
893 # framework_fail. Test will be skipped later.
894 pass
895 else:
896 framework_fail(name, way,
897 'extra_file does not exist: ' + extra_file)
898
899 if func.__name__ == 'run_command' or opts.pre_cmd:
900 # When running 'MAKE' make sure 'TOP' still points to the
901 # root of the testsuite.
902 src_makefile = in_srcdir('Makefile')
903 dst_makefile = in_testdir('Makefile')
904 if os.path.exists(src_makefile):
905 with io.open(src_makefile, 'r', encoding='utf8') as src:
906 makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
907 with io.open(dst_makefile, 'w', encoding='utf8') as dst:
908 dst.write(makefile)
909
910 if opts.pre_cmd:
911 exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, override_options(opts.pre_cmd)),
912 stderr = subprocess.STDOUT,
913 print_output = config.verbose >= 3)
914
915 # If user used expect_broken then don't record failures of pre_cmd
916 if exit_code != 0 and opts.expect not in ['fail']:
917 framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
918 if_verbose(1, '** pre_cmd was "{0}".'.format(override_options(opts.pre_cmd)))
919
920 result = func(*[name,way] + args)
921
922 if opts.expect not in ['pass', 'fail', 'missing-lib']:
923 framework_fail(name, way, 'bad expected ' + opts.expect)
924
925 try:
926 passFail = result['passFail']
927 except (KeyError, TypeError):
928 passFail = 'No passFail found'
929
930 directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
931
932 if passFail == 'pass':
933 if _expect_pass(way):
934 t.expected_passes.append((directory, name, way))
935 t.n_expected_passes += 1
936 else:
937 if_verbose(1, '*** unexpected pass for %s' % full_name)
938 t.unexpected_passes.append((directory, name, 'unexpected', way))
939 elif passFail == 'fail':
940 if _expect_pass(way):
941 reason = result['reason']
942 tag = result.get('tag')
943 if tag == 'stat':
944 if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
945 t.unexpected_stat_failures.append((directory, name, reason, way))
946 else:
947 if_verbose(1, '*** unexpected failure for %s' % full_name)
948 t.unexpected_failures.append((directory, name, reason, way))
949 else:
950 if opts.expect == 'missing-lib':
951 t.missing_libs.append((directory, name, 'missing-lib', way))
952 else:
953 t.n_expected_failures += 1
954 else:
955 framework_fail(name, way, 'bad result ' + passFail)
956
957 # Make is often invoked with -s, which means if it fails, we get
958 # no feedback at all. This is annoying. So let's remove the option
959 # if found and instead have the testsuite decide on what to do
960 # with the output.
961 def override_options(pre_cmd):
962 if config.verbose >= 5 and bool(re.match(r'\$make', pre_cmd, re.I)):
963 return pre_cmd.replace('-s' , '') \
964 .replace('--silent', '') \
965 .replace('--quiet' , '')
966
967 return pre_cmd
968
969 def framework_fail(name, way, reason):
970 opts = getTestOpts()
971 directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
972 full_name = name + '(' + way + ')'
973 if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
974 t.framework_failures.append((directory, name, way, reason))
975
976 def framework_warn(name, way, reason):
977 opts = getTestOpts()
978 directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
979 full_name = name + '(' + way + ')'
980 if_verbose(1, '*** framework warning for %s %s ' % (full_name, reason))
981 t.framework_warnings.append((directory, name, way, reason))
982
983 def badResult(result):
984 try:
985 if result['passFail'] == 'pass':
986 return False
987 return True
988 except (KeyError, TypeError):
989 return True
990
991 # -----------------------------------------------------------------------------
992 # Generic command tests
993
994 # A generic command test is expected to run and exit successfully.
995 #
996 # The expected exit code can be changed via exit_code() as normal, and
997 # the expected stdout/stderr are stored in <testname>.stdout and
998 # <testname>.stderr. The output of the command can be ignored
999 # altogether by using the setup functions ignore_stdout and
1000 # ignore_stderr.
1001
1002 def run_command( name, way, cmd ):
1003 return simple_run( name, '', override_options(cmd), '' )
1004
1005 # -----------------------------------------------------------------------------
1006 # GHCi tests
1007
1008 def ghci_script( name, way, script):
1009 flags = ' '.join(get_compiler_flags())
1010 way_flags = ' '.join(config.way_flags[way])
1011
1012 # We pass HC and HC_OPTS as environment variables, so that the
1013 # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
1014 cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {way_flags} {flags}'
1015 ).format(flags=flags, way_flags=way_flags)
1016 # NB: put way_flags before flags so that flags in all.T can override others
1017
1018 getTestOpts().stdin = script
1019 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
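# An illustrative <test>.script (hypothetical module names), using the
# HC/HC_OPTS environment variables set up above:
#
#   :! $HC $HC_OPTS -c Helper.hs
#   :load Main
#   main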
1020
1021 # -----------------------------------------------------------------------------
1022 # Compile-only tests
1023
1024 def compile( name, way, extra_hc_opts ):
1025 return do_compile( name, way, 0, '', [], extra_hc_opts )
1026
1027 def compile_fail( name, way, extra_hc_opts ):
1028 return do_compile( name, way, 1, '', [], extra_hc_opts )
1029
1030 def backpack_typecheck( name, way, extra_hc_opts ):
1031 return do_compile( name, way, 0, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )
1032
1033 def backpack_typecheck_fail( name, way, extra_hc_opts ):
1034 return do_compile( name, way, 1, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )
1035
1036 def backpack_compile( name, way, extra_hc_opts ):
1037 return do_compile( name, way, 0, '', [], extra_hc_opts, backpack=True )
1038
1039 def backpack_compile_fail( name, way, extra_hc_opts ):
1040 return do_compile( name, way, 1, '', [], extra_hc_opts, backpack=True )
1041
1042 def backpack_run( name, way, extra_hc_opts ):
1043 return compile_and_run__( name, way, '', [], extra_hc_opts, backpack=True )
1044
1045 def multimod_compile( name, way, top_mod, extra_hc_opts ):
1046 return do_compile( name, way, 0, top_mod, [], extra_hc_opts )
1047
1048 def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
1049 return do_compile( name, way, 1, top_mod, [], extra_hc_opts )
1050
1051 def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
1052 return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)
1053
1054 def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
1055 return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
1056
1057 def do_compile(name, way, should_fail, top_mod, extra_mods, extra_hc_opts, **kwargs):
1058 # print 'Compile only, extra args = ', extra_hc_opts
1059
1060 result = extras_build( way, extra_mods, extra_hc_opts )
1061 if badResult(result):
1062 return result
1063 extra_hc_opts = result['hc_opts']
1064
1065 result = simple_build(name, way, extra_hc_opts, should_fail, top_mod, 0, 1, **kwargs)
1066
1067 if badResult(result):
1068 return result
1069
1070 # the actual stderr should always match the expected, regardless
1071 # of whether we expected the compilation to fail or not (successful
1072 # compilations may generate warnings).
1073
1074 expected_stderr_file = find_expected_file(name, 'stderr')
1075 actual_stderr_file = add_suffix(name, 'comp.stderr')
1076
1077 if not compare_outputs(way, 'stderr',
1078 join_normalisers(getTestOpts().extra_errmsg_normaliser,
1079 normalise_errmsg),
1080 expected_stderr_file, actual_stderr_file,
1081 whitespace_normaliser=getattr(getTestOpts(),
1082 "whitespace_normaliser",
1083 normalise_whitespace)):
1084 return failBecause('stderr mismatch')
1085
1086 # no problems found, this test passed
1087 return passed()
1088
1089 def compile_cmp_asm( name, way, extra_hc_opts ):
1090 # print('Compile only, extra args = ', extra_hc_opts)
1091 result = simple_build(name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)
1092
1093 if badResult(result):
1094 return result
1095
1096 # the actual assembly should always match the expected assembly,
1097 # regardless of whether the compilation emitted warnings (those go
1098 # to stderr, not into the .s file).
1099
1100 expected_asm_file = find_expected_file(name, 'asm')
1101 actual_asm_file = add_suffix(name, 's')
1102
1103 if not compare_outputs(way, 'asm',
1104 join_normalisers(normalise_errmsg, normalise_asm),
1105 expected_asm_file, actual_asm_file):
1106 return failBecause('asm mismatch')
1107
1108 # no problems found, this test passed
1109 return passed()
1110
1111 # -----------------------------------------------------------------------------
1112 # Compile-and-run tests
1113
1114 def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts, backpack=0 ):
1115 # print 'Compile and run, extra args = ', extra_hc_opts
1116
1117 result = extras_build( way, extra_mods, extra_hc_opts )
1118 if badResult(result):
1119 return result
1120 extra_hc_opts = result['hc_opts']
1121
1122 if way.startswith('ghci'): # interpreted...
1123 return interpreter_run(name, way, extra_hc_opts, top_mod)
1124 else: # compiled...
1125 result = simple_build(name, way, extra_hc_opts, 0, top_mod, 1, 1, backpack = backpack)
1126 if badResult(result):
1127 return result
1128
1129 cmd = './' + name
1130
1131 # we don't check the compiler's stderr for a compile-and-run test
1132 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1133
1134 def compile_and_run( name, way, extra_hc_opts ):
1135 return compile_and_run__( name, way, '', [], extra_hc_opts)
1136
1137 def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
1138 return compile_and_run__( name, way, top_mod, [], extra_hc_opts)
1139
1140 def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
1141 return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
1142
1143 def stats( name, way, stats_file ):
1144 opts = getTestOpts()
1145 return check_stats(name, way, stats_file, opts.stats_range_fields)
1146
1147 def metric_dict(name, way, metric, value):
1148 return Perf.PerfStat(
1149 test_env = config.test_env,
1150 test = name,
1151 way = way,
1152 metric = metric,
1153 value = value)
1154
1155 # -----------------------------------------------------------------------------
1156 # Check test stats. This prints the results for the user.
1157 # name: name of the test.
1158 # way: the way.
1159 # stats_file: the path of the stats_file containing the stats for the test.
1160 # range_fields: a dict from metric name to (expected value, deviation),
1161 # or None when there is no baseline yet.
1162 # Returns a pass/fail object. Passes if the stats are within the expected value ranges.
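# A stats file produced via '+RTS -t<file> --machine-readable -RTS'
# contains lines like the following (values illustrative):
#   ("bytes allocated", "500000")
#   ("max_bytes_used", "42000")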
1163 def check_stats(name, way, stats_file, range_fields):
1164 result = passed()
1165 if range_fields:
1166 try:
1167 f = open(in_testdir(stats_file))
1168 except IOError as e:
1169 return failBecause(str(e))
1170 stats_file_contents = f.read()
1171 f.close()
1172
1173 for (metric, range_val_dev) in range_fields.items():
1174 field_match = re.search('\("' + metric + '", "([0-9]+)"\)', stats_file_contents)
1175 if field_match is None:
1176 print('Failed to find metric: ', metric)
1177 metric_result = failBecause('no such stats metric')
1178 else:
1179 actual_val = int(field_match.group(1))
1180
1181 # Store the metric so it can later be stored in a git note.
1182 perf_stat = metric_dict(name, way, metric, actual_val)
1183 change = None
1184
1185 # If this is the first time running the benchmark, then pass.
1186 if range_val_dev is None:
1187 metric_result = passed()
1188 change = MetricChange.NewMetric
1189 else:
1190 (expected_val, tolerance_dev) = range_val_dev
1191 (change, metric_result) = Perf.check_stats_change(
1192 perf_stat,
1193 expected_val,
1194 tolerance_dev,
1195 config.allowed_perf_changes,
1196 config.verbose >= 4)
1197 t.metrics.append((change, perf_stat))
1198
1199 # If any metric fails then the test fails.
1200 # Note, the remaining metrics are still run so that
1201 # a complete list of changes can be presented to the user.
1202 if metric_result['passFail'] == 'fail':
1203 result = metric_result
1204
1205 return result
1206
1207 # -----------------------------------------------------------------------------
1208 # Build a single-module program
1209
1210 def extras_build( way, extra_mods, extra_hc_opts ):
1211 for mod, opts in extra_mods:
1212 result = simple_build(mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0)
1213 if not (mod.endswith('.hs') or mod.endswith('.lhs')):
1214 extra_hc_opts += ' ' + replace_suffix(mod, 'o')
1215 if badResult(result):
1216 return result
1217
1218 return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1219
1220 def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, backpack = False):
1221 opts = getTestOpts()
1222
1223 # Redirect stdout and stderr to the same file
1224 stdout = in_testdir(name, 'comp.stderr')
1225 stderr = subprocess.STDOUT
1226
1227 if top_mod != '':
1228 srcname = top_mod
1229 elif addsuf:
1230 if backpack:
1231 srcname = add_suffix(name, 'bkp')
1232 else:
1233 srcname = add_hs_lhs_suffix(name)
1234 else:
1235 srcname = name
1236
1237 if top_mod != '':
1238 to_do = '--make '
1239 if link:
1240 to_do = to_do + '-o ' + name
1241 elif backpack:
1242 if link:
1243 to_do = '-o ' + name + ' '
1244 else:
1245 to_do = ''
1246 to_do = to_do + '--backpack '
1247 elif link:
1248 to_do = '-o ' + name
1249 else:
1250 to_do = '-c' # just compile
1251
1252 stats_file = name + '.comp.stats'
1253 if isCompilerStatsTest():
1254 extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1255 if backpack:
1256 extra_hc_opts += ' -outputdir ' + name + '.out'
1257
1258 # Required by GHC 7.3+, harmless for earlier versions:
1259 if (getTestOpts().c_src or
1260 getTestOpts().objc_src or
1261 getTestOpts().objcpp_src or
1262 getTestOpts().cmm_src):
1263 extra_hc_opts += ' -no-hs-main '
1264
1265 if getTestOpts().compile_cmd_prefix == '':
1266 cmd_prefix = ''
1267 else:
1268 cmd_prefix = getTestOpts().compile_cmd_prefix + ' '
1269
1270 flags = ' '.join(get_compiler_flags() + config.way_flags[way])
1271
1272 cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
1273 '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts}'
1274 ).format(**locals())
1275
1276 exit_code = runCmd(cmd, None, stdout, stderr, opts.compile_timeout_multiplier)
1277
1278 if exit_code != 0 and not should_fail:
1279 if config.verbose >= 1 and _expect_pass(way):
1280 print('Compile failed (exit code {0}) errors were:'.format(exit_code))
1281 actual_stderr_path = in_testdir(name, 'comp.stderr')
1282 dump_file(actual_stderr_path)
1283
1284 # ToDo: if the sub-shell was killed by ^C, then exit
1285
1286 if isCompilerStatsTest():
1287 statsResult = check_stats(name, way, stats_file, opts.stats_range_fields)
1288 if badResult(statsResult):
1289 return statsResult
1290
1291 if should_fail:
1292 if exit_code == 0:
1293 return failBecause('exit code 0')
1294 else:
1295 if exit_code != 0:
1296 return failBecause('exit code non-0')
1297
1298 return passed()
1299
1300 # -----------------------------------------------------------------------------
1301 # Run a program and check its output
1302 #
1303 # If testname.stdin exists, route input from that, else
1304 # from /dev/null. Route output to testname.run.stdout and
1305 # testname.run.stderr. Returns the exit code of the run.
1306
1307 def simple_run(name, way, prog, extra_run_opts):
1308 opts = getTestOpts()
1309
1310 # figure out what to use for stdin
1311 if opts.stdin:
1312 stdin = in_testdir(opts.stdin)
1313 elif os.path.exists(in_testdir(name, 'stdin')):
1314 stdin = in_testdir(name, 'stdin')
1315 else:
1316 stdin = None
1317
1318 stdout = in_testdir(name, 'run.stdout')
1319 if opts.combined_output:
1320 stderr = subprocess.STDOUT
1321 else:
1322 stderr = in_testdir(name, 'run.stderr')
1323
1324 my_rts_flags = rts_flags(way)
1325
1326 stats_file = name + '.stats'
1327 if isStatsTest() and not isCompilerStatsTest():
1328 stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1329 else:
1330 stats_args = ''
1331
1332 # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
1333 cmd = prog + stats_args + ' ' + my_rts_flags + ' ' + extra_run_opts
1334
1335 if opts.cmd_wrapper is not None:
1336 cmd = opts.cmd_wrapper(cmd)
1337
1338 cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())
1339
1340 # run the command
1341 exit_code = runCmd(cmd, stdin, stdout, stderr, opts.run_timeout_multiplier)
1342
1343 # check the exit code
1344 if exit_code != opts.exit_code:
1345 if config.verbose >= 1 and _expect_pass(way):
1346 print('Wrong exit code for ' + name + '(' + way + ')' + '(expected', opts.exit_code, ', actual', exit_code, ')')
1347 dump_stdout(name)
1348 dump_stderr(name)
1349 return failBecause('bad exit code')
1350
1351 if not (opts.ignore_stderr or stderr_ok(name, way) or opts.combined_output):
1352 return failBecause('bad stderr')
1353 if not (opts.ignore_stdout or stdout_ok(name, way)):
1354 return failBecause('bad stdout')
1355
1356 check_hp = '-h' in my_rts_flags and opts.check_hp
1357 check_prof = '-p' in my_rts_flags
1358
1359 # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
1360 if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
1361 return failBecause('bad heap profile')
1362 if check_prof and not check_prof_ok(name, way):
1363 return failBecause('bad profile')
1364
1365 return check_stats(name, way, stats_file, opts.stats_range_fields)
1366
1367 def rts_flags(way):
1368 args = config.way_rts_flags.get(way, [])
1369 return '+RTS {0} -RTS'.format(' '.join(args)) if args else ''
1370
1371 # -----------------------------------------------------------------------------
1372 # Run a program in the interpreter and check its output
1373
1374 def interpreter_run(name, way, extra_hc_opts, top_mod):
1375 opts = getTestOpts()
1376
1377 stdout = in_testdir(name, 'interp.stdout')
1378 stderr = in_testdir(name, 'interp.stderr')
1379 script = in_testdir(name, 'genscript')
1380
1381 if opts.combined_output:
1382 framework_fail(name, 'unsupported',
1383 'WAY=ghci and combined_output together is not supported')
1384
1385 if (top_mod == ''):
1386 srcname = add_hs_lhs_suffix(name)
1387 else:
1388 srcname = top_mod
1389
1390 delimiter = '===== program output begins here\n'
1391
1392 with io.open(script, 'w', encoding='utf8') as f:
1393 # set the prog name and command-line args to match the compiled
1394 # environment.
1395 f.write(':set prog ' + name + '\n')
1396 f.write(':set args ' + opts.extra_run_opts + '\n')
1397 # Add marker lines to the stdout and stderr output files, so we
1398 # can separate GHCi's output from the program's.
1399 f.write(':! echo ' + delimiter)
1400 f.write(':! echo 1>&2 ' + delimiter)
1401 # Set stdout to be line-buffered to match the compiled environment.
1402 f.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
1403 # wrapping in GHC.TopHandler.runIO ensures we get the same output
1404 # in the event of an exception as for the compiled program.
1405 f.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
1406
1407 stdin = in_testdir(opts.stdin if opts.stdin else add_suffix(name, 'stdin'))
1408 if os.path.exists(stdin):
1409 os.system('cat "{0}" >> "{1}"'.format(stdin, script))
1410
1411 flags = ' '.join(get_compiler_flags() + config.way_flags[way])
1412
1413 cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts}'
1414 ).format(**locals())
1415
1416 if opts.cmd_wrapper is not None:
1417 cmd = opts.cmd_wrapper(cmd)
1418
1419 cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())
1420
1421 exit_code = runCmd(cmd, script, stdout, stderr, opts.run_timeout_multiplier)
1422
1423 # split the stdout into compilation/program output
1424 split_file(stdout, delimiter,
1425 in_testdir(name, 'comp.stdout'),
1426 in_testdir(name, 'run.stdout'))
1427 split_file(stderr, delimiter,
1428 in_testdir(name, 'comp.stderr'),
1429 in_testdir(name, 'run.stderr'))
1430
1431 # check the exit code
1432 if exit_code != getTestOpts().exit_code:
1433 print('Wrong exit code for ' + name + '(' + way + ') (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
1434 dump_stdout(name)
1435 dump_stderr(name)
1436 return failBecause('bad exit code')
1437
1438 # ToDo: if the sub-shell was killed by ^C, then exit
1439
1440 if not (opts.ignore_stderr or stderr_ok(name, way)):
1441 return failBecause('bad stderr')
1442 elif not (opts.ignore_stdout or stdout_ok(name, way)):
1443 return failBecause('bad stdout')
1444 else:
1445 return passed()
1446
1447 def split_file(in_fn, delimiter, out1_fn, out2_fn):
1448 # See Note [Universal newlines].
1449 with io.open(in_fn, 'r', encoding='utf8', errors='replace', newline=None) as infile:
1450 with io.open(out1_fn, 'w', encoding='utf8', newline='') as out1:
1451 with io.open(out2_fn, 'w', encoding='utf8', newline='') as out2:
1452 line = infile.readline()
1453 while re.sub(r'^\s*', '', line) != delimiter and line != '':
1454 out1.write(line)
1455 line = infile.readline()
1456
1457 line = infile.readline()
1458 while line != '':
1459 out2.write(line)
1460 line = infile.readline()
1461
1462 # -----------------------------------------------------------------------------
1463 # Utils
1464 def get_compiler_flags():
1465 opts = getTestOpts()
1466
1467 flags = copy.copy(opts.compiler_always_flags)
1468
1469 flags.append(opts.extra_hc_opts)
1470
1471 if opts.outputdir is not None:
1472 flags.extend(["-outputdir", opts.outputdir])
1473
1474 return flags
1475
1476 def stdout_ok(name, way):
1477 actual_stdout_file = add_suffix(name, 'run.stdout')
1478 expected_stdout_file = find_expected_file(name, 'stdout')
1479
1480 extra_norm = join_normalisers(normalise_output, getTestOpts().extra_normaliser)
1481
1482 check_stdout = getTestOpts().check_stdout
1483 if check_stdout:
1484 actual_stdout_path = in_testdir(actual_stdout_file)
1485 return check_stdout(actual_stdout_path, extra_norm)
1486
1487 return compare_outputs(way, 'stdout', extra_norm,
1488 expected_stdout_file, actual_stdout_file)
1489
1490 def dump_stdout( name ):
1491 with open(in_testdir(name, 'run.stdout'), encoding='utf8') as f:
1492 str = f.read().strip()
1493 if str:
1494 print("Stdout (", name, "):")
1495 print(str)
1496
1497 def stderr_ok(name, way):
1498 actual_stderr_file = add_suffix(name, 'run.stderr')
1499 expected_stderr_file = find_expected_file(name, 'stderr')
1500
1501 return compare_outputs(way, 'stderr',
1502 join_normalisers(normalise_errmsg, getTestOpts().extra_errmsg_normaliser), \
1503 expected_stderr_file, actual_stderr_file,
1504 whitespace_normaliser=normalise_whitespace)
1505
1506 def dump_stderr( name ):
1507 with open(in_testdir(name, 'run.stderr'), encoding='utf8') as f:
1508 str = f.read().strip()
1509 if str:
1510 print("Stderr (", name, "):")
1511 print(str)
1512
1513 def read_no_crs(file):
1514 str = ''
1515 try:
1516 # See Note [Universal newlines].
1517 with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
1518 str = h.read()
1519 except Exception:
1520 # On Windows, if the program fails very early, it seems the
1521 # files stdout/stderr are redirected to may not get created
1522 pass
1523 return str
1524
1525 def write_file(file, str):
1526 # See Note [Universal newlines].
1527 with io.open(file, 'w', encoding='utf8', newline='') as h:
1528 h.write(str)
1529
1530 # Note [Universal newlines]
1531 #
1532 # We don't want to write any Windows style line endings ever, because
1533 # it would mean that `make accept` would touch every line of the file
1534 # when switching between Linux and Windows.
1535 #
1536 # Furthermore, when reading a file, it is convenient to translate all
1537 # Windows style endings to '\n', as it simplifies searching or massaging
1538 # the content.
1539 #
1540 # Solution: use `io.open` instead of `open`
1541 # * when reading: use newline=None to translate '\r\n' to '\n'
1542 # * when writing: use newline='' to not translate '\n' to '\r\n'
1543 #
1544 # See https://docs.python.org/2/library/io.html#io.open.
1545 #
1546 # This should work with both python2 and python3, and with both mingw*
1547 # as msys2 style Python.
1548 #
1549 # Do note that io.open returns unicode strings. So we have to specify
1550 # the expected encoding. But there is at least one file which is not
1551 # valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
1552 # Another solution would be to open files in binary mode always, and
1553 # operate on bytes.
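#
# A quick sanity check of the convention (illustrative; 'fn' is any
# scratch file path):
#
#   with io.open(fn, 'w', encoding='utf8', newline='') as h:
#       h.write('a\r\nb\n')           # written byte-for-byte
#   with io.open(fn, 'r', encoding='utf8', newline=None) as h:
#       assert h.read() == 'a\nb\n'   # '\r\n' translated to '\n' on read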
1554
1555 def check_hp_ok(name):
1556 opts = getTestOpts()
1557
1558 # do not qualify for hp2ps because we should be in the right directory
1559 hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())
1560
1561 hp2psResult = runCmd(hp2psCmd)
1562
1563 actual_ps_path = in_testdir(name, 'ps')
1564
1565 if hp2psResult == 0:
1566 if os.path.exists(actual_ps_path):
1567 if gs_working:
1568 gsResult = runCmd(genGSCmd(actual_ps_path))
1569 if gsResult == 0:
1570 return True
1571 else:
1572 print("hp2ps output for " + name + " is not valid PostScript")
1573 else: return True # assume postscript is valid without ghostscript
1574 else:
1575 print("hp2ps did not generate PostScript for " + name)
1576 return False
1577 else:
1578 print("hp2ps error when processing heap profile for " + name)
1579 return False
1580
1581 def check_prof_ok(name, way):
1582 expected_prof_file = find_expected_file(name, 'prof.sample')
1583 expected_prof_path = in_testdir(expected_prof_file)
1584
1585 # Check actual prof file only if we have an expected prof file to
1586 # compare it with.
1587 if not os.path.exists(expected_prof_path):
1588 return True
1589
1590 actual_prof_file = add_suffix(name, 'prof')
1591 actual_prof_path = in_testdir(actual_prof_file)
1592
1593 if not os.path.exists(actual_prof_path):
1594 print(actual_prof_path + " does not exist")
1595 return False
1596
1597 if os.path.getsize(actual_prof_path) == 0:
1598 print(actual_prof_path + " is empty")
1599 return False
1600
1601 return compare_outputs(way, 'prof', normalise_prof,
1602 expected_prof_file, actual_prof_file,
1603 whitespace_normaliser=normalise_whitespace)
1604
1605 # Compare expected output to actual output, and optionally accept the
1606 # new output. Returns true if output matched or was accepted, false
1607 # otherwise. See Note [Output comparison] for the meaning of the
1608 # normaliser and whitespace_normaliser parameters.
1609 def compare_outputs(way, kind, normaliser, expected_file, actual_file,
1610 whitespace_normaliser=lambda x:x):
1611
1612 expected_path = in_srcdir(expected_file)
1613 actual_path = in_testdir(actual_file)
1614
1615 if os.path.exists(expected_path):
1616 expected_str = normaliser(read_no_crs(expected_path))
1617 # Create the .normalised file in the testdir, not in the srcdir.
1618 expected_normalised_file = add_suffix(expected_file, 'normalised')
1619 expected_normalised_path = in_testdir(expected_normalised_file)
1620 else:
1621 expected_str = ''
1622 expected_normalised_path = '/dev/null'
1623
1624 actual_raw = read_no_crs(actual_path)
1625 actual_str = normaliser(actual_raw)
1626
1627 # See Note [Output comparison].
1628 if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
1629 return True
1630 else:
1631 if config.verbose >= 1 and _expect_pass(way):
1632 print('Actual ' + kind + ' output differs from expected:')
1633
1634 if expected_normalised_path != '/dev/null':
1635 write_file(expected_normalised_path, expected_str)
1636
1637 actual_normalised_path = add_suffix(actual_path, 'normalised')
1638 write_file(actual_normalised_path, actual_str)
1639
1640 if config.verbose >= 1 and _expect_pass(way):
1641 # See Note [Output comparison].
1642 r = runCmd('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
1643 actual_normalised_path),
1644 print_output=True)
1645
1646 # If for some reason there were no non-whitespace differences,
1647 # then do a full diff
1648 if r == 0:
1649 r = runCmd('diff -u "{0}" "{1}"'.format(expected_normalised_path,
1650 actual_normalised_path),
1651 print_output=True)
1652
1653 if config.accept and (getTestOpts().expect == 'fail' or
1654 way in getTestOpts().expect_fail_for):
1655 if_verbose(1, 'Test is expected to fail. Not accepting new output.')
1656 return False
1657 elif config.accept and actual_raw:
1658 if config.accept_platform:
1659 if_verbose(1, 'Accepting new output for platform "'
1660 + config.platform + '".')
1661 expected_path += '-' + config.platform
1662 elif config.accept_os:
1663 if_verbose(1, 'Accepting new output for os "'
1664 + config.os + '".')
1665 expected_path += '-' + config.os
1666 else:
1667 if_verbose(1, 'Accepting new output.')
1668
1669 write_file(expected_path, actual_raw)
1670 return True
1671 elif config.accept:
1672 if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
1673 os.remove(expected_path)
1674 return True
1675 else:
1676 return False
1677
1678 # Note [Output comparison]
1679 #
1680 # We do two types of output comparison:
1681 #
1682 # 1. To decide whether a test has failed. We apply a `normaliser` and an
1683 # optional `whitespace_normaliser` to the expected and the actual
1684 # output, before comparing the two.
1685 #
1686 # 2. To show as a diff to the user when the test indeed failed. We apply
1687 # the same `normaliser` function to the outputs, to make the diff as
1688 # small as possible (only showing the actual problem). But we don't
1689 # apply the `whitespace_normaliser` here, because it might completely
1690 # squash all whitespace, making the diff unreadable. Instead we rely
1691 # on the `diff` program to ignore whitespace changes as much as
1692 # possible (#10152).
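#
# An illustrative sketch (hypothetical strings, not from a real test):
# with whitespace_normaliser=normalise_whitespace, the pair
#
#   expected = 'error:   foo\n'        actual = 'error: foo\n'
#
# compares equal in step 1, so the test passes; had they differed, the
# diff shown in step 2 would be computed from the merely `normaliser`-ed
# files, relying on `diff -uw` to hide whitespace-only changes.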
1693
1694 def normalise_whitespace( str ):
1695 # Merge contiguous whitespace characters into a single space.
1696 return ' '.join(str.split())
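
# An illustrative example (arbitrary input, doctest-style):
#
#   >>> normalise_whitespace('  two\t words\n')
#   'two words'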
1697
1698 callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')
1699
1700 def normalise_callstacks(s):
1701 opts = getTestOpts()
1702 def repl(matches):
1703 location = matches.group(1)
1704 location = normalise_slashes_(location)
1705 return ', called at {0}:<line>:<column> in <package-id>:'.format(location)
1706 # Ignore line number differences in call stacks (#10834).
1707 s = re.sub(callSite_re, repl, s)
1708 # Ignore the change in how we identify implicit call-stacks
1709 s = s.replace('from ImplicitParams', 'from HasCallStack')
1710 if not opts.keep_prof_callstacks:
1711 # Don't output prof callstacks. Test output should be
1712 # independent from the WAY we run the test.
1713 s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
1714 return s
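
# An illustrative example (fabricated call-stack fragment): the text
#
#   ', called at libraries\\base\\GHC\\Err.hs:79:14 in base-4.11.0.0:'
#
# is rewritten to
#
#   ', called at libraries/base/GHC/Err.hs:<line>:<column> in <package-id>:'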
1715
1716 tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)
1717
1718 def normalise_type_reps(str):
1719 """ Normalise out fingerprints from Typeable TyCon representations """
1720 return re.sub(tyCon_re, 'TyCon FINGERPRINT FINGERPRINT ', str)
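
# An illustrative example (made-up fingerprints):
#
#   >>> normalise_type_reps('TyCon 8953088## 1171472## GHC.Types Int')
#   'TyCon FINGERPRINT FINGERPRINT GHC.Types Int'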
1721
1722 def normalise_errmsg( str ):
1723 """Normalise error-messages emitted via stderr"""
1724 # IBM AIX's `ld` is a bit chatty
1725 if opsys('aix'):
1726 str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
1727 # remove " error:" and lower-case " Warning:" to make patch for
1728 # trac issue #10021 smaller
1729 str = modify_lines(str, lambda l: re.sub(' error:', '', l))
1730 str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
1731 str = normalise_callstacks(str)
1732 str = normalise_type_reps(str)
1733
1734 # If some file ends in ".exe" or ".exe:", zap the ".exe" (for
1735 # Windows). The colon is included because it appears in error
1736 # messages; this hacky solution is used in place of more
1737 # sophisticated filename mangling.
1738 str = re.sub('([^\\s])\\.exe', '\\1', str)
1739
1740 # normalise slashes, minimise Windows/Unix filename differences
1741 str = re.sub('\\\\', '/', str)
1742
1743 # The inplace GHC binaries are called ghc-stage[123] to avoid
1744 # filename collisions, so we need to normalise that to just "ghc"
1745 str = re.sub('ghc-stage[123]', 'ghc', str)
1746
1747 # Error messages sometimes mention the integer implementation package
1748 str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
1749
1750 # Error messages sometimes contain this blurb, which can vary
1751 # spuriously depending upon the build configuration (e.g. on the
1752 # integer backend)
1753 str = re.sub(r'\.\.\.plus ([a-z]+|[0-9]+) instances involving out-of-scope types',
1754 '...plus N instances involving out-of-scope types', str)
1755
1756 # Also filter out bullet characters. This is because bullets are used to
1757 # separate error sections, and tests shouldn't be sensitive to how
1758 # the division happens.
1759 bullet = '•'.encode('utf8') if isinstance(str, bytes) else '•'
1760 str = str.replace(bullet, '')
1761
1762 # Windows only: this is a bug in hsc2hs, but it prevents stable
1763 # output for the testsuite. See Trac #9775. For now we filter out
1764 # this warning message to get clean output.
1765 if config.msys:
1766 str = re.sub('Failed to remove file (.*); error= (.*)$', '', str)
1767 str = re.sub(r'DeleteFile "(.+)": permission denied \(Access is denied\.\)(.*)$', '', str)
1768
1769 return str
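
# An illustrative sketch of the combined effect (made-up message):
#
#   >>> normalise_errmsg('C:\\dir\\ghc-stage2.exe: 1 error:')
#   'C:/dir/ghc: 1\n'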
1770
1771 # Normalise a .prof file so that we can reasonably compare it against
1772 # a sample. The comparison ignores the actual profiling data; only the
1773 # shape of the profile and the number of entries matter.
1774 def normalise_prof (str):
1775 # strip everything up to the line beginning "COST CENTRE"
1776 str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)
1777
1778 # strip results for CAFs, these tend to change unpredictably
1779 str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)
1780
1781 # XXX Ignore Main.main. Sometimes this appears under CAF, and
1782 # sometimes under MAIN.
1783 str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)
1784
1785 # We have something like this:
1786 #
1787 # MAIN MAIN <built-in> 53 0 0.0 0.2 0.0 100.0
1788 # CAF Main <entire-module> 105 0 0.0 0.3 0.0 62.5
1789 # readPrec Main Main_1.hs:7:13-16 109 1 0.0 0.6 0.0 0.6
1790 # readPrec Main Main_1.hs:4:13-16 107 1 0.0 0.6 0.0 0.6
1791 # main Main Main_1.hs:(10,1)-(20,20) 106 1 0.0 20.2 0.0 61.0
1792 # == Main Main_1.hs:7:25-26 114 1 0.0 0.0 0.0 0.0
1793 # == Main Main_1.hs:4:25-26 113 1 0.0 0.0 0.0 0.0
1794 # showsPrec Main Main_1.hs:7:19-22 112 2 0.0 1.2 0.0 1.2
1795 # showsPrec Main Main_1.hs:4:19-22 111 2 0.0 0.9 0.0 0.9
1796 # readPrec Main Main_1.hs:7:13-16 110 0 0.0 18.8 0.0 18.8
1797 # readPrec Main Main_1.hs:4:13-16 108 0 0.0 19.9 0.0 19.9
1798 #
1799 # then we remove all the specific profiling data, leaving only the cost
1800 # centre name, module, src, and entries, to end up with this: (modulo
1801 # whitespace between columns)
1802 #
1803 # MAIN MAIN <built-in> 0
1804 # readPrec Main Main_1.hs:7:13-16 1
1805 # readPrec Main Main_1.hs:4:13-16 1
1806 # == Main Main_1.hs:7:25-26 1
1807 # == Main Main_1.hs:4:25-26 1
1808 # showsPrec Main Main_1.hs:7:19-22 2
1809 # showsPrec Main Main_1.hs:4:19-22 2
1810 # readPrec Main Main_1.hs:7:13-16 0
1811 # readPrec Main Main_1.hs:4:13-16 0
1812
1813 # Split into 9 whitespace-separated groups; take columns 1 (cost-centre), 2
1814 # (module), 3 (src), and 5 (entries). SCC names can't have whitespace, so
1815 # this works fine.
1816 str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
1817 '\\1 \\2 \\3 \\5\n', str)
1818 return str
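
# For instance (taking the sample above), the substitution maps the line
#
#   'MAIN  MAIN  <built-in>  53  0  0.0  0.2  0.0  100.0\n'
#
# to 'MAIN MAIN <built-in> 0\n': only the cost-centre name, module, src
# and entry count survive.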
1819
1820 def normalise_slashes_( str ):
1821 str = re.sub('\\\\', '/', str)
1822 str = re.sub('//', '/', str)
1823 return str
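
# An illustrative example (arbitrary path):
#
#   >>> normalise_slashes_('dir\\sub//File.hs')
#   'dir/sub/File.hs'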
1824
1825 def normalise_exe_( str ):
1826 str = re.sub(r'\.exe', '', str)
1827 return str
1828
1829 def normalise_output( str ):
1830 # remove " error:" and lower-case " Warning:" to make patch for
1831 # trac issue #10021 smaller
1832 str = modify_lines(str, lambda l: re.sub(' error:', '', l))
1833 str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
1834 # Remove a .exe extension (for Windows)
1835 # This can occur in error messages generated by the program.
1836 str = re.sub('([^\\s])\\.exe', '\\1', str)
1837 str = normalise_callstacks(str)
1838 str = normalise_type_reps(str)
1839 return str
1840
1841 def normalise_asm( str ):
1842 lines = str.split('\n')
1843 # Only keep instructions and labels not starting with a dot.
1844 metadata = re.compile('^[ \t]*\\..*$')
1845 out = []
1846 for line in lines:
1847 # Drop metadata directives (e.g. ".type")
1848 if not metadata.match(line):
1849 line = re.sub('@plt', '', line)
1850 instr = line.lstrip().split()
1851 # Drop empty lines.
1852 if not instr:
1853 continue
1854 # Drop operands, except for call instructions.
1855 elif instr[0] == 'call':
1856 out.append(instr[0] + ' ' + instr[1])
1857 else:
1858 out.append(instr[0])
1859 out = '\n'.join(out)
1860 return out
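
# An illustrative sketch of the effect on a few made-up lines:
#
#   '  .globl Main_main_info'  -> dropped (metadata directive)
#   '  movq %rax, %rbx'        -> 'movq'        (operands dropped)
#   '  call newCAF@plt'        -> 'call newCAF' (callee kept, @plt stripped)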
1861
1862 def if_verbose( n, s ):
1863 if config.verbose >= n:
1864 print(s)
1865
1866 def dump_file(f):
1867 try:
1868 with io.open(f) as file:
1869 print(file.read())
1870 except Exception:
1871 print('')
1872
1873 def runCmd(cmd, stdin=None, stdout=None, stderr=None, timeout_multiplier=1.0, print_output=False):
1874 timeout_prog = strip_quotes(config.timeout_prog)
1875 timeout = str(int(ceil(config.timeout * timeout_multiplier)))
1876
1877 # Format cmd using config. Example: cmd='{hpc} report A.tix'
1878 cmd = cmd.format(**config.__dict__)
1879 if_verbose(3, cmd + ('< ' + os.path.basename(stdin) if stdin else ''))
1880
1881 stdin_file = io.open(stdin, 'rb') if stdin else None
1882 stdout_buffer = b''
1883 stderr_buffer = b''
1884
1885 hStdErr = subprocess.PIPE
1886 if stderr is subprocess.STDOUT:
1887 hStdErr = subprocess.STDOUT
1888
1889 try:
1890 # cmd is a complex command in Bourne-shell syntax
1891 # e.g. (cd . && 'C:/users/simonpj/HEAD/inplace/bin/ghc-stage2' ...etc)
1892 # Hence it must ultimately be run by a Bourne shell. It is the
1893 # timeout program's job to invoke the Bourne shell.
1894
1895 r = subprocess.Popen([timeout_prog, timeout, cmd],
1896 stdin=stdin_file,
1897 stdout=subprocess.PIPE,
1898 stderr=hStdErr,
1899 env=ghc_env)
1900
1901 stdout_buffer, stderr_buffer = r.communicate()
1902 finally:
1903 if stdin_file:
1904 stdin_file.close()
1905 if config.verbose >= 1 and print_output:
1906 if stdout_buffer:
1907 sys.stdout.buffer.write(stdout_buffer)
1908 if stderr_buffer:
1909 sys.stderr.buffer.write(stderr_buffer)
1910
1911 if stdout:
1912 with io.open(stdout, 'wb') as f:
1913 f.write(stdout_buffer)
1914 if stderr:
1915 if stderr is not subprocess.STDOUT:
1916 with io.open(stderr, 'wb') as f:
1917 f.write(stderr_buffer)
1918
1919 if r.returncode == 98:
1920 # The python timeout program uses 98 to signal that ^C was pressed
1921 stopNow()
1922 if r.returncode == 99 and getTestOpts().exit_code != 99:
1923 # Only print a message when timeout killed the process unexpectedly.
1924 if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
1925 return r.returncode
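
# A minimal usage sketch (hypothetical file names; `opts.testdir` stands
# for the current test directory, and '{compiler}' is expanded from the
# testsuite config by the .format(**config.__dict__) call above, just
# like '{hpc}' in the example there):
#
#   exitcode = runCmd('cd "{0}" && {{compiler}} -c Foo.hs'.format(opts.testdir),
#                     stdout='Foo.comp.out',
#                     print_output=True)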
1926
1927 # -----------------------------------------------------------------------------
1928 # Check whether Ghostscript is available for validating the output of hp2ps
1929
1930 def genGSCmd(psfile):
1931 return '{{gs}} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "{0}"'.format(psfile)
1932
1933 def gsNotWorking():
1934 global gs_working
1935 print("GhostScript not available for hp2ps tests")
1936
1937 global gs_working
1938 gs_working = False
1939 if config.have_profiling:
1940 if config.gs != '':
1941 resultGood = runCmd(genGSCmd(config.top + '/config/good.ps'))
1942 if resultGood == 0:
1943 resultBad = runCmd(genGSCmd(config.top + '/config/bad.ps') +
1944 ' >/dev/null 2>&1')
1945 if resultBad != 0:
1946 print("GhostScript available for hp2ps tests")
1947 gs_working = True
1948 else:
1949 gsNotWorking()
1950 else:
1951 gsNotWorking()
1952 else:
1953 gsNotWorking()
1954
1955 def add_suffix( name, suffix ):
1956 if suffix == '':
1957 return name
1958 else:
1959 return name + '.' + suffix
1960
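# Pick the source-file suffix implied by the current test options.
# (Despite the name, this also covers C, C--, and Objective-C sources.)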
1961 def add_hs_lhs_suffix(name):
1962 if getTestOpts().c_src:
1963 return add_suffix(name, 'c')
1964 elif getTestOpts().cmm_src:
1965 return add_suffix(name, 'cmm')
1966 elif getTestOpts().objc_src:
1967 return add_suffix(name, 'm')
1968 elif getTestOpts().objcpp_src:
1969 return add_suffix(name, 'mm')
1970 elif getTestOpts().literate:
1971 return add_suffix(name, 'lhs')
1972 else:
1973 return add_suffix(name, 'hs')
1974
1975 def replace_suffix( name, suffix ):
1976 base, _ = os.path.splitext(name)
1977 return base + '.' + suffix
1978
1979 def in_testdir(name, suffix=''):
1980 return os.path.join(getTestOpts().testdir, add_suffix(name, suffix))
1981
1982 def in_srcdir(name, suffix=''):
1983 return os.path.join(getTestOpts().srcdir, add_suffix(name, suffix))
1984
1985 # Finding the sample output. The filename is of the form
1986 #
1987 # <test>.stdout[-ws-<wordsize>][-<platform>|-<os>]
1988 #
1989 def find_expected_file(name, suff):
1990 basename = add_suffix(name, suff)
1991
1992 files = [basename + ws + plat
1993 for plat in ['-' + config.platform, '-' + config.os, '']
1994 for ws in ['-ws-' + config.wordsize, '']]
1995
1996 for f in files:
1997 if os.path.exists(in_srcdir(f)):
1998 return f
1999
2000 return basename
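
# For instance, with config.platform == 'x86_64-unknown-linux',
# config.os == 'linux' and config.wordsize == '64' (example values),
# find_expected_file('T1', 'stdout') probes, in order:
#
#   T1.stdout-ws-64-x86_64-unknown-linux
#   T1.stdout-x86_64-unknown-linux
#   T1.stdout-ws-64-linux
#   T1.stdout-linux
#   T1.stdout-ws-64
#   T1.stdout
#
# and falls back to the bare name if none exists in the source directory.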
2001
2002 if config.msys:
2003 import stat
2004 def cleanup():
2005 testdir = getTestOpts().testdir
2006 max_attempts = 5
2007 retries = max_attempts
2008 def on_error(function, path, excinfo):
2009 # At least one test (T11489) removes the write bit from a file it
2010 # produces. Windows refuses to delete read-only files with a
2011 # permission error. Try setting the write bit and try again.
2012 os.chmod(path, stat.S_IWRITE)
2013 function(path)
2014
2015 # On Windows we have to retry the delete a couple of times.
2016 # The reason is that a FileDelete command merely marks a file for
2017 # deletion; the file is only really removed when the last handle to
2018 # it is closed. Unfortunately many system services can temporarily
2019 # hold a file open with a shared read-only lock, such as the
2020 # built-in anti-virus scanner and the search indexer.
2021 #
2022 # We can't guarantee that these are all off, so whenever the folder
2023 # still exists after an rmtree we wait a bit and try again.
2024 #
2025 # Judging by the tests on the CI server this is relatively rare, so
2026 # we won't be retrying often overall. If the folder is still locked
2027 # after a reasonable amount of time, abort the current test by
2028 # throwing an exception, so that it doesn't fail with an even more
2029 # cryptic error.
2030 # See Trac #13162
2031 exception = None
2032 while retries > 0 and os.path.exists(testdir):
2033 time.sleep((max_attempts-retries)*6)
2034 try:
2035 shutil.rmtree(testdir, onerror=on_error, ignore_errors=False)
2036 except Exception as e:
2037 exception = e
2038 retries -= 1
2039
2040 if retries == 0 and os.path.exists(testdir):
2041 raise Exception("Unable to remove folder '%s': %s\nUnable to start current test."
2042 % (testdir, exception))
2043 else:
2044 def cleanup():
2045 testdir = getTestOpts().testdir
2046 if os.path.exists(testdir):
2047 shutil.rmtree(testdir, ignore_errors=False)
2048
2049
2050 # -----------------------------------------------------------------------------
2051 # Yield all the files ending in '.T' below the given root directories.
2052
2053 def findTFiles(roots):
2054 for root in roots:
2055 for path, dirs, files in os.walk(root, topdown=True):
2056 # Never pick up .T files in uncleaned .run directories.
2057 dirs[:] = [dir for dir in sorted(dirs)
2058 if not dir.endswith(testdir_suffix)]
2059 for filename in files:
2060 if filename.endswith('.T'):
2061 yield os.path.join(path, filename)
2062
2063 # -----------------------------------------------------------------------------
2064 # Output a test summary to the specified file object
2065
2066 def summary(t, file, short=False, color=False):
2067
2068 file.write('\n')
2069 printUnexpectedTests(file,
2070 [t.unexpected_passes, t.unexpected_failures,
2071 t.unexpected_stat_failures, t.framework_failures])
2072
2073 if short:
2074 # Only print the list of unexpected tests above.
2075 return
2076
2077 colorize = lambda s: s
2078 if color:
2079 if len(t.unexpected_failures) > 0 or \
2080 len(t.unexpected_stat_failures) > 0 or \
2081 len(t.framework_failures) > 0:
2082 colorize = str_fail
2083 else:
2084 colorize = str_pass
2085
2086 file.write(colorize('SUMMARY') + ' for test run started at '
2087 + time.strftime("%c %Z", t.start_time) + '\n'
2088 + str(datetime.timedelta(seconds=
2089 round(time.time() - time.mktime(t.start_time)))).rjust(8)
2090 + ' spent to go through\n'
2091 + repr(t.total_tests).rjust(8)
2092 + ' total tests, which gave rise to\n'
2093 + repr(t.total_test_cases).rjust(8)
2094 + ' test cases, of which\n'
2095 + repr(t.n_tests_skipped).rjust(8)
2096 + ' were skipped\n'
2097 + '\n'
2098 + repr(len(t.missing_libs)).rjust(8)
2099 + ' had missing libraries\n'
2100 + repr(t.n_expected_passes).rjust(8)
2101 + ' expected passes\n'
2102 + repr(t.n_expected_failures).rjust(8)
2103 + ' expected failures\n'
2104 + '\n'
2105 + repr(len(t.framework_failures)).rjust(8)
2106 + ' caused framework failures\n'
2107 + repr(len(t.framework_warnings)).rjust(8)
2108 + ' caused framework warnings\n'
2109 + repr(len(t.unexpected_passes)).rjust(8)
2110 + ' unexpected passes\n'
2111 + repr(len(t.unexpected_failures)).rjust(8)
2112 + ' unexpected failures\n'
2113 + repr(len(t.unexpected_stat_failures)).rjust(8)
2114 + ' unexpected stat failures\n'
2115 + '\n')
2116
2117 if t.unexpected_passes:
2118 file.write('Unexpected passes:\n')
2119 printTestInfosSummary(file, t.unexpected_passes)
2120
2121 if t.unexpected_failures:
2122 file.write('Unexpected failures:\n')
2123 printTestInfosSummary(file, t.unexpected_failures)
2124
2125 if t.unexpected_stat_failures:
2126 file.write('Unexpected stat failures:\n')
2127 printTestInfosSummary(file, t.unexpected_stat_failures)
2128
2129 if t.framework_failures:
2130 file.write('Framework failures:\n')
2131 printTestInfosSummary(file, t.framework_failures)
2132
2133 if t.framework_warnings:
2134 file.write('Framework warnings:\n')
2135 printTestInfosSummary(file, t.framework_warnings)
2136
2137 if stopping():
2138 file.write('WARNING: Testsuite run was terminated early\n')
2139
2140 def printUnexpectedTests(file, testInfoss):
2141 unexpected = set(name for testInfos in testInfoss
2142 for (_, name, _, _) in testInfos
2143 if not name.endswith('.T'))
2144 if unexpected:
2145 file.write('Unexpected results from:\n')
2146 file.write('TEST="' + ' '.join(sorted(unexpected)) + '"\n')
2147 file.write('\n')
2148
2149 def printTestInfosSummary(file, testInfos):
2150 maxDirLen = max(len(directory) for (directory, _, _, _) in testInfos)
2151 for (directory, name, reason, way) in testInfos:
2152 directory = directory.ljust(maxDirLen)
2153 file.write(' {directory} {name} [{reason}] ({way})\n'.format(**locals()))
2154 file.write('\n')
2155
2156 def modify_lines(s, f):
2157 s = '\n'.join([f(l) for l in s.splitlines()])
2158 if s and s[-1] != '\n':
2159 # Prevent '\ No newline at end of file' warnings when diffing.
2160 s += '\n'
2161 return s
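
# An illustrative example (doctest-style):
#
#   >>> modify_lines('a\nb', lambda l: l.upper())
#   'A\nB\n'
#
# (the trailing newline avoids "\ No newline at end of file" in diffs)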