1 # coding=utf8
2 #
3 # (c) Simon Marlow 2002
4 #
5
6 import io
7 import shutil
8 import os
9 import re
10 import traceback
11 import time
12 import datetime
13 import copy
14 import glob
15 import sys
16 from math import ceil, trunc
17 from pathlib import PurePath
18 import collections
19 import subprocess
20
21 from testglobals import config, ghc_env, default_testopts, brokens, t, TestResult
22 from testutil import strip_quotes, lndir, link_or_copy_file, passed, failBecause, failBecauseStderr, str_fail, str_pass
23 from cpu_features import have_cpu_feature
24 import perf_notes as Perf
25 from perf_notes import MetricChange
26 extra_src_files = {'T4198': ['exitminus1.c']} # TODO: See #12223
27
28 global pool_sema
29 if config.use_threads:
30 import threading
31 pool_sema = threading.BoundedSemaphore(value=config.threads)
32
33 global wantToStop
34 wantToStop = False
35
36 def stopNow():
37 global wantToStop
38 wantToStop = True
39
40 def stopping():
41 return wantToStop
42
43
44 # Options valid for the current test only (these get reset to
45 # testdir_testopts after each test).
46
47 global testopts_local
48 if config.use_threads:
49 testopts_local = threading.local()
50 else:
51 class TestOpts_Local:
52 pass
53 testopts_local = TestOpts_Local()
54
55 def getTestOpts():
56 return testopts_local.x
57
58 def setLocalTestOpts(opts):
59 global testopts_local
60 testopts_local.x=opts
61
62 def isCompilerStatsTest():
63 opts = getTestOpts()
64 return bool(opts.is_compiler_stats_test)
65
66 def isStatsTest():
67 opts = getTestOpts()
68 return opts.is_stats_test
69
70
71 # This can be called at the top of a file of tests, to set default test options
72 # for the following tests.
73 def setTestOpts( f ):
74 global thisdir_settings
75 thisdir_settings = [thisdir_settings, f]
76
77 # -----------------------------------------------------------------------------
78 # Canned setup functions for common cases. E.g. for a test you might say
79 #
80 # test('test001', normal, compile, [''])
81 #
82 # to run it without any options, but change it to
83 #
84 # test('test001', expect_fail, compile, [''])
85 #
86 # to expect failure for this test.
87 #
88 # type TestOpt = (name :: String, opts :: Object) -> IO ()
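#
# Setup functions can also be combined by passing a list; a sketch
# (the test name is illustrative, not an actual test):
#
#   test('test002', [expect_fail, omit_ways(['ghci'])], compile, [''])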
89
90 def normal( name, opts ):
91 return;
92
93 def skip( name, opts ):
94 opts.skip = True
95
96 def expect_fail( name, opts ):
97 # The compiler, testdriver, OS or platform is missing a certain
98 # feature, and we don't plan to or can't fix it now or in the
99 # future.
100 opts.expect = 'fail';
101
102 def reqlib( lib ):
103 return lambda name, opts, l=lib: _reqlib (name, opts, l )
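
# For example, a test that needs the 'random' package might be written
# as (illustrative test name, not an actual test):
#
#   test('T1234', reqlib('random'), compile_and_run, [''])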
104
105 def stage1(name, opts):
106 # See Note [Why is there no stage1 setup function?]
107 framework_fail(name, 'stage1 setup function does not exist',
108 'add your test to testsuite/tests/stage1 instead')
109
110 # Note [Why is there no stage1 setup function?]
111 #
112 # Presumably a stage1 setup function would signal that the stage1
113 # compiler should be used to compile a test.
114 #
115 # Trouble is, the path to the compiler + the `ghc --info` settings for
116 # that compiler are currently passed in from the `make` part of the
117 # testsuite driver.
118 #
119 # Switching compilers in the Python part would be entirely too late, as
120 # all ghc_with_* settings would be wrong. See config/ghc for possible
121 # consequences (for example, config.run_ways would still be
122 # based on the default compiler, quite likely causing ./validate --slow
123 # to fail).
124 #
125 # It would be possible to let the Python part of the testsuite driver
126 # make the call to `ghc --info`, but doing so would require quite some
127 # work. Care has to be taken to not affect the run_command tests for
128 # example, as they also use the `ghc --info` settings:
129 # quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
130 #
131 # If you want a test to run using the stage1 compiler, add it to the
132 # testsuite/tests/stage1 directory. Validate runs the tests in that
133 # directory with `make stage=1`.
134
135 # Cache the results of looking to see if we have a library or not.
136 # This makes quite a difference, especially on Windows.
137 have_lib_cache = {}
138
139 def have_library(lib):
140 """ Test whether the given library is available """
141 if lib in have_lib_cache:
142 got_it = have_lib_cache[lib]
143 else:
144 cmd = strip_quotes(config.ghc_pkg)
145 p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
146 stdout=subprocess.PIPE,
147 stderr=subprocess.PIPE,
148 env=ghc_env)
149 # read from stdout and stderr to avoid blocking due to
150 # buffers filling
151 p.communicate()
152 r = p.wait()
153 got_it = r == 0
154 have_lib_cache[lib] = got_it
155
156 return got_it
157
158 def _reqlib( name, opts, lib ):
159 if not have_library(lib):
160 opts.expect = 'missing-lib'
161
162 def req_haddock( name, opts ):
163 if not config.haddock:
164 opts.expect = 'missing-lib'
165
166 def req_profiling( name, opts ):
167 '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
168 if not config.have_profiling:
169 opts.expect = 'fail'
170
171 def req_shared_libs( name, opts ):
172 if not config.have_shared_libs:
173 opts.expect = 'fail'
174
175 def req_interp( name, opts ):
176 if not config.have_interp:
177 opts.expect = 'fail'
178
179 def req_smp( name, opts ):
180 if not config.have_smp:
181 opts.expect = 'fail'
182
183 def ignore_stdout(name, opts):
184 opts.ignore_stdout = True
185
186 def ignore_stderr(name, opts):
187 opts.ignore_stderr = True
188
189 def combined_output( name, opts ):
190 opts.combined_output = True
191
192 def use_specs( specs ):
193 """
194 use_specs allows one to override files based on suffixes. e.g. 'stdout',
195 'stderr', 'asm', 'prof.sample', etc.
196
197 For example, use_specs({'stdout' : 'prof002.stdout'}) makes the test
198 re-use prof002.stdout.
199
200 Full Example:
201 test('T5889', [only_ways(['normal']), req_profiling,
202 extra_files(['T5889/A.hs', 'T5889/B.hs']),
203 use_specs({'stdout' : 'prof002.stdout'})],
204 multimod_compile,
205 ['A B', '-O -prof -fno-prof-count-entries -v0'])
206
207 """
208 return lambda name, opts, s=specs: _use_specs( name, opts, s )
209
210 def _use_specs( name, opts, specs ):
211 opts.extra_files.extend(specs.values ())
212 opts.use_specs = specs
213
214 # -----
215
216 def expect_fail_for( ways ):
217 return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )
218
219 def _expect_fail_for( name, opts, ways ):
220 opts.expect_fail_for = ways
221
222 def expect_broken( bug ):
223 # This test is expected not to work due to the indicated trac bug
224 # number.
225 return lambda name, opts, b=bug: _expect_broken (name, opts, b )
226
227 def _expect_broken( name, opts, bug ):
228 record_broken(name, opts, bug)
229 opts.expect = 'fail';
230
231 def expect_broken_for( bug, ways ):
232 return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )
233
234 def _expect_broken_for( name, opts, bug, ways ):
235 record_broken(name, opts, bug)
236 opts.expect_fail_for = ways
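
# Example (hypothetical ticket and test name): mark a test as broken
# only in the ghci way, tracked by ticket #7919:
#
#   test('T7919', expect_broken_for(7919, ['ghci']), compile_and_run, [''])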
237
238 def record_broken(name, opts, bug):
239 me = (bug, opts.testdir, name)
240 if me not in brokens:
241 brokens.append(me)
242
243 def _expect_pass(way):
244 # Helper function. Not intended for use in .T files.
245 opts = getTestOpts()
246 return opts.expect == 'pass' and way not in opts.expect_fail_for
247
248 # -----
249
250 def fragile( bug ):
251 """
252 Indicates that the test should be skipped due to fragility documented in
253 the given ticket.
254 """
255 def helper( name, opts, bug=bug ):
256 record_broken(name, opts, bug)
257 opts.skip = True
258
259 return helper
260
261 def fragile_for( name, opts, bug, ways ):
262 """
263 Indicates that the test should be skipped due to fragility in the given
264 test ways as documented in the given ticket.
265 """
266 def helper( name, opts, bug=bug, ways=ways ):
267 record_broken(name, opts, bug)
268 opts.omit_ways = ways
269
270 return helper
271
272 # -----
273
274 def omit_ways( ways ):
275 return lambda name, opts, w=ways: _omit_ways( name, opts, w )
276
277 def _omit_ways( name, opts, ways ):
278 opts.omit_ways = ways
279
280 # -----
281
282 def only_ways( ways ):
283 return lambda name, opts, w=ways: _only_ways( name, opts, w )
284
285 def _only_ways( name, opts, ways ):
286 opts.only_ways = ways
287
288 # -----
289
290 def extra_ways( ways ):
291 return lambda name, opts, w=ways: _extra_ways( name, opts, w )
292
293 def _extra_ways( name, opts, ways ):
294 opts.extra_ways = ways
295
296 # -----
297
298 def set_stdin( file ):
299 return lambda name, opts, f=file: _set_stdin(name, opts, f);
300
301 def _set_stdin( name, opts, f ):
302 opts.stdin = f
303
304 # -----
305
306 def exit_code( val ):
307 return lambda name, opts, v=val: _exit_code(name, opts, v);
308
309 def _exit_code( name, opts, v ):
310 opts.exit_code = v
311
312 def signal_exit_code( val ):
313 if opsys('solaris2'):
314 return exit_code( val )
315 else:
316 # When an application running on Linux receives a fatal error
317 # signal, its exit code is encoded as 128 + the signal value.
318 # See http://www.tldp.org/LDP/abs/html/exitcodes.html
319 # Mac OS X appears to behave the same way; at least the Mac OS X
320 # builder's behavior suggests so.
321 return exit_code( val+128 )
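
# For example, a program expected to die with SIGSEGV (signal 11) can
# use signal_exit_code(11), which corresponds to exit code 139 on
# Linux (illustrative test name):
#
#   test('T1234', signal_exit_code(11), compile_and_run, [''])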
322
323 # -----
324
325 def compile_timeout_multiplier( val ):
326 return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)
327
328 def _compile_timeout_multiplier( name, opts, v ):
329 opts.compile_timeout_multiplier = v
330
331 def run_timeout_multiplier( val ):
332 return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)
333
334 def _run_timeout_multiplier( name, opts, v ):
335 opts.run_timeout_multiplier = v
336
337 # -----
338
339 def extra_run_opts( val ):
340 return lambda name, opts, v=val: _extra_run_opts(name, opts, v);
341
342 def _extra_run_opts( name, opts, v ):
343 opts.extra_run_opts = v
344
345 # -----
346
347 def extra_hc_opts( val ):
348 return lambda name, opts, v=val: _extra_hc_opts(name, opts, v);
349
350 def _extra_hc_opts( name, opts, v ):
351 opts.extra_hc_opts = v
352
353 # -----
354
355 def extra_clean( files ):
356 # TODO. Remove all calls to extra_clean.
357 return lambda _name, _opts: None
358
359 def extra_files(files):
360 return lambda name, opts: _extra_files(name, opts, files)
361
362 def _extra_files(name, opts, files):
363 opts.extra_files.extend(files)
364
365 # -----
366
367 # Defaults to "test everything, and only break on extreme cases"
368 #
369 # The inputs to this function are slightly interesting:
370 # metric can be either:
371 # - 'all', in which case all 3 possible metrics are collected and compared.
372 # - The specific metric one wants to use in the test.
373 # - A list of the metrics one wants to use in the test.
374 #
375 # Deviation defaults to 20% because the goal is correctness over performance.
376 # The testsuite should avoid breaking when there is not an actual error.
377 # Instead, the testsuite should notify of regressions in a non-breaking manner.
378 #
379 # collect_compiler_stats is used when the metrics collected are about the compiler.
380 # collect_stats is used in the majority case when the metrics to be collected
381 # are about the performance of the runtime code generated by the compiler.
382 def collect_compiler_stats(metric='all',deviation=20):
383 return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m,d, True)
384
385 def collect_stats(metric='all', deviation=20):
386 return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d)
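
# Example usage (an illustrative sketch): collect only the compiler's
# allocations, and allow them to deviate by at most 5% from the
# baseline:
#
#   test('T1234', collect_compiler_stats('bytes allocated', 5),
#        compile, [''])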
387
388 def testing_metrics():
389 return ['bytes allocated', 'peak_megabytes_allocated', 'max_bytes_used']
390
391 # This is an internal function that is used only in the implementation.
392 # 'is_compiler_stats_test' is somewhat of an unfortunate name.
393 # If the boolean is set to true, it indicates that this test is one that
394 # measures the performance numbers of the compiler.
395 # As this is a fairly rare case in the testsuite, it defaults to false to
396 # indicate that it is a 'normal' performance test.
397 def _collect_stats(name, opts, metrics, deviation, is_compiler_stats_test=False):
398 if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
399 framework_fail(name, 'bad_name', 'This test has an invalid name.')
400
401 # Normalize metrics to a list of strings.
402 if isinstance(metrics, str):
403 if metrics == 'all':
404 metrics = testing_metrics()
405 else:
406 metrics = [metrics]
407
408 opts.is_stats_test = True
409 if is_compiler_stats_test:
410 opts.is_compiler_stats_test = True
411
412 # Compiler performance numbers change when debugging is on, making the results
413 # useless and confusing. Therefore, skip if debugging is on.
414 if config.compiler_debugged and is_compiler_stats_test:
415 opts.skip = True
416
417 for metric in metrics:
418 def baselineByWay(way, target_commit, metric=metric):
419 return Perf.baseline_metric( \
420 target_commit, name, config.test_env, metric, way)
421
422 opts.stats_range_fields[metric] = (baselineByWay, deviation)
423
424 # -----
425
426 def when(b, f):
427 # When list_broken is on, we want to see all expect_broken calls,
428 # so we always do f
429 if b or config.list_broken:
430 return f
431 else:
432 return normal
433
434 def unless(b, f):
435 return when(not b, f)
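
# Example usage (illustrative): skip a test on Windows, and expect it
# to fail unless dynamic libraries are available:
#
#   test('T1234', [when(opsys('mingw32'), skip),
#                  unless(have_dynamic(), expect_fail)],
#        compile_and_run, [''])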
436
437 def doing_ghci():
438 return 'ghci' in config.run_ways
439
440 def ghc_dynamic():
441 return config.ghc_dynamic
442
443 def fast():
444 return config.speed == 2
445
446 def platform( plat ):
447 return config.platform == plat
448
449 def opsys( os ):
450 return config.os == os
451
452 def arch( arch ):
453 return config.arch == arch
454
455 def wordsize( ws ):
456 return config.wordsize == str(ws)
457
458 def msys( ):
459 return config.msys
460
461 def cygwin( ):
462 return config.cygwin
463
464 def have_vanilla( ):
465 return config.have_vanilla
466
467 def have_ncg( ):
468 return config.have_ncg
469
470 def have_dynamic( ):
471 return config.have_dynamic
472
473 def have_profiling( ):
474 return config.have_profiling
475
476 def in_tree_compiler( ):
477 return config.in_tree_compiler
478
479 def unregisterised( ):
480 return config.unregisterised
481
482 def compiler_profiled( ):
483 return config.compiler_profiled
484
485 def compiler_debugged( ):
486 return config.compiler_debugged
487
488 def have_gdb( ):
489 return config.have_gdb
490
491 def have_readelf( ):
492 return config.have_readelf
493
494 def integer_gmp( ):
495 return have_library("integer-gmp")
496
497 def integer_simple( ):
498 return have_library("integer-simple")
499
500 def llvm_build ( ):
501 return config.ghc_built_by_llvm
502
503 # ---
504
505 def high_memory_usage(name, opts):
506 opts.alone = True
507
508 # If a test is for a multi-CPU race, then running the test alone
509 # increases the chance that we'll actually see it.
510 def multi_cpu_race(name, opts):
511 opts.alone = True
512
513 # ---
514 def literate( name, opts ):
515 opts.literate = True
516
517 def c_src( name, opts ):
518 opts.c_src = True
519
520 def objc_src( name, opts ):
521 opts.objc_src = True
522
523 def objcpp_src( name, opts ):
524 opts.objcpp_src = True
525
526 def cmm_src( name, opts ):
527 opts.cmm_src = True
528
529 def outputdir( odir ):
530 return lambda name, opts, d=odir: _outputdir(name, opts, d)
531
532 def _outputdir( name, opts, odir ):
533 opts.outputdir = odir;
534
535 # ----
536
537 def pre_cmd( cmd ):
538 return lambda name, opts, c=cmd: _pre_cmd(name, opts, cmd)
539
540 def _pre_cmd( name, opts, cmd ):
541 opts.pre_cmd = cmd
542
543 # ----
544
545 def cmd_prefix( prefix ):
546 return lambda name, opts, p=prefix: _cmd_prefix(name, opts, prefix)
547
548 def _cmd_prefix( name, opts, prefix ):
549 opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd;
550
551 # ----
552
553 def cmd_wrapper( fun ):
554 return lambda name, opts, f=fun: _cmd_wrapper(name, opts, fun)
555
556 def _cmd_wrapper( name, opts, fun ):
557 opts.cmd_wrapper = fun
558
559 # ----
560
561 def compile_cmd_prefix( prefix ):
562 return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, prefix)
563
564 def _compile_cmd_prefix( name, opts, prefix ):
565 opts.compile_cmd_prefix = prefix
566
567 # ----
568
569 def check_stdout( f ):
570 return lambda name, opts, f=f: _check_stdout(name, opts, f)
571
572 def _check_stdout( name, opts, f ):
573 opts.check_stdout = f
574
575 def no_check_hp(name, opts):
576 opts.check_hp = False
577
578 # ----
579
580 def filter_stdout_lines( regex ):
581 """ Filter lines of stdout with the given regular expression """
582 def f( name, opts ):
583 _normalise_fun(name, opts, lambda s: '\n'.join(re.findall(regex, s)))
584 return f
585
586 def normalise_slashes( name, opts ):
587 _normalise_fun(name, opts, normalise_slashes_)
588
589 def normalise_exe( name, opts ):
590 _normalise_fun(name, opts, normalise_exe_)
591
592 def normalise_fun( *fs ):
593 return lambda name, opts: _normalise_fun(name, opts, fs)
594
595 def _normalise_fun( name, opts, *fs ):
596 opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)
597
598 def normalise_errmsg_fun( *fs ):
599 return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)
600
601 def _normalise_errmsg_fun( name, opts, *fs ):
602 opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)
603
604 def check_errmsg(needle):
605 def norm(str):
606 if needle in str:
607 return "%s contained in -ddump-simpl\n" % needle
608 else:
609 return "%s not contained in -ddump-simpl\n" % needle
610 return normalise_errmsg_fun(norm)
611
612 def grep_errmsg(needle):
613 def norm(str):
614 return "".join(filter(lambda l: re.search(needle, l), str.splitlines(True)))
615 return normalise_errmsg_fun(norm)
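
# A normaliser is just a function from String to String, so custom
# ones can be passed directly. A sketch (the helper and the test name
# are made up for illustration):
#
#   def drop_tmp_paths(s):
#       return re.sub(r'/tmp/\S+', '<TMP>', s)
#
#   test('T1234', normalise_errmsg_fun(drop_tmp_paths), compile_fail, [''])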
616
617 def normalise_whitespace_fun(f):
618 return lambda name, opts: _normalise_whitespace_fun(name, opts, f)
619
620 def _normalise_whitespace_fun(name, opts, f):
621 opts.whitespace_normaliser = f
622
623 def normalise_version_( *pkgs ):
624 def normalise_version__( str ):
625 return re.sub('(' + '|'.join(map(re.escape,pkgs)) + ')-[0-9.]+',
626 '\\1-<VERSION>', str)
627 return normalise_version__
628
629 def normalise_version( *pkgs ):
630 def normalise_version__( name, opts ):
631 _normalise_fun(name, opts, normalise_version_(*pkgs))
632 _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
633 return normalise_version__
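
# For example, normalise_version('base') rewrites occurrences such as
# "base-4.12.0.0" to "base-<VERSION>" in both stdout and stderr before
# they are compared with the expected output (illustrative test name):
#
#   test('T1234', normalise_version('base'), compile_fail, [''])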
634
635 def normalise_drive_letter(name, opts):
636 # Windows only. Change D:\\ to C:\\.
637 _normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))
638
639 def keep_prof_callstacks(name, opts):
640 """Keep profiling callstacks.
641
642 Use together with `only_ways(prof_ways)`.
643 """
644 opts.keep_prof_callstacks = True
645
646 def join_normalisers(*a):
647 """
648 Compose functions, flattening sequences.
649
650 join_normalisers(f1,[f2,f3],f4)
651
652 is the same as
653
654 lambda x: f1(f2(f3(f4(x))))
655 """
656
657 def flatten(l):
658 """
659 Taken from http://stackoverflow.com/a/2158532/946226
660 """
661 for el in l:
662 if (isinstance(el, collections.Iterable)
663 and not isinstance(el, (bytes, str))):
664 for sub in flatten(el):
665 yield sub
666 else:
667 yield el
668
669 a = flatten(a)
670
671 fn = lambda x:x # identity function
672 for f in a:
673 assert callable(f)
674 fn = lambda x,f=f,fn=fn: fn(f(x))
675 return fn
676
677 # ----
678 # Function for composing two opt-fns together
679
680 def executeSetups(fs, name, opts):
681 if type(fs) is list:
682 # If we have a list of setups, then execute each one
683 for f in fs:
684 executeSetups(f, name, opts)
685 else:
686 # fs is a single function, so just apply it
687 fs(name, opts)
688
689 # -----------------------------------------------------------------------------
690 # The current directory of tests
691
692 def newTestDir(tempdir, dir):
693
694 global thisdir_settings
695 # reset the options for this test directory
696 def settings(name, opts, tempdir=tempdir, dir=dir):
697 return _newTestDir(name, opts, tempdir, dir)
698 thisdir_settings = settings
699
700 # Should be equal to entry in toplevel .gitignore.
701 testdir_suffix = '.run'
702
703 def _newTestDir(name, opts, tempdir, dir):
704 testdir = os.path.join('', *(p for p in PurePath(dir).parts if p != '..'))
705 opts.srcdir = os.path.join(os.getcwd(), dir)
706 opts.testdir = os.path.join(tempdir, testdir, name + testdir_suffix)
707 opts.compiler_always_flags = config.compiler_always_flags
708
709 # -----------------------------------------------------------------------------
710 # Actually doing tests
711
712 parallelTests = []
713 aloneTests = []
714 allTestNames = set([])
715
716 def runTest(watcher, opts, name, func, args):
717 if config.use_threads:
718 pool_sema.acquire()
719 t = threading.Thread(target=test_common_thread,
720 name=name,
721 args=(watcher, name, opts, func, args))
722 t.daemon = False
723 t.start()
724 else:
725 test_common_work(watcher, name, opts, func, args)
726
727 # name :: String
728 # setup :: [TestOpt] -> IO ()
729 def test(name, setup, func, args):
730 global aloneTests
731 global parallelTests
732 global allTestNames
733 global thisdir_settings
734 if name in allTestNames:
735 framework_fail(name, 'duplicate', 'There are multiple tests with this name')
736 if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
737 framework_fail(name, 'bad_name', 'This test has an invalid name')
738
739 if config.run_only_some_tests:
740 if name not in config.only:
741 return
742 else:
743 # Note [Mutating config.only]
744 # config.only is initially the set of tests requested by
745 # the user (via 'make TEST='). We then remove all tests that
746 # we've already seen (in .T files), so that we can later
747 # report on any tests we couldn't find and error out.
748 config.only.remove(name)
749
750 # Make a deep copy of the default_testopts, as we need our own copy
751 # of any dictionaries etc inside it. Otherwise, if one test modifies
752 # them, all tests will see the modified version!
753 myTestOpts = copy.deepcopy(default_testopts)
754
755 executeSetups([thisdir_settings, setup], name, myTestOpts)
756
757 thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
758 if myTestOpts.alone:
759 aloneTests.append(thisTest)
760 else:
761 parallelTests.append(thisTest)
762 allTestNames.add(name)
763
764 if config.use_threads:
765 def test_common_thread(watcher, name, opts, func, args):
766 try:
767 test_common_work(watcher, name, opts, func, args)
768 finally:
769 pool_sema.release()
770
771 def get_package_cache_timestamp():
772 if config.package_conf_cache_file == '':
773 return 0.0
774 else:
775 try:
776 return os.stat(config.package_conf_cache_file).st_mtime
777 except:
778 return 0.0
779
780 do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o', '.out') # 12112
781
782 def test_common_work(watcher, name, opts, func, args):
783 try:
784 t.total_tests += 1
785 setLocalTestOpts(opts)
786
787 package_conf_cache_file_start_timestamp = get_package_cache_timestamp()
788
789 # All the ways we might run this test
790 if func == compile or func == multimod_compile:
791 all_ways = config.compile_ways
792 elif func == compile_and_run or func == multimod_compile_and_run:
793 all_ways = config.run_ways
794 elif func == ghci_script:
795 if 'ghci' in config.run_ways:
796 all_ways = ['ghci']
797 else:
798 all_ways = []
799 else:
800 all_ways = ['normal']
801
802 # A test itself can request extra ways by setting opts.extra_ways
803 all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]
804
805 t.total_test_cases += len(all_ways)
806
807 ok_way = lambda way: \
808 not getTestOpts().skip \
809 and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
810 and (config.cmdline_ways == [] or way in config.cmdline_ways) \
811 and (not (config.skip_perf_tests and isStatsTest())) \
812 and (not (config.only_perf_tests and not isStatsTest())) \
813 and way not in getTestOpts().omit_ways
814
815 # The ways we are going to run this test in (everything else is skipped)
816 do_ways = list(filter (ok_way,all_ways))
817
818 # Only run all ways in slow mode.
819 # See Note [validate and testsuite speed] in toplevel Makefile.
820 if config.accept:
821 # Only ever run one way
822 do_ways = do_ways[:1]
823 elif config.speed > 0:
824 # However, if we EXPLICITLY asked for a way (with extra_ways)
825 # please test it!
826 explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
827 other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
828 do_ways = other_ways[:1] + explicit_ways
829
830 # Find all files in the source directory that this test
831 # depends on. Do this only once for all ways.
832 # Generously add all filenames that start with the name of
833 # the test to this set, as a convenience to test authors.
834 # They will have to use the `extra_files` setup function to
835 # specify all other files that their test depends on (but
836 # this seems to be necessary for only about 10% of all
837 # tests).
838 files = set(f for f in os.listdir(opts.srcdir)
839 if f.startswith(name) and not f == name and
840 not f.endswith(testdir_suffix) and
841 not os.path.splitext(f)[1] in do_not_copy)
842 for filename in (opts.extra_files + extra_src_files.get(name, [])):
843 if filename.startswith('/'):
844 framework_fail(name, 'whole-test',
845 'no absolute paths in extra_files please: ' + filename)
846
847 elif '*' in filename:
848 # Don't use wildcards in extra_files too much, as
849 # globbing is slow.
850 files.update((os.path.relpath(f, opts.srcdir)
851 for f in glob.iglob(in_srcdir(filename))))
852
853 elif filename:
854 files.add(filename)
855
856 else:
857 framework_fail(name, 'whole-test', 'extra_file is empty string')
858
859 # Run the required tests...
860 for way in do_ways:
861 if stopping():
862 break
863 try:
864 do_test(name, way, func, args, files)
865 except KeyboardInterrupt:
866 stopNow()
867 except Exception as e:
868 framework_fail(name, way, str(e))
869 traceback.print_exc()
870
871 t.n_tests_skipped += len(set(all_ways) - set(do_ways))
872
873 if config.cleanup and do_ways:
874 try:
875 cleanup()
876 except Exception as e:
877 framework_fail(name, 'runTest', 'Unhandled exception during cleanup: ' + str(e))
878
879 package_conf_cache_file_end_timestamp = get_package_cache_timestamp();
880
881 if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
882 framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))
883
884 except Exception as e:
885 framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
886 finally:
887 watcher.notify()
888
889 def do_test(name, way, func, args, files):
890 opts = getTestOpts()
891
892 full_name = name + '(' + way + ')'
893
894 progress_args = [ full_name, t.total_tests, len(allTestNames),
895 [len(t.unexpected_passes),
896 len(t.unexpected_failures),
897 len(t.framework_failures)]]
898 if_verbose(2, "=====> {0} {1} of {2} {3}".format(*progress_args))
899
900 # Update the terminal title: a useful progress indicator
901 # even when running `make test VERBOSE=1`
902 if config.supports_colors:
903 print("\033]0;{0} {1} of {2} {3}\007".format(*progress_args), end="")
904 sys.stdout.flush()
905
906 # Clean up prior to the test, so that we can't spuriously conclude
907 # that it passed on the basis of old run outputs.
908 cleanup()
909 os.makedirs(opts.testdir)
910
911 # Link all source files for this test into a new directory in
912 # /tmp, and run the test in that directory. This makes it
913 # possible to run tests in parallel, without modification, that
914 # would otherwise (accidentally) write to the same output file.
915 # It also makes it easier to keep the testsuite clean.
916
917 for extra_file in files:
918 src = in_srcdir(extra_file)
919 dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
920 if os.path.isfile(src):
921 link_or_copy_file(src, dst)
922 elif os.path.isdir(src):
923 if os.path.exists(dst):
924 shutil.rmtree(dst)
925 os.mkdir(dst)
926 lndir(src, dst)
927 else:
928 if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
929 # When using a ghc built without haddock support, .t
930 # files are rightfully missing. Don't
931 # framework_fail. Test will be skipped later.
932 pass
933 else:
934 framework_fail(name, way,
935 'extra_file does not exist: ' + extra_file)
936
937 if func.__name__ == 'run_command' or func.__name__ == 'makefile_test' or opts.pre_cmd:
938 # When running 'MAKE' make sure 'TOP' still points to the
939 # root of the testsuite.
940 src_makefile = in_srcdir('Makefile')
941 dst_makefile = in_testdir('Makefile')
942 if os.path.exists(src_makefile):
943 with io.open(src_makefile, 'r', encoding='utf8') as src:
944 makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
945 with io.open(dst_makefile, 'w', encoding='utf8') as dst:
946 dst.write(makefile)
947
948 if opts.pre_cmd:
949 exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, override_options(opts.pre_cmd)),
950 stderr = subprocess.STDOUT,
951 print_output = config.verbose >= 3)
952
953 # If user used expect_broken then don't record failures of pre_cmd
954 if exit_code != 0 and opts.expect not in ['fail']:
955 framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
956 if_verbose(1, '** pre_cmd was "{0}".'.format(override_options(opts.pre_cmd)))
957
958 result = func(*[name,way] + args)
959
960 if opts.expect not in ['pass', 'fail', 'missing-lib']:
961 framework_fail(name, way, 'bad expected ' + opts.expect)
962
963 try:
964 passFail = result['passFail']
965 except (KeyError, TypeError):
966 passFail = 'No passFail found'
967
968 directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
969
970 if passFail == 'pass':
971 if _expect_pass(way):
972 t.expected_passes.append(TestResult(directory, name, "", way))
973 t.n_expected_passes += 1
974 else:
975 if_verbose(1, '*** unexpected pass for %s' % full_name)
976 t.unexpected_passes.append(TestResult(directory, name, 'unexpected', way))
977 elif passFail == 'fail':
978 if _expect_pass(way):
979 reason = result['reason']
980 tag = result.get('tag')
981 if tag == 'stat':
982 if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
983 t.unexpected_stat_failures.append(TestResult(directory, name, reason, way))
984 else:
985 if_verbose(1, '*** unexpected failure for %s' % full_name)
986 result = TestResult(directory, name, reason, way, stderr=result.get('stderr'))
987 t.unexpected_failures.append(result)
988 else:
989 if opts.expect == 'missing-lib':
990 t.missing_libs.append(TestResult(directory, name, 'missing-lib', way))
991 else:
992 t.n_expected_failures += 1
993 else:
994 framework_fail(name, way, 'bad result ' + passFail)
995
996 # Make is often invoked with -s, which means if it fails, we get
997 # no feedback at all. This is annoying. So let's remove the option
998 # if found and instead have the testsuite decide on what to do
999 # with the output.
1000 def override_options(pre_cmd):
1001 if config.verbose >= 5 and bool(re.match('\$make', pre_cmd, re.I)):
1002 return pre_cmd.replace('-s' , '') \
1003 .replace('--silent', '') \
1004 .replace('--quiet' , '')
1005
1006 return pre_cmd
1007
1008 def framework_fail(name, way, reason):
1009 opts = getTestOpts()
1010 directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
1011 full_name = name + '(' + way + ')'
1012 if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
1013 t.framework_failures.append(TestResult(directory, name, reason, way))
1014
1015 def framework_warn(name, way, reason):
1016 opts = getTestOpts()
1017 directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
1018 full_name = name + '(' + way + ')'
1019 if_verbose(1, '*** framework warning for %s %s ' % (full_name, reason))
1020 t.framework_warnings.append(TestResult(directory, name, reason, way))
1021
1022 def badResult(result):
1023 try:
1024 if result['passFail'] == 'pass':
1025 return False
1026 return True
1027 except (KeyError, TypeError):
1028 return True
1029
1030 # -----------------------------------------------------------------------------
1031 # Generic command tests
1032
1033 # A generic command test is expected to run and exit successfully.
1034 #
1035 # The expected exit code can be changed via exit_code() as normal, and
1036 # the expected stdout/stderr are stored in <testname>.stdout and
1037 # <testname>.stderr. The output of the command can be ignored
1038 # altogether by using the setup functions ignore_stdout and
1039 # ignore_stderr.
1040
1041 def run_command( name, way, cmd ):
1042 return simple_run( name, '', override_options(cmd), '' )
1043
1044 def makefile_test( name, way, target=None ):
1045 if target is None:
1046 target = name
1047
1048 cmd = '$MAKE -s --no-print-directory {target}'.format(target=target)
1049 return run_command(name, way, cmd)
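
# Example usage (hypothetical test): run the 'T1234' target of the
# test directory's Makefile and compare its output as usual:
#
#   test('T1234', normal, makefile_test, [])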
1050
1051 # -----------------------------------------------------------------------------
1052 # GHCi tests
1053
1054 def ghci_script( name, way, script):
1055 flags = ' '.join(get_compiler_flags())
1056 way_flags = ' '.join(config.way_flags[way])
1057
1058 # We pass HC and HC_OPTS as environment variables, so that the
1059 # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
1060 cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {way_flags} {flags}'
1061 ).format(flags=flags, way_flags=way_flags)
1062 # NB: put way_flags before flags so that flags in all.T can override others
1063
1064 getTestOpts().stdin = script
1065 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1066
1067 # -----------------------------------------------------------------------------
1068 # Compile-only tests
1069
1070 def compile( name, way, extra_hc_opts ):
1071 return do_compile( name, way, 0, '', [], extra_hc_opts )
1072
1073 def compile_fail( name, way, extra_hc_opts ):
1074 return do_compile( name, way, 1, '', [], extra_hc_opts )
1075
1076 def backpack_typecheck( name, way, extra_hc_opts ):
1077 return do_compile( name, way, 0, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )
1078
1079 def backpack_typecheck_fail( name, way, extra_hc_opts ):
1080 return do_compile( name, way, 1, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )
1081
1082 def backpack_compile( name, way, extra_hc_opts ):
1083 return do_compile( name, way, 0, '', [], extra_hc_opts, backpack=True )
1084
1085 def backpack_compile_fail( name, way, extra_hc_opts ):
1086 return do_compile( name, way, 1, '', [], extra_hc_opts, backpack=True )
1087
1088 def backpack_run( name, way, extra_hc_opts ):
1089 return compile_and_run__( name, way, '', [], extra_hc_opts, backpack=True )
1090
1091 def multimod_compile( name, way, top_mod, extra_hc_opts ):
1092 return do_compile( name, way, 0, top_mod, [], extra_hc_opts )
1093
1094 def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
1095 return do_compile( name, way, 1, top_mod, [], extra_hc_opts )
1096
1097 def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
1098 return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)
1099
1100 def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
1101 return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
1102
1103 def do_compile(name, way, should_fail, top_mod, extra_mods, extra_hc_opts, **kwargs):
1104 # print 'Compile only, extra args = ', extra_hc_opts
1105
1106 result = extras_build( way, extra_mods, extra_hc_opts )
1107 if badResult(result):
1108 return result
1109 extra_hc_opts = result['hc_opts']
1110
1111 result = simple_build(name, way, extra_hc_opts, should_fail, top_mod, 0, 1, **kwargs)
1112
1113 if badResult(result):
1114 return result
1115
1116 # the actual stderr should always match the expected, regardless
1117 # of whether we expected the compilation to fail or not (successful
1118 # compilations may generate warnings).
1119
1120 expected_stderr_file = find_expected_file(name, 'stderr')
1121 actual_stderr_file = add_suffix(name, 'comp.stderr')
1122 diff_file_name = in_testdir(add_suffix(name, 'comp.diff'))
1123
1124 if not compare_outputs(way, 'stderr',
1125 join_normalisers(getTestOpts().extra_errmsg_normaliser,
1126 normalise_errmsg),
1127 expected_stderr_file, actual_stderr_file,
1128 diff_file=diff_file_name,
1129 whitespace_normaliser=getattr(getTestOpts(),
1130 "whitespace_normaliser",
1131 normalise_whitespace)):
1132 stderr = open(diff_file_name, 'rb').read()
1133 os.remove(diff_file_name)
1134 return failBecauseStderr('stderr mismatch', stderr=stderr )
1135
1136
1137 # no problems found, this test passed
1138 return passed()
1139
1140 def compile_cmp_asm( name, way, ext, extra_hc_opts ):
1141 print('Compile only, extra args = ', extra_hc_opts)
1142 result = simple_build(name + '.' + ext, way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)
1143
1144 if badResult(result):
1145 return result
1146
1147 # The actual assembly output should always match the expected
1148 # output; note that this compares the generated .s file, not stderr.
1150
1151 expected_asm_file = find_expected_file(name, 'asm')
1152 actual_asm_file = add_suffix(name, 's')
1153
1154 if not compare_outputs(way, 'asm',
1155 join_normalisers(normalise_errmsg, normalise_asm),
1156 expected_asm_file, actual_asm_file):
1157 return failBecause('asm mismatch')
1158
1159 # no problems found, this test passed
1160 return passed()
1161
1162 def compile_grep_asm( name, way, ext, is_substring, extra_hc_opts ):
1163 print('Compile only, extra args = ', extra_hc_opts)
1164 result = simple_build(name + '.' + ext, way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)
1165
1166 if badResult(result):
1167 return result
1168
1169 expected_pat_file = find_expected_file(name, 'asm')
1170 actual_asm_file = add_suffix(name, 's')
1171
1172 if not grep_output(join_normalisers(normalise_errmsg),
1173 expected_pat_file, actual_asm_file,
1174 is_substring):
1175 return failBecause('asm mismatch')
1176
1177 # no problems found, this test passed
1178 return passed()
1179
1180 # -----------------------------------------------------------------------------
1181 # Compile-and-run tests
1182
1183 def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts, backpack=0 ):
1184 # print 'Compile and run, extra args = ', extra_hc_opts
1185
1186 result = extras_build( way, extra_mods, extra_hc_opts )
1187 if badResult(result):
1188 return result
1189 extra_hc_opts = result['hc_opts']
1190
1191 if way.startswith('ghci'): # interpreted...
1192 return interpreter_run(name, way, extra_hc_opts, top_mod)
1193 else: # compiled...
1194 result = simple_build(name, way, extra_hc_opts, 0, top_mod, 1, 1, backpack = backpack)
1195 if badResult(result):
1196 return result
1197
1198 cmd = './' + name;
1199
1200 # we don't check the compiler's stderr for a compile-and-run test
1201 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1202
1203 def compile_and_run( name, way, extra_hc_opts ):
1204 return compile_and_run__( name, way, '', [], extra_hc_opts)
1205
1206 def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
1207 return compile_and_run__( name, way, top_mod, [], extra_hc_opts)
1208
1209 def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
1210 return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
1211
1212 def stats( name, way, stats_file ):
1213 opts = getTestOpts()
1214 return check_stats(name, way, stats_file, opts.stats_range_fields)
1215
1216 def metric_dict(name, way, metric, value):
1217 return Perf.PerfStat(
1218 test_env = config.test_env,
1219 test = name,
1220 way = way,
1221 metric = metric,
1222 value = value)
1223
1224 # -----------------------------------------------------------------------------
1225 # Check test stats. This prints the results for the user.
1226 # name: name of the test.
1227 # way: the way.
1228 # stats_file: the path of the stats_file containing the stats for the test.
1229 # range_fields: see TestOptions.stats_range_fields
1230 # Returns a pass/fail object. Passes if the stats are within the expected value ranges.
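# The stats file is produced by running the program (or the compiler)
# with '+RTS -t<file> --machine-readable -RTS'; it contains lines of
# the form (the value here is illustrative):
#
#   ("bytes allocated", "4000000")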
1232 def check_stats(name, way, stats_file, range_fields):
1233 head_commit = Perf.commit_hash('HEAD') if Perf.inside_git_repo() else None
1234 result = passed()
1235 if range_fields:
1236 try:
1237 f = open(in_testdir(stats_file))
1238 except IOError as e:
1239 return failBecause(str(e))
1240 stats_file_contents = f.read()
1241 f.close()
1242
1243 for (metric, baseline_and_dev) in range_fields.items():
1244 field_match = re.search('\("' + metric + '", "([0-9]+)"\)', stats_file_contents)
1245 if field_match == None:
1246 print('Failed to find metric: ', metric)
1247 metric_result = failBecause('no such stats metric')
1248 else:
1249 actual_val = int(field_match.group(1))
1250
1251 # Store the metric so it can later be stored in a git note.
1252 perf_stat = metric_dict(name, way, metric, actual_val)
1253 change = None
1254
1255 # If this is the first time running the benchmark, then pass.
1256 baseline = baseline_and_dev[0](way, head_commit) \
1257 if Perf.inside_git_repo() else None
1258 if baseline == None:
1259 metric_result = passed()
1260 change = MetricChange.NewMetric
1261 else:
1262 tolerance_dev = baseline_and_dev[1]
1263 (change, metric_result) = Perf.check_stats_change(
1264 perf_stat,
1265 baseline,
1266 tolerance_dev,
1267 config.allowed_perf_changes,
1268 config.verbose >= 4)
1269 t.metrics.append((change, perf_stat))
1270
1271 # If any metric fails then the test fails.
1272 # Note, the remaining metrics are still run so that
1273 # a complete list of changes can be presented to the user.
1274 if metric_result['passFail'] == 'fail':
1275 result = metric_result
1276
1277 return result
1278
1279 # -----------------------------------------------------------------------------
1280 # Build a single-module program
1281
1282 def extras_build( way, extra_mods, extra_hc_opts ):
1283 for mod, opts in extra_mods:
1284 result = simple_build(mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0)
1285 if not (mod.endswith('.hs') or mod.endswith('.lhs')):
1286 extra_hc_opts += ' ' + replace_suffix(mod, 'o')
1287 if badResult(result):
1288 return result
1289
1290 return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1291
1292 def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, backpack = False):
1293 opts = getTestOpts()
1294
1295 # Redirect stdout and stderr to the same file
1296 stdout = in_testdir(name, 'comp.stderr')
1297 stderr = subprocess.STDOUT
1298
1299 if top_mod != '':
1300 srcname = top_mod
1301 elif addsuf:
1302 if backpack:
1303 srcname = add_suffix(name, 'bkp')
1304 else:
1305 srcname = add_hs_lhs_suffix(name)
1306 else:
1307 srcname = name
1308
1309 if top_mod != '':
1310 to_do = '--make '
1311 if link:
1312 to_do = to_do + '-o ' + name
1313 elif backpack:
1314 if link:
1315 to_do = '-o ' + name + ' '
1316 else:
1317 to_do = ''
1318 to_do = to_do + '--backpack '
1319 elif link:
1320 to_do = '-o ' + name
1321 else:
1322 to_do = '-c' # just compile
1323
1324 stats_file = name + '.comp.stats'
1325 if isCompilerStatsTest():
1326 extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1327 if backpack:
1328 extra_hc_opts += ' -outputdir ' + name + '.out'
1329
1330 # Required by GHC 7.3+, harmless for earlier versions:
1331 if (getTestOpts().c_src or
1332 getTestOpts().objc_src or
1333 getTestOpts().objcpp_src or
1334 getTestOpts().cmm_src):
1335 extra_hc_opts += ' -no-hs-main '
1336
1337 if getTestOpts().compile_cmd_prefix == '':
1338 cmd_prefix = ''
1339 else:
1340 cmd_prefix = getTestOpts().compile_cmd_prefix + ' '
1341
1342 flags = ' '.join(get_compiler_flags() + config.way_flags[way])
1343
1344 cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
1345 '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts}'
1346 ).format(**locals())
1347
1348 exit_code = runCmd(cmd, None, stdout, stderr, opts.compile_timeout_multiplier)
1349
1350 actual_stderr_path = in_testdir(name, 'comp.stderr')
1351
1352 if exit_code != 0 and not should_fail:
1353 if config.verbose >= 1 and _expect_pass(way):
1354 print('Compile failed (exit code {0}) errors were:'.format(exit_code))
1355 dump_file(actual_stderr_path)
1356
1357 # ToDo: if the sub-shell was killed by ^C, then exit
1358
1359 if isCompilerStatsTest():
1360 statsResult = check_stats(name, way, stats_file, opts.stats_range_fields)
1361 if badResult(statsResult):
1362 return statsResult
1363
1364 if should_fail:
1365 if exit_code == 0:
1366 stderr_contents = open(actual_stderr_path, 'rb').read()
1367 return failBecauseStderr('exit code 0', stderr_contents)
1368 else:
1369 if exit_code != 0:
1370 stderr_contents = open(actual_stderr_path, 'rb').read()
1371 return failBecauseStderr('exit code non-0', stderr_contents)
1372
1373 return passed()
1374
1375 # -----------------------------------------------------------------------------
1376 # Run a program and check its output
1377 #
1378 # If testname.stdin exists, route input from that, else
1379 # from /dev/null. Route output to testname.run.stdout and
1380 # testname.run.stderr. Returns the exit code of the run.
1381
1382 def simple_run(name, way, prog, extra_run_opts):
1383 opts = getTestOpts()
1384
1385 # figure out what to use for stdin
1386 if opts.stdin:
1387 stdin = in_testdir(opts.stdin)
1388 elif os.path.exists(in_testdir(name, 'stdin')):
1389 stdin = in_testdir(name, 'stdin')
1390 else:
1391 stdin = None
1392
1393 stdout = in_testdir(name, 'run.stdout')
1394 if opts.combined_output:
1395 stderr = subprocess.STDOUT
1396 else:
1397 stderr = in_testdir(name, 'run.stderr')
1398
1399 my_rts_flags = rts_flags(way)
1400
1401 # Collect stats if necessary:
1402 # isStatsTest and not isCompilerStatsTest():
1403 # assume we are running a ghc compiled program. Collect stats.
1404 # isStatsTest and way == 'ghci':
1405 # assume we are running a program via ghci. Collect stats
1406 stats_file = name + '.stats'
1407 if isStatsTest() and (not isCompilerStatsTest() or way == 'ghci'):
1408 stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1409 else:
1410 stats_args = ''
1411
1412 # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
1413 cmd = prog + stats_args + ' ' + my_rts_flags + ' ' + extra_run_opts
1414
1415 if opts.cmd_wrapper != None:
1416 cmd = opts.cmd_wrapper(cmd)
1417
1418 cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())
1419
1420 # run the command
1421 exit_code = runCmd(cmd, stdin, stdout, stderr, opts.run_timeout_multiplier)
1422
1423 # check the exit code
1424 if exit_code != opts.exit_code:
1425 if config.verbose >= 1 and _expect_pass(way):
1426 print('Wrong exit code for ' + name + '(' + way + ')' + '(expected', opts.exit_code, ', actual', exit_code, ')')
1427 dump_stdout(name)
1428 dump_stderr(name)
1429 return failBecause('bad exit code (%d)' % exit_code)
1430
1431 if not (opts.ignore_stderr or stderr_ok(name, way) or opts.combined_output):
1432 return failBecause('bad stderr')
1433 if not (opts.ignore_stdout or stdout_ok(name, way)):
1434 return failBecause('bad stdout')
1435
1436 check_hp = '-h' in my_rts_flags and opts.check_hp
1437 check_prof = '-p' in my_rts_flags
1438
1439 # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
1440 if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
1441 return failBecause('bad heap profile')
1442 if check_prof and not check_prof_ok(name, way):
1443 return failBecause('bad profile')
1444
1445 return check_stats(name, way, stats_file, opts.stats_range_fields)
1446
1447 def rts_flags(way):
1448 args = config.way_rts_flags.get(way, [])
1449 return '+RTS {0} -RTS'.format(' '.join(args)) if args else ''
1450
1451 # -----------------------------------------------------------------------------
1452 # Run a program in the interpreter and check its output
1453
1454 def interpreter_run(name, way, extra_hc_opts, top_mod):
1455 opts = getTestOpts()
1456
1457 stdout = in_testdir(name, 'interp.stdout')
1458 stderr = in_testdir(name, 'interp.stderr')
1459 script = in_testdir(name, 'genscript')
1460
1461 if opts.combined_output:
1462 framework_fail(name, 'unsupported',
1463 'WAY=ghci and combined_output together is not supported')
1464
1465 if (top_mod == ''):
1466 srcname = add_hs_lhs_suffix(name)
1467 else:
1468 srcname = top_mod
1469
1470 delimiter = '===== program output begins here\n'
1471
1472 with io.open(script, 'w', encoding='utf8') as f:
1473 # set the prog name and command-line args to match the compiled
1474 # environment.
1475 f.write(':set prog ' + name + '\n')
1476 f.write(':set args ' + opts.extra_run_opts + '\n')
1477 # Add marker lines to the stdout and stderr output files, so we
1478 # can separate GHCi's output from the program's.
1479 f.write(':! echo ' + delimiter)
1480 f.write(':! echo 1>&2 ' + delimiter)
1481 # Set stdout to be line-buffered to match the compiled environment.
1482 f.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
1483 # wrapping in GHC.TopHandler.runIO ensures we get the same output
1484 # in the event of an exception as for the compiled program.
1485 f.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
1486
1487 stdin = in_testdir(opts.stdin if opts.stdin else add_suffix(name, 'stdin'))
1488 if os.path.exists(stdin):
1489 os.system('cat "{0}" >> "{1}"'.format(stdin, script))
1490
1491 flags = ' '.join(get_compiler_flags() + config.way_flags[way])
1492
1493 cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts}'
1494 ).format(**locals())
1495
1496 if getTestOpts().cmd_wrapper != None:
1497 cmd = opts.cmd_wrapper(cmd);
1498
1499 cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())
1500
1501 exit_code = runCmd(cmd, script, stdout, stderr, opts.run_timeout_multiplier)
1502
1503 # split the stdout into compilation/program output
1504 split_file(stdout, delimiter,
1505 in_testdir(name, 'comp.stdout'),
1506 in_testdir(name, 'run.stdout'))
1507 split_file(stderr, delimiter,
1508 in_testdir(name, 'comp.stderr'),
1509 in_testdir(name, 'run.stderr'))
1510
1511 # check the exit code
1512 if exit_code != getTestOpts().exit_code:
1513 print('Wrong exit code for ' + name + '(' + way + ') (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
1514 dump_stdout(name)
1515 dump_stderr(name)
1516 return failBecause('bad exit code (%d)' % exit_code)
1517
1518 # ToDo: if the sub-shell was killed by ^C, then exit
1519
1520 if not (opts.ignore_stderr or stderr_ok(name, way)):
1521 return failBecause('bad stderr')
1522 elif not (opts.ignore_stdout or stdout_ok(name, way)):
1523 return failBecause('bad stdout')
1524 else:
1525 return passed()
1526
1527 def split_file(in_fn, delimiter, out1_fn, out2_fn):
1528 # See Note [Universal newlines].
1529 with io.open(in_fn, 'r', encoding='utf8', errors='replace', newline=None) as infile:
1530 with io.open(out1_fn, 'w', encoding='utf8', newline='') as out1:
1531 with io.open(out2_fn, 'w', encoding='utf8', newline='') as out2:
1532 line = infile.readline()
1533 while re.sub('^\s*','',line) != delimiter and line != '':
1534 out1.write(line)
1535 line = infile.readline()
1536
1537 line = infile.readline()
1538 while line != '':
1539 out2.write(line)
1540 line = infile.readline()
1541
1542 # -----------------------------------------------------------------------------
1543 # Utils
1544 def get_compiler_flags():
1545 opts = getTestOpts()
1546
1547 flags = copy.copy(opts.compiler_always_flags)
1548
1549 flags.append(opts.extra_hc_opts)
1550
1551 if opts.outputdir != None:
1552 flags.extend(["-outputdir", opts.outputdir])
1553
1554 return flags
1555
1556 def stdout_ok(name, way):
1557 actual_stdout_file = add_suffix(name, 'run.stdout')
1558 expected_stdout_file = find_expected_file(name, 'stdout')
1559
1560 extra_norm = join_normalisers(normalise_output, getTestOpts().extra_normaliser)
1561
1562 check_stdout = getTestOpts().check_stdout
1563 if check_stdout:
1564 actual_stdout_path = in_testdir(actual_stdout_file)
1565 return check_stdout(actual_stdout_path, extra_norm)
1566
1567 return compare_outputs(way, 'stdout', extra_norm,
1568 expected_stdout_file, actual_stdout_file)
1569
1570 def dump_stdout( name ):
1571 with open(in_testdir(name, 'run.stdout'), encoding='utf8') as f:
1572 str = f.read().strip()
1573 if str:
1574 print("Stdout (", name, "):")
1575 print(str)
1576
1577 def stderr_ok(name, way):
1578 actual_stderr_file = add_suffix(name, 'run.stderr')
1579 expected_stderr_file = find_expected_file(name, 'stderr')
1580
1581 return compare_outputs(way, 'stderr',
1582 join_normalisers(normalise_errmsg, getTestOpts().extra_errmsg_normaliser), \
1583 expected_stderr_file, actual_stderr_file,
1584 whitespace_normaliser=normalise_whitespace)
1585
1586 def dump_stderr( name ):
1587 with open(in_testdir(name, 'run.stderr'), encoding='utf8') as f:
1588 str = f.read().strip()
1589 if str:
1590 print("Stderr (", name, "):")
1591 print(str)
1592
1593 def read_no_crs(file):
1594 str = ''
1595 try:
1596 # See Note [Universal newlines].
1597 with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
1598 str = h.read()
1599 except Exception:
1600 # On Windows, if the program fails very early, it seems the
1601 # files stdout/stderr are redirected to may not get created
1602 pass
1603 return str
1604
1605 def write_file(file, str):
1606 # See Note [Universal newlines].
1607 with io.open(file, 'w', encoding='utf8', newline='') as h:
1608 h.write(str)
1609
1610 # Note [Universal newlines]
1611 #
1612 # We don't want to write any Windows style line endings ever, because
1613 # it would mean that `make accept` would touch every line of the file
1614 # when switching between Linux and Windows.
1615 #
1616 # Furthermore, when reading a file, it is convenient to translate all
1617 # Windows style endings to '\n', as it simplifies searching or massaging
1618 # the content.
1619 #
1620 # Solution: use `io.open` instead of `open`
1621 # * when reading: use newline=None to translate '\r\n' to '\n'
1622 # * when writing: use newline='' to not translate '\n' to '\r\n'
1623 #
1624 # See https://docs.python.org/2/library/io.html#io.open.
1625 #
1626 # This should work with both python2 and python3, and with both mingw*
1627 # as msys2 style Python.
1628 #
1629 # Do note that io.open returns unicode strings. So we have to specify
1630 # the expected encoding. But there is at least one file which is not
1631 # valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
1632 # Another solution would be to open files in binary mode always, and
1633 # operate on bytes.
1634
1635 def check_hp_ok(name):
1636 opts = getTestOpts()
1637
1638 # do not qualify the file name for hp2ps, because we should already be in the right directory
1639 hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())
1640
1641 hp2psResult = runCmd(hp2psCmd)
1642
1643 actual_ps_path = in_testdir(name, 'ps')
1644
1645 if hp2psResult == 0:
1646 if os.path.exists(actual_ps_path):
1647 if gs_working:
1648 gsResult = runCmd(genGSCmd(actual_ps_path))
1649 if (gsResult == 0):
1650 return (True)
1651 else:
1652 print("hp2ps output for " + name + " is not valid PostScript")
1653 else: return (True) # assume postscript is valid without ghostscript
1654 else:
1655 print("hp2ps did not generate PostScript for " + name)
1656 return (False)
1657 else:
1658 print("hp2ps error when processing heap profile for " + name)
1659 return(False)
1660
1661 def check_prof_ok(name, way):
1662 expected_prof_file = find_expected_file(name, 'prof.sample')
1663 expected_prof_path = in_testdir(expected_prof_file)
1664
1665 # Check actual prof file only if we have an expected prof file to
1666 # compare it with.
1667 if not os.path.exists(expected_prof_path):
1668 return True
1669
1670 actual_prof_file = add_suffix(name, 'prof')
1671 actual_prof_path = in_testdir(actual_prof_file)
1672
1673 if not os.path.exists(actual_prof_path):
1674 print(actual_prof_path + " does not exist")
1675 return(False)
1676
1677 if os.path.getsize(actual_prof_path) == 0:
1678 print(actual_prof_path + " is empty")
1679 return(False)
1680
1681 return compare_outputs(way, 'prof', normalise_prof,
1682 expected_prof_file, actual_prof_file,
1683 whitespace_normaliser=normalise_whitespace)
1684
1685 # Compare expected output to actual output, and optionally accept the
1686 # new output. Returns true if output matched or was accepted, false
1687 # otherwise. See Note [Output comparison] for the meaning of the
1688 # normaliser and whitespace_normaliser parameters.
1689 def compare_outputs(way, kind, normaliser, expected_file, actual_file, diff_file=None,
1690 whitespace_normaliser=lambda x:x):
1691
1692 expected_path = in_srcdir(expected_file)
1693 actual_path = in_testdir(actual_file)
1694
1695 if os.path.exists(expected_path):
1696 expected_str = normaliser(read_no_crs(expected_path))
1697 # Create the .normalised file in the testdir, not in the srcdir.
1698 expected_normalised_file = add_suffix(expected_file, 'normalised')
1699 expected_normalised_path = in_testdir(expected_normalised_file)
1700 else:
1701 expected_str = ''
1702 expected_normalised_path = '/dev/null'
1703
1704 actual_raw = read_no_crs(actual_path)
1705 actual_str = normaliser(actual_raw)
1706
1707 # See Note [Output comparison].
1708 if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
1709 return True
1710 else:
1711 if config.verbose >= 1 and _expect_pass(way):
1712 print('Actual ' + kind + ' output differs from expected:')
1713
1714 if expected_normalised_path != '/dev/null':
1715 write_file(expected_normalised_path, expected_str)
1716
1717 actual_normalised_path = add_suffix(actual_path, 'normalised')
1718 write_file(actual_normalised_path, actual_str)
1719
1720 if config.verbose >= 1 and _expect_pass(way):
1721 # See Note [Output comparison].
1722 r = runCmd('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
1723 actual_normalised_path),
1724 stdout=diff_file,
1725 print_output=True)
1726
1727 # If for some reason there were no non-whitespace differences,
1728 # then do a full diff
1729 if r == 0:
1730 r = runCmd('diff -u "{0}" "{1}"'.format(expected_normalised_path,
1731 actual_normalised_path),
1732 stdout=diff_file,
1733 print_output=True)
1734 elif diff_file:
1735 open(diff_file, 'ab').close() # make sure the file still exists, as we will try to read it later
1736
1737 if config.accept and (getTestOpts().expect == 'fail' or
1738 way in getTestOpts().expect_fail_for):
1739 if_verbose(1, 'Test is expected to fail. Not accepting new output.')
1740 return False
1741 elif config.accept and actual_raw:
1742 if config.accept_platform:
1743 if_verbose(1, 'Accepting new output for platform "'
1744 + config.platform + '".')
1745 expected_path += '-' + config.platform
1746 elif config.accept_os:
1747 if_verbose(1, 'Accepting new output for os "'
1748 + config.os + '".')
1749 expected_path += '-' + config.os
1750 else:
1751 if_verbose(1, 'Accepting new output.')
1752
1753 write_file(expected_path, actual_raw)
1754 return True
1755 elif config.accept:
1756 if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
1757 os.remove(expected_path)
1758 return True
1759 else:
1760 return False
1761
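# An illustrative call, roughly as the stdout checker in this module makes it
# (the concrete file names here are hypothetical):
#
#   compare_outputs(way, 'stdout', normalise_output,
#                   'T123.stdout', 'T123.run.stdout',
#                   whitespace_normaliser=normalise_whitespace)
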
1762 # Checks that each line from pattern_file is present in actual_file as
1763 # a substring or regex pattern depending on is_substring.
1764 def grep_output(normaliser, pattern_file, actual_file, is_substring=True):
1765 expected_path = in_srcdir(pattern_file)
1766 actual_path = in_testdir(actual_file)
1767
1768 expected_patterns = read_no_crs(expected_path).strip().split('\n')
1769 actual_raw = read_no_crs(actual_path)
1770 actual_str = normaliser(actual_raw)
1771
1772 success = True
1773 failed_patterns = []
1774
1775 def regex_match(pat, actual):
1776 return re.search(pat, actual) is not None
1777
1778 def substring_match(pat, actual):
1779 return pat in actual
1780
1781 def is_match(pat, actual):
1782 if is_substring:
1783 return substring_match(pat, actual)
1784 else:
1785 return regex_match(pat, actual)
1786
1787 for pat in expected_patterns:
1788 if not is_match(pat, actual_str):
1789 success = False
1790 failed_patterns.append(pat)
1791
1792 if not success:
1793 print('Actual output does not contain the following patterns:')
1794 for pat in failed_patterns:
1795 print(pat)
1796
1797 return success
1798
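# An illustrative call of grep_output (file names hypothetical): require every
# line of T123.pats to occur as a substring of the normalised stderr:
#
#   grep_output(normalise_errmsg, 'T123.pats', 'T123.run.stderr',
#               is_substring=True)
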
1799 # Note [Output comparison]
1800 #
1801 # We do two types of output comparison:
1802 #
1803 # 1. To decide whether a test has failed. We apply a `normaliser` and an
1804 # optional `whitespace_normaliser` to the expected and the actual
1805 # output, before comparing the two.
1806 #
1807 # 2. To show as a diff to the user when the test indeed failed. We apply
1808 # the same `normaliser` function to the outputs, to make the diff as
1809 # small as possible (only showing the actual problem). But we don't
1810 # apply the `whitespace_normaliser` here, because it might completely
1811 # squash all whitespace, making the diff unreadable. Instead we rely
1812 # on the `diff` program to ignore whitespace changes as much as
1813 # possible (#10152).
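#
# For example (illustrative values): with expected output 'x  =  1' and actual
# output 'x = 1', passing whitespace_normaliser=normalise_whitespace makes
# step 1 compare 'x = 1' against 'x = 1', so the test passes and no diff is
# shown. Had the outputs differed in more than whitespace, step 2 would run
# `diff -uw` on the normalised files, falling back to a plain `diff -u` when
# the whitespace-insensitive diff reports no differences.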
1814
1815 def normalise_whitespace( str ):
1816 # Merge contiguous whitespace characters into a single space.
1817 return ' '.join(str.split())
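# e.g. (illustrative) normalise_whitespace(' a\t b\nc ') == 'a b c'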
1818
1819 callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')
1820
1821 def normalise_callstacks(s):
1822 opts = getTestOpts()
1823 def repl(matches):
1824 location = matches.group(1)
1825 location = normalise_slashes_(location)
1826 return ', called at {0}:<line>:<column> in <package-id>:'.format(location)
1827 # Ignore line number differences in call stacks (#10834).
1828 s = re.sub(callSite_re, repl, s)
1829 # Ignore the change in how we identify implicit call-stacks
1830 s = s.replace('from ImplicitParams', 'from HasCallStack')
1831 if not opts.keep_prof_callstacks:
1832 # Don't output prof callstacks. Test output should be
1833 # independent from the WAY we run the test.
1834 s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
1835 return s
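# For example (illustrative input), normalise_callstacks rewrites
#
#   undefined, called at T10834.hs:4:11 in main:Main
#
# to
#
#   undefined, called at T10834.hs:<line>:<column> in <package-id>:Main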
1836
1837 tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)
1838
1839 def normalise_type_reps(str):
1840 """ Normalise out fingerprints from Typeable TyCon representations """
1841 return re.sub(tyCon_re, 'TyCon FINGERPRINT FINGERPRINT ', str)
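# e.g. (illustrative) 'TyCon 123## 456## Bool' becomes
# 'TyCon FINGERPRINT FINGERPRINT Bool'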
1842
1843 def normalise_errmsg( str ):
1844 """Normalise error-messages emitted via stderr"""
1845 # IBM AIX's `ld` is a bit chatty
1846 if opsys('aix'):
1847 str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
1848 # remove " error:" and lower-case " Warning:" to make patch for
1849 # trac issue #10021 smaller
1850 str = modify_lines(str, lambda l: re.sub(' error:', '', l))
1851 str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
1852 str = normalise_callstacks(str)
1853 str = normalise_type_reps(str)
1854
1855 # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
1856 # the colon is there because it appears in error messages; this
1857 # hacky solution is used in place of more sophisticated filename
1858 # mangling
1859 str = re.sub('([^\\s])\\.exe', '\\1', str)
1860
1861 # normalise slashes, minimise Windows/Unix filename differences
1862 str = re.sub('\\\\', '/', str)
1863
1864 # The inplace ghc's are called ghc-stage[123] to avoid filename
1865 # collisions, so we need to normalise that to just "ghc"
1866 str = re.sub('ghc-stage[123]', 'ghc', str)
1867
1868 # Error messages sometimes contain integer implementation package
1869 str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
1870
1871 # Error messages sometimes contain this blurb which can vary
1872 # spuriously depending upon build configuration (e.g. based on integer
1873 # backend)
1874 str = re.sub('...plus ([a-z]+|[0-9]+) instances involving out-of-scope types',
1875 '...plus N instances involving out-of-scope types', str)
1876
1877 # Also filter out bullet characters. This is because bullets are used to
1878 # separate error sections, and tests shouldn't be sensitive to how the
1879 # division happens.
1880 bullet = '•'.encode('utf8') if isinstance(str, bytes) else '•'
1881 str = str.replace(bullet, '')
1882
1883 # Windows only, this is a bug in hsc2hs but it is preventing
1884 # stable output for the testsuite. See #9775. For now we filter out this
1885 # warning message to get clean output.
1886 if config.msys:
1887 str = re.sub('Failed to remove file (.*); error= (.*)$', '', str)
1888 str = re.sub('DeleteFile "(.+)": permission denied \(Access is denied\.\)(.*)$', '', str)
1889
1890 return str
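# For example (illustrative input): 'ghc-stage2.exe: panic!' normalises to
# 'ghc: panic!' (the '.exe' is zapped, then the stage suffix is dropped).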
1891
1892 # normalise a .prof file, so that we can reasonably compare it against
1893 # a sample. This doesn't compare any of the actual profiling data,
1894 # only the shape of the profile and the number of entries.
1895 def normalise_prof (str):
1896 # strip everything up to the line beginning "COST CENTRE"
1897 str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)
1898
1899 # strip results for CAFs, these tend to change unpredictably
1900 str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)
1901
1902 # XXX Ignore Main.main. Sometimes this appears under CAF, and
1903 # sometimes under MAIN.
1904 str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)
1905
1906 # We have something like this:
1907 #
1908 # MAIN MAIN <built-in> 53 0 0.0 0.2 0.0 100.0
1909 # CAF Main <entire-module> 105 0 0.0 0.3 0.0 62.5
1910 # readPrec Main Main_1.hs:7:13-16 109 1 0.0 0.6 0.0 0.6
1911 # readPrec Main Main_1.hs:4:13-16 107 1 0.0 0.6 0.0 0.6
1912 # main Main Main_1.hs:(10,1)-(20,20) 106 1 0.0 20.2 0.0 61.0
1913 # == Main Main_1.hs:7:25-26 114 1 0.0 0.0 0.0 0.0
1914 # == Main Main_1.hs:4:25-26 113 1 0.0 0.0 0.0 0.0
1915 # showsPrec Main Main_1.hs:7:19-22 112 2 0.0 1.2 0.0 1.2
1916 # showsPrec Main Main_1.hs:4:19-22 111 2 0.0 0.9 0.0 0.9
1917 # readPrec Main Main_1.hs:7:13-16 110 0 0.0 18.8 0.0 18.8
1918 # readPrec Main Main_1.hs:4:13-16 108 0 0.0 19.9 0.0 19.9
1919 #
1920 # then we remove all the specific profiling data, leaving only the cost
1921 # centre name, module, src, and entries, to end up with this: (modulo
1922 # whitespace between columns)
1923 #
1924 # MAIN MAIN <built-in> 0
1925 # readPrec Main Main_1.hs:7:13-16 1
1926 # readPrec Main Main_1.hs:4:13-16 1
1927 # == Main Main_1.hs:7:25-26 1
1928 # == Main Main_1.hs:4:25-26 1
1929 # showsPrec Main Main_1.hs:7:19-22 2
1930 # showsPrec Main Main_1.hs:4:19-22 2
1931 # readPrec Main Main_1.hs:7:13-16 0
1932 # readPrec Main Main_1.hs:4:13-16 0
1933
1934 # Split 9 whitespace-separated groups, take columns 1 (cost-centre), 2
1935 # (module), 3 (src), and 5 (entries). SCC names can't have whitespace, so
1936 # this works fine.
1937 str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
1938 '\\1 \\2 \\3 \\5\n', str)
1939 return str
1940
1941 def normalise_slashes_( str ):
1942 str = re.sub('\\\\', '/', str)
1943 str = re.sub('//', '/', str)
1944 return str
1945
1946 def normalise_exe_( str ):
1947 str = re.sub('\.exe', '', str)
1948 return str
1949
1950 def normalise_output( str ):
1951 # remove " error:" and lower-case " Warning:" to make patch for
1952 # trac issue #10021 smaller
1953 str = modify_lines(str, lambda l: re.sub(' error:', '', l))
1954 str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
1955 # Remove a .exe extension (for Windows)
1956 # This can occur in error messages generated by the program.
1957 str = re.sub('([^\\s])\\.exe', '\\1', str)
1958 str = normalise_callstacks(str)
1959 str = normalise_type_reps(str)
1960 return str
1961
1962 def normalise_asm( str ):
1963 lines = str.split('\n')
1964 # Only keep instructions and labels not starting with a dot.
1965 metadata = re.compile('^[ \t]*\\..*$')
1966 out = []
1967 for line in lines:
1968 # Drop metadata directives (e.g. ".type")
1969 if not metadata.match(line):
1970 line = re.sub('@plt', '', line)
1971 instr = line.lstrip().split()
1972 # Drop empty lines.
1973 if not instr:
1974 continue
1975 # Drop operands, except for call instructions.
1976 elif instr[0] == 'call':
1977 out.append(instr[0] + ' ' + instr[1])
1978 else:
1979 out.append(instr[0])
1980 out = '\n'.join(out)
1981 return out
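# For example (illustrative input), normalise_asm keeps only:
#
#       .text                 ->  (dropped: metadata directive)
#       movq %rbx, %rax       ->  movq
#       call f@plt            ->  call f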
1982
1983 def if_verbose( n, s ):
1984 if config.verbose >= n:
1985 print(s)
1986
1987 def dump_file(f):
1988 try:
1989 with io.open(f) as file:
1990 print(file.read())
1991 except Exception:
1992 print('')
1993
1994 def runCmd(cmd, stdin=None, stdout=None, stderr=None, timeout_multiplier=1.0, print_output=False):
1995 timeout_prog = strip_quotes(config.timeout_prog)
1996 timeout = str(int(ceil(config.timeout * timeout_multiplier)))
1997
1998 # Format cmd using config. Example: cmd='{hpc} report A.tix'
1999 cmd = cmd.format(**config.__dict__)
2000 if_verbose(3, cmd + ('< ' + os.path.basename(stdin) if stdin else ''))
2001
2002 stdin_file = io.open(stdin, 'rb') if stdin else None
2003 stdout_buffer = b''
2004 stderr_buffer = b''
2005
2006 hStdErr = subprocess.PIPE
2007 if stderr is subprocess.STDOUT:
2008 hStdErr = subprocess.STDOUT
2009
2010 try:
2011 # cmd is a complex command in Bourne-shell syntax
2012 # e.g (cd . && 'C:/users/simonpj/HEAD/inplace/bin/ghc-stage2' ...etc)
2013 # Hence it must ultimately be run by a Bourne shell. It's timeout's job
2014 # to invoke the Bourne shell
2015
2016 r = subprocess.Popen([timeout_prog, timeout, cmd],
2017 stdin=stdin_file,
2018 stdout=subprocess.PIPE,
2019 stderr=hStdErr,
2020 env=ghc_env)
2021
2022 stdout_buffer, stderr_buffer = r.communicate()
2023 finally:
2024 if stdin_file:
2025 stdin_file.close()
2026 if config.verbose >= 1 and print_output:
2027 if stdout_buffer:
2028 sys.stdout.buffer.write(stdout_buffer)
2029 if stderr_buffer:
2030 sys.stderr.buffer.write(stderr_buffer)
2031
2032 if stdout:
2033 with io.open(stdout, 'wb') as f:
2034 f.write(stdout_buffer)
2035 if stderr:
2036 if stderr is not subprocess.STDOUT:
2037 with io.open(stderr, 'wb') as f:
2038 f.write(stderr_buffer)
2039
2040 if r.returncode == 98:
2041 # The python timeout program uses 98 to signal that ^C was pressed
2042 stopNow()
2043 if r.returncode == 99 and getTestOpts().exit_code != 99:
2044 # Only print a message when timeout killed the process unexpectedly.
2045 if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
2046 return r.returncode
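# An illustrative use (the .tix file name is hypothetical; '{hpc}' is expanded
# from config, as in the example above):
#
#   exit_code = runCmd('{hpc} report Example.tix', stdout='Example.report')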
2047
2048 # -----------------------------------------------------------------------------
2049 # checking if ghostscript is available for checking the output of hp2ps
2050
2051 def genGSCmd(psfile):
2052 return '{{gs}} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "{0}"'.format(psfile)
2053
2054 def gsNotWorking():
2055 global gs_working
2056 print("GhostScript not available for hp2ps tests")
2057
2058 global gs_working
2059 gs_working = False
2060 if config.have_profiling:
2061 if config.gs != '':
2062 resultGood = runCmd(genGSCmd(config.top + '/config/good.ps'))
2063 if resultGood == 0:
2064 resultBad = runCmd(genGSCmd(config.top + '/config/bad.ps') +
2065 ' >/dev/null 2>&1')
2066 if resultBad != 0:
2067 print("GhostScript available for hp2ps tests")
2068 gs_working = True
2069 else:
2070 gsNotWorking()
2071 else:
2072 gsNotWorking()
2073 else:
2074 gsNotWorking()
2075
2076 def add_suffix( name, suffix ):
2077 if suffix == '':
2078 return name
2079 else:
2080 return name + '.' + suffix
2081
2082 def add_hs_lhs_suffix(name):
2083 if getTestOpts().c_src:
2084 return add_suffix(name, 'c')
2085 elif getTestOpts().cmm_src:
2086 return add_suffix(name, 'cmm')
2087 elif getTestOpts().objc_src:
2088 return add_suffix(name, 'm')
2089 elif getTestOpts().objcpp_src:
2090 return add_suffix(name, 'mm')
2091 elif getTestOpts().literate:
2092 return add_suffix(name, 'lhs')
2093 else:
2094 return add_suffix(name, 'hs')
2095
2096 def replace_suffix( name, suffix ):
2097 base, suf = os.path.splitext(name)
2098 return base + '.' + suffix
2099
2100 def in_testdir(name, suffix=''):
2101 return os.path.join(getTestOpts().testdir, add_suffix(name, suffix))
2102
2103 def in_srcdir(name, suffix=''):
2104 return os.path.join(getTestOpts().srcdir, add_suffix(name, suffix))
2105
2106 # Finding the sample output. The filename is of the form
2107 #
2108 # <test>.stdout[-ws-<wordsize>][-<platform>|-<os>]
2109 #
2110 def find_expected_file(name, suff):
2111 basename = add_suffix(name, suff)
2112 # Override the basename if the user has specified one, this will then be
2113 # subjected to the same name mangling scheme as normal to allow platform
2114 # specific overrides to work.
2115 basename = getTestOpts().use_specs.get(suff, basename)
2116
2117 files = [basename + ws + plat
2118 for plat in ['-' + config.platform, '-' + config.os, '']
2119 for ws in ['-ws-' + config.wordsize, '']]
2120
2121 for f in files:
2122 if os.path.exists(in_srcdir(f)):
2123 return f
2124
2125 return basename
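# For example, with suff='stdout' for test 'T1' on an illustrative config
# (platform 'x86_64-unknown-linux', os 'linux', wordsize '64'), the candidate
# files are tried in this order:
#
#   T1.stdout-ws-64-x86_64-unknown-linux
#   T1.stdout-x86_64-unknown-linux
#   T1.stdout-ws-64-linux
#   T1.stdout-linux
#   T1.stdout-ws-64
#   T1.stdout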
2126
2127 if config.msys:
2128 import stat
2129 def cleanup():
2130 testdir = getTestOpts().testdir
2131 max_attempts = 5
2132 retries = max_attempts
2133 def on_error(function, path, excinfo):
2134 # At least one test (T11489) removes the write bit from a file it
2135 # produces. Windows refuses to delete read-only files with a
2136 # permission error. Try setting the write bit and try again.
2137 os.chmod(path, stat.S_IWRITE)
2138 function(path)
2139
2140 # On Windows we have to retry the delete a couple of times.
2141 # The reason for this is that a FileDelete command just marks a
2142 # file for deletion. The file is really only removed when the last
2143 # handle to the file is closed. Unfortunately there are a lot of
2144 # system services that can have a file temporarily opened using a shared
2145 # read-only lock, such as the built-in AV scanner and the search indexer.
2146 #
2147 # We can't really guarantee that these are all off, so what we do instead
2148 # is wait a bit and retry whenever the folder still exists after an rmtree.
2149 #
2150 # From what I've seen of the tests on the CI server, this is relatively rare,
2151 # so overall we won't be retrying a lot. If the folder is still locked after a
2152 # reasonable amount of time, we abort the current test by throwing an
2153 # exception, so that it doesn't fail later with an even more cryptic error.
2154 #
2155 # See #13162
2156 exception = None
2157 while retries > 0 and os.path.exists(testdir):
2158 time.sleep((max_attempts-retries)*6)
2159 try:
2160 shutil.rmtree(testdir, onerror=on_error, ignore_errors=False)
2161 except Exception as e:
2162 exception = e
2163 retries -= 1
2164
2165 if retries == 0 and os.path.exists(testdir):
2166 raise Exception("Unable to remove folder '%s': %s\nUnable to start current test."
2167 % (testdir, exception))
2168 else:
2169 def cleanup():
2170 testdir = getTestOpts().testdir
2171 if os.path.exists(testdir):
2172 shutil.rmtree(testdir, ignore_errors=False)
2173
2174
2175 # -----------------------------------------------------------------------------
2176 # Yield all the files ending in '.T' below the given root directories.
2177
2178 def findTFiles(roots):
2179 for root in roots:
2180 for path, dirs, files in os.walk(root, topdown=True):
2181 # Never pick up .T files in uncleaned .run directories.
2182 dirs[:] = [dir for dir in sorted(dirs)
2183 if not dir.endswith(testdir_suffix)]
2184 for filename in files:
2185 if filename.endswith('.T'):
2186 yield os.path.join(path, filename)
2187
2188 # -----------------------------------------------------------------------------
2189 # Output a test summary to the specified file object
2190
2191 def summary(t, file, short=False, color=False):
2192
2193 file.write('\n')
2194 printUnexpectedTests(file,
2195 [t.unexpected_passes, t.unexpected_failures,
2196 t.unexpected_stat_failures, t.framework_failures])
2197
2198 if short:
2199 # Only print the list of unexpected tests above.
2200 return
2201
2202 colorize = lambda s: s
2203 if color:
2204 if len(t.unexpected_failures) > 0 or \
2205 len(t.unexpected_stat_failures) > 0 or \
2206 len(t.unexpected_passes) > 0 or \
2207 len(t.framework_failures) > 0:
2208 colorize = str_fail
2209 else:
2210 colorize = str_pass
2211
2212 file.write(colorize('SUMMARY') + ' for test run started at '
2213 + time.strftime("%c %Z", t.start_time) + '\n'
2214 + str(datetime.timedelta(seconds=
2215 round(time.time() - time.mktime(t.start_time)))).rjust(8)
2216 + ' spent to go through\n'
2217 + repr(t.total_tests).rjust(8)
2218 + ' total tests, which gave rise to\n'
2219 + repr(t.total_test_cases).rjust(8)
2220 + ' test cases, of which\n'
2221 + repr(t.n_tests_skipped).rjust(8)
2222 + ' were skipped\n'
2223 + '\n'
2224 + repr(len(t.missing_libs)).rjust(8)
2225 + ' had missing libraries\n'
2226 + repr(t.n_expected_passes).rjust(8)
2227 + ' expected passes\n'
2228 + repr(t.n_expected_failures).rjust(8)
2229 + ' expected failures\n'
2230 + '\n'
2231 + repr(len(t.framework_failures)).rjust(8)
2232 + ' caused framework failures\n'
2233 + repr(len(t.framework_warnings)).rjust(8)
2234 + ' caused framework warnings\n'
2235 + repr(len(t.unexpected_passes)).rjust(8)
2236 + ' unexpected passes\n'
2237 + repr(len(t.unexpected_failures)).rjust(8)
2238 + ' unexpected failures\n'
2239 + repr(len(t.unexpected_stat_failures)).rjust(8)
2240 + ' unexpected stat failures\n'
2241 + '\n')
2242
2243 if t.unexpected_passes:
2244 file.write('Unexpected passes:\n')
2245 printTestInfosSummary(file, t.unexpected_passes)
2246
2247 if t.unexpected_failures:
2248 file.write('Unexpected failures:\n')
2249 printTestInfosSummary(file, t.unexpected_failures)
2250
2251 if t.unexpected_stat_failures:
2252 file.write('Unexpected stat failures:\n')
2253 printTestInfosSummary(file, t.unexpected_stat_failures)
2254
2255 if t.framework_failures:
2256 file.write('Framework failures:\n')
2257 printTestInfosSummary(file, t.framework_failures)
2258
2259 if t.framework_warnings:
2260 file.write('Framework warnings:\n')
2261 printTestInfosSummary(file, t.framework_warnings)
2262
2263 if stopping():
2264 file.write('WARNING: Testsuite run was terminated early\n')
2265
2266 def printUnexpectedTests(file, testInfoss):
2267 unexpected = set(result.testname
2268 for testInfos in testInfoss
2269 for result in testInfos
2270 if not result.testname.endswith('.T'))
2271 if unexpected:
2272 file.write('Unexpected results from:\n')
2273 file.write('TEST="' + ' '.join(sorted(unexpected)) + '"\n')
2274 file.write('\n')
2275
2276 def printTestInfosSummary(file, testInfos):
2277 maxDirLen = max(len(tr.directory) for tr in testInfos)
2278 for result in sorted(testInfos, key=lambda r: (r.testname.lower(), r.way, r.directory)):
2279 directory = result.directory.ljust(maxDirLen)
2280 file.write(' {directory} {r.testname} [{r.reason}] ({r.way})\n'.format(
2281 r = result,
2282 directory = directory))
2283 file.write('\n')
2284
2285 def modify_lines(s, f):
2286 s = '\n'.join([f(l) for l in s.splitlines()])
2287 if s and s[-1] != '\n':
2288 # Prevent '\ No newline at end of file' warnings when diffing.
2289 s += '\n'
2290 return s
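# e.g. (illustrative) modify_lines('a error: b\nc', lambda l: re.sub(' error:', '', l))
#      == 'a b\nc\n'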