# coding=utf8
#
# (c) Simon Marlow 2002
#

import io
import shutil
import os
import re
import traceback
import time
import datetime
import copy
import glob
import sys
from math import ceil, trunc
from pathlib import PurePath
import collections
import collections.abc
import subprocess

from testglobals import config, ghc_env, default_testopts, brokens, t, TestResult
from testutil import strip_quotes, lndir, link_or_copy_file, passed, failBecause, failBecauseStderr, str_fail, str_pass, testing_metrics
from cpu_features import have_cpu_feature
import perf_notes as Perf
from perf_notes import MetricChange
extra_src_files = {'T4198': ['exitminus1.c']} # TODO: See #12223

global pool_sema
if config.use_threads:
    import threading
    pool_sema = threading.BoundedSemaphore(value=config.threads)

global wantToStop
wantToStop = False

def stopNow():
    global wantToStop
    wantToStop = True

def stopping():
    return wantToStop


# Options valid for the current test only (these get reset to
# testdir_testopts after each test).

global testopts_local
if config.use_threads:
    testopts_local = threading.local()
else:
    class TestOpts_Local:
        pass
    testopts_local = TestOpts_Local()

def getTestOpts():
    return testopts_local.x

def setLocalTestOpts(opts):
    global testopts_local
    testopts_local.x = opts

def isCompilerStatsTest():
    opts = getTestOpts()
    return bool(opts.is_compiler_stats_test)

def isStatsTest():
    opts = getTestOpts()
    return opts.is_stats_test

# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
    global thisdir_settings
    thisdir_settings = [thisdir_settings, f]

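# For example (illustrative, not part of the original file), an all.T
# could begin with
#
#   setTestOpts(when(opsys('mingw32'), skip))
#
# to skip every test it defines on Windows ('when', 'opsys' and 'skip'
# are defined below).
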
# -----------------------------------------------------------------------------
# Canned setup functions for common cases. E.g. for a test you might say
#
#      test('test001', normal, compile, [''])
#
# to run it without any options, but change it to
#
#      test('test001', expect_fail, compile, [''])
#
# to expect failure for this test.
#
# type TestOpt = (name :: String, opts :: Object) -> IO ()
def normal( name, opts ):
    return

def skip( name, opts ):
    opts.skip = True

def expect_fail( name, opts ):
    # The compiler, testdriver, OS or platform is missing a certain
    # feature, and we don't plan to or can't fix it now or in the
    # future.
    opts.expect = 'fail'

def reqlib( lib ):
    return lambda name, opts, l=lib: _reqlib (name, opts, l )

def stage1(name, opts):
    # See Note [Why is there no stage1 setup function?]
    framework_fail(name, 'stage1 setup function does not exist',
                   'add your test to testsuite/tests/stage1 instead')

# Note [Why is there no stage1 setup function?]
#
# Presumably a stage1 setup function would signal that the stage1
# compiler should be used to compile a test.
#
# Trouble is, the path to the compiler + the `ghc --info` settings for
# that compiler are currently passed in from the `make` part of the
# testsuite driver.
#
# Switching compilers in the Python part would be entirely too late, as
# all ghc_with_* settings would be wrong. See config/ghc for possible
# consequences (for example, config.run_ways would still be
# based on the default compiler, quite likely causing ./validate --slow
# to fail).
#
# It would be possible to let the Python part of the testsuite driver
# make the call to `ghc --info`, but doing so would require quite some
# work. Care has to be taken to not affect the run_command tests for
# example, as they also use the `ghc --info` settings:
# quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
#
# If you want a test to run using the stage1 compiler, add it to the
# testsuite/tests/stage1 directory. Validate runs the tests in that
# directory with `make stage=1`.

# Cache the results of looking to see if we have a library or not.
# This makes quite a difference, especially on Windows.
have_lib_cache = {}

def have_library(lib):
    """ Test whether the given library is available """
    if lib in have_lib_cache:
        got_it = have_lib_cache[lib]
    else:
        cmd = strip_quotes(config.ghc_pkg)
        p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             env=ghc_env)
        # read from stdout and stderr to avoid blocking due to
        # buffers filling
        p.communicate()
        r = p.wait()
        got_it = r == 0
        have_lib_cache[lib] = got_it

    return got_it

def _reqlib( name, opts, lib ):
    if not have_library(lib):
        opts.expect = 'missing-lib'

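# Example .T usage (illustrative):
#
#   test('T1234', reqlib('containers'), compile, [''])
#
# If `ghc-pkg describe containers` fails, the test is marked as
# 'missing-lib' instead of being reported as a failure.
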
def req_haddock( name, opts ):
    if not config.haddock:
        opts.expect = 'missing-lib'

def req_profiling( name, opts ):
    '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
    if not config.have_profiling:
        opts.expect = 'fail'

def req_shared_libs( name, opts ):
    if not config.have_shared_libs:
        opts.expect = 'fail'

def req_interp( name, opts ):
    if not config.have_interp:
        opts.expect = 'fail'

def req_smp( name, opts ):
    if not config.have_smp:
        opts.expect = 'fail'

def ignore_stdout(name, opts):
    opts.ignore_stdout = True

def ignore_stderr(name, opts):
    opts.ignore_stderr = True

def combined_output( name, opts ):
    opts.combined_output = True

def use_specs( specs ):
    """
    use_specs allows one to override files based on suffixes, e.g. 'stdout',
    'stderr', 'asm', 'prof.sample', etc.

    Example: use_specs({'stdout' : 'prof002.stdout'}) makes the test re-use
    prof002.stdout.

    Full example:
      test('T5889', [only_ways(['normal']), req_profiling,
                     extra_files(['T5889/A.hs', 'T5889/B.hs']),
                     use_specs({'stdout' : 'prof002.stdout'})],
           multimod_compile,
           ['A B', '-O -prof -fno-prof-count-entries -v0'])
    """
    return lambda name, opts, s=specs: _use_specs( name, opts, s )

def _use_specs( name, opts, specs ):
    opts.extra_files.extend(specs.values())
    opts.use_specs = specs

# -----

def expect_fail_for( ways ):
    return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )

def _expect_fail_for( name, opts, ways ):
    opts.expect_fail_for = ways

def expect_broken( bug ):
    # This test is expected not to work due to the indicated trac bug
    # number.
    return lambda name, opts, b=bug: _expect_broken (name, opts, b )

def _expect_broken( name, opts, bug ):
    record_broken(name, opts, bug)
    opts.expect = 'fail'

def expect_broken_for( bug, ways ):
    return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )

def _expect_broken_for( name, opts, bug, ways ):
    record_broken(name, opts, bug)
    opts.expect_fail_for = ways

def record_broken(name, opts, bug):
    me = (bug, opts.testdir, name)
    if me not in brokens:
        brokens.append(me)

def _expect_pass(way):
    # Helper function. Not intended for use in .T files.
    opts = getTestOpts()
    return opts.expect == 'pass' and way not in opts.expect_fail_for

# -----

def fragile( bug ):
    """
    Indicates that the test should be skipped due to fragility documented in
    the given ticket.
    """
    def helper( name, opts, bug=bug ):
        record_broken(name, opts, bug)
        opts.skip = True

    return helper

def fragile_for( bug, ways ):
    """
    Indicates that the test should be skipped due to fragility in the given
    test ways as documented in the given ticket.
    """
    def helper( name, opts, bug=bug, ways=ways ):
        record_broken(name, opts, bug)
        opts.omit_ways = ways

    return helper

# -----

def omit_ways( ways ):
    return lambda name, opts, w=ways: _omit_ways( name, opts, w )

def _omit_ways( name, opts, ways ):
    opts.omit_ways = ways

# -----

def only_ways( ways ):
    return lambda name, opts, w=ways: _only_ways( name, opts, w )

def _only_ways( name, opts, ways ):
    opts.only_ways = ways

# -----

def extra_ways( ways ):
    return lambda name, opts, w=ways: _extra_ways( name, opts, w )

def _extra_ways( name, opts, ways ):
    opts.extra_ways = ways

# -----

def set_stdin( file ):
    return lambda name, opts, f=file: _set_stdin(name, opts, f)

def _set_stdin( name, opts, f ):
    opts.stdin = f

# -----

def exit_code( val ):
    return lambda name, opts, v=val: _exit_code(name, opts, v)

def _exit_code( name, opts, v ):
    opts.exit_code = v

def signal_exit_code( val ):
    if opsys('solaris2'):
        return exit_code( val )
    else:
        # When an application running on Linux receives a fatal error
        # signal, its exit code is encoded as 128 + the signal value.
        # See http://www.tldp.org/LDP/abs/html/exitcodes.html
        # Mac OS X appears to behave the same way; at least the Mac
        # OS X builder's behaviour suggests this.
        return exit_code( val+128 )

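# Worked example (illustrative): a program that dies of SIGSEGV (signal 11)
# on Linux exits with code 128 + 11 = 139, so
#
#   test('T-segv', [signal_exit_code(11)], compile_and_run, [''])
#
# expects exit code 139 there, but the bare signal value on Solaris.
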
# -----

def compile_timeout_multiplier( val ):
    return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)

def _compile_timeout_multiplier( name, opts, v ):
    opts.compile_timeout_multiplier = v

def run_timeout_multiplier( val ):
    return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)

def _run_timeout_multiplier( name, opts, v ):
    opts.run_timeout_multiplier = v

# -----

def extra_run_opts( val ):
    return lambda name, opts, v=val: _extra_run_opts(name, opts, v)

def _extra_run_opts( name, opts, v ):
    opts.extra_run_opts = v

# -----

def extra_hc_opts( val ):
    return lambda name, opts, v=val: _extra_hc_opts(name, opts, v)

def _extra_hc_opts( name, opts, v ):
    opts.extra_hc_opts = v

# -----

def extra_clean( files ):
    # TODO. Remove all calls to extra_clean.
    return lambda _name, _opts: None

def extra_files(files):
    return lambda name, opts: _extra_files(name, opts, files)

def _extra_files(name, opts, files):
    opts.extra_files.extend(files)

# -----

# Defaults to "test everything, and only break on extreme cases"
#
# The inputs to this function are slightly interesting:
# metric can be either:
#     - 'all', in which case all 3 possible metrics are collected and compared.
#     - The specific metric one wants to use in the test.
#     - A list of the metrics one wants to use in the test.
#
# Deviation defaults to 20% because the goal is correctness over performance.
# The testsuite should avoid breaking when there is not an actual error.
# Instead, the testsuite should notify of regressions in a non-breaking manner.
#
# collect_compiler_stats is used when the metrics collected are about the compiler.
# collect_stats is used in the majority case when the metrics to be collected
# are about the performance of the runtime code generated by the compiler.
def collect_compiler_stats(metric='all', deviation=20):
    return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d, True)

def collect_stats(metric='all', deviation=20):
    return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d)

# This is an internal function that is used only in the implementation.
# 'is_compiler_stats_test' is somewhat of an unfortunate name.
# If the boolean is set to true, it indicates that this test is one that
# measures the performance numbers of the compiler.
# As this is a fairly rare case in the testsuite, it defaults to false to
# indicate that it is a 'normal' performance test.
def _collect_stats(name, opts, metrics, deviation, is_compiler_stats_test=False):
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name.')

    # Normalize metrics to a list of strings.
    if isinstance(metrics, str):
        if metrics == 'all':
            metrics = testing_metrics()
        else:
            metrics = [metrics]

    opts.is_stats_test = True
    if is_compiler_stats_test:
        opts.is_compiler_stats_test = True

    # Compiler performance numbers change when debugging is on, making the results
    # useless and confusing. Therefore, skip if debugging is on.
    if config.compiler_debugged and is_compiler_stats_test:
        opts.skip = True

    for metric in metrics:
        def baselineByWay(way, target_commit, metric=metric):
            return Perf.baseline_metric( \
                target_commit, name, config.test_env, metric, way)

        opts.stats_range_fields[metric] = (baselineByWay, deviation)

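# Example .T usage (illustrative):
#
#   test('perf001', collect_stats('bytes allocated', 5), compile_and_run, ['-O'])
#
# checks the program's 'bytes allocated' against the baseline with a 5%
# allowed deviation, while collect_compiler_stats(...) would instead
# measure the compiler compiling the test.
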
# -----

def when(b, f):
    # When list_broken is on, we want to see all expect_broken calls,
    # so we always do f
    if b or config.list_broken:
        return f
    else:
        return normal

def unless(b, f):
    return when(not b, f)

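# Example (illustrative):
#
#   test('T100', when(msys(), expect_broken(1234)), compile, [''])
#
# marks the test as broken (under a hypothetical ticket #1234) only on
# msys; unless(b, f) applies f when the condition does *not* hold.
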
def doing_ghci():
    return 'ghci' in config.run_ways

def ghc_dynamic():
    return config.ghc_dynamic

def fast():
    return config.speed == 2

def platform( plat ):
    return config.platform == plat

def opsys( os ):
    return config.os == os

def arch( arch ):
    return config.arch == arch

def wordsize( ws ):
    return config.wordsize == str(ws)

def msys( ):
    return config.msys

def cygwin( ):
    return config.cygwin

def have_vanilla( ):
    return config.have_vanilla

def have_ncg( ):
    return config.have_ncg

def have_dynamic( ):
    return config.have_dynamic

def have_profiling( ):
    return config.have_profiling

def in_tree_compiler( ):
    return config.in_tree_compiler

def unregisterised( ):
    return config.unregisterised

def compiler_profiled( ):
    return config.compiler_profiled

def compiler_debugged( ):
    return config.compiler_debugged

def have_gdb( ):
    return config.have_gdb

def have_readelf( ):
    return config.have_readelf

def integer_gmp( ):
    return have_library("integer-gmp")

def integer_simple( ):
    return have_library("integer-simple")

def llvm_build( ):
    return config.ghc_built_by_llvm

# ---

def high_memory_usage(name, opts):
    opts.alone = True

# If a test is for a multi-CPU race, then running the test alone
# increases the chance that we'll actually see it.
def multi_cpu_race(name, opts):
    opts.alone = True

# ---
def literate( name, opts ):
    opts.literate = True

def c_src( name, opts ):
    opts.c_src = True

def objc_src( name, opts ):
    opts.objc_src = True

def objcpp_src( name, opts ):
    opts.objcpp_src = True

def cmm_src( name, opts ):
    opts.cmm_src = True

def outputdir( odir ):
    return lambda name, opts, d=odir: _outputdir(name, opts, d)

def _outputdir( name, opts, odir ):
    opts.outputdir = odir

# ----

def pre_cmd( cmd ):
    return lambda name, opts, c=cmd: _pre_cmd(name, opts, cmd)

def _pre_cmd( name, opts, cmd ):
    opts.pre_cmd = cmd

# ----

def cmd_prefix( prefix ):
    return lambda name, opts, p=prefix: _cmd_prefix(name, opts, prefix)

def _cmd_prefix( name, opts, prefix ):
    opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd

# ----

def cmd_wrapper( fun ):
    return lambda name, opts, f=fun: _cmd_wrapper(name, opts, fun)

def _cmd_wrapper( name, opts, fun ):
    opts.cmd_wrapper = fun

# ----

def compile_cmd_prefix( prefix ):
    return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, prefix)

def _compile_cmd_prefix( name, opts, prefix ):
    opts.compile_cmd_prefix = prefix

# ----

def check_stdout( f ):
    return lambda name, opts, f=f: _check_stdout(name, opts, f)

def _check_stdout( name, opts, f ):
    opts.check_stdout = f

def no_check_hp(name, opts):
    opts.check_hp = False

# ----

def filter_stdout_lines( regex ):
    """ Filter lines of stdout with the given regular expression """
    def f( name, opts ):
        _normalise_fun(name, opts, lambda s: '\n'.join(re.findall(regex, s)))
    return f

def normalise_slashes( name, opts ):
    _normalise_fun(name, opts, normalise_slashes_)

def normalise_exe( name, opts ):
    _normalise_fun(name, opts, normalise_exe_)

def normalise_fun( *fs ):
    return lambda name, opts: _normalise_fun(name, opts, fs)

def _normalise_fun( name, opts, *fs ):
    opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)

def normalise_errmsg_fun( *fs ):
    return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)

def _normalise_errmsg_fun( name, opts, *fs ):
    opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)

def check_errmsg(needle):
    def norm(str):
        if needle in str:
            return "%s contained in -ddump-simpl\n" % needle
        else:
            return "%s not contained in -ddump-simpl\n" % needle
    return normalise_errmsg_fun(norm)

def grep_errmsg(needle):
    def norm(str):
        return "".join(filter(lambda l: re.search(needle, l), str.splitlines(True)))
    return normalise_errmsg_fun(norm)

def normalise_whitespace_fun(f):
    return lambda name, opts: _normalise_whitespace_fun(name, opts, f)

def _normalise_whitespace_fun(name, opts, f):
    opts.whitespace_normaliser = f

def normalise_version_( *pkgs ):
    def normalise_version__( str ):
        return re.sub('(' + '|'.join(map(re.escape, pkgs)) + ')-[0-9.]+',
                      '\\1-<VERSION>', str)
    return normalise_version__

def normalise_version( *pkgs ):
    def normalise_version__( name, opts ):
        _normalise_fun(name, opts, normalise_version_(*pkgs))
        _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
    return normalise_version__

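# For example (illustrative), normalise_version('base') rewrites output
# such as
#
#   package base-4.12.0.0 is required
#
# to 'package base-<VERSION> is required' in both stdout and stderr
# before comparison, so tests don't break on version bumps.
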
def normalise_drive_letter(name, opts):
    # Windows only. Change D:\\ to C:\\.
    _normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))

def keep_prof_callstacks(name, opts):
    """Keep profiling callstacks.

    Use together with `only_ways(prof_ways)`.
    """
    opts.keep_prof_callstacks = True

def join_normalisers(*a):
    """
    Compose functions, flattening sequences.

       join_normalisers(f1,[f2,f3],f4)

    is the same as

       lambda x: f1(f2(f3(f4(x))))
    """

    def flatten(l):
        """
        Taken from http://stackoverflow.com/a/2158532/946226
        """
        for el in l:
            if (isinstance(el, collections.abc.Iterable)
                and not isinstance(el, (bytes, str))):
                for sub in flatten(el):
                    yield sub
            else:
                yield el

    a = flatten(a)

    fn = lambda x: x # identity function
    for f in a:
        assert callable(f)
        fn = lambda x, f=f, fn=fn: fn(f(x))
    return fn

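# Quick illustration of the composition order (not from the original
# source):
#
#   f = join_normalisers(lambda s: s + '!', [lambda s: s.upper()])
#   f('ok')  ==>  'OK!'    # the rightmost normaliser runs first
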
# ----
# Function for composing two opt-fns together

def executeSetups(fs, name, opts):
    if type(fs) is list:
        # If we have a list of setups, then execute each one
        for f in fs:
            executeSetups(f, name, opts)
    else:
        # fs is a single function, so just apply it
        fs(name, opts)

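# For example (illustrative), setups may nest arbitrarily:
#
#   executeSetups([thisdir_settings, [req_profiling, omit_ways(['ghci'])]],
#                 name, opts)
#
# applies thisdir_settings, then req_profiling, then omit_ways to the
# same opts record.
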
# -----------------------------------------------------------------------------
# The current directory of tests

def newTestDir(tempdir, dir):

    global thisdir_settings
    # reset the options for this test directory
    def settings(name, opts, tempdir=tempdir, dir=dir):
        return _newTestDir(name, opts, tempdir, dir)
    thisdir_settings = settings

# Should be equal to entry in toplevel .gitignore.
testdir_suffix = '.run'

def _newTestDir(name, opts, tempdir, dir):
    testdir = os.path.join('', *(p for p in PurePath(dir).parts if p != '..'))
    opts.srcdir = os.path.join(os.getcwd(), dir)
    opts.testdir = os.path.join(tempdir, testdir, name + testdir_suffix)
    opts.compiler_always_flags = config.compiler_always_flags

# -----------------------------------------------------------------------------
# Actually doing tests

parallelTests = []
aloneTests = []
allTestNames = set([])

def runTest(watcher, opts, name, func, args):
    if config.use_threads:
        pool_sema.acquire()
        t = threading.Thread(target=test_common_thread,
                             name=name,
                             args=(watcher, name, opts, func, args))
        t.daemon = False
        t.start()
    else:
        test_common_work(watcher, name, opts, func, args)

# name  :: String
# setup :: [TestOpt] -> IO ()
def test(name, setup, func, args):
    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    if config.run_only_some_tests:
        if name not in config.only:
            return
        else:
            # Note [Mutating config.only]
            # config.only is initially the set of tests requested by
            # the user (via 'make TEST='). We then remove all tests that
            # we've already seen (in .T files), so that we can later
            # report on any tests we couldn't find and error out.
            config.only.remove(name)

    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
    myTestOpts = copy.deepcopy(default_testopts)

    executeSetups([thisdir_settings, setup], name, myTestOpts)

    thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
    else:
        parallelTests.append(thisTest)
    allTestNames.add(name)

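# A typical .T entry (illustrative):
#
#   test('T555', [extra_files(['Helper.hs']), omit_ways(['ghci'])],
#        multimod_compile_and_run, ['T555', '-O'])
#
# 'setup' may be a single setup function or a (nested) list of them; all
# are applied to this test's private copy of the default options.
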
if config.use_threads:
    def test_common_thread(watcher, name, opts, func, args):
        try:
            test_common_work(watcher, name, opts, func, args)
        finally:
            pool_sema.release()

def get_package_cache_timestamp():
    if config.package_conf_cache_file == '':
        return 0.0
    else:
        try:
            return os.stat(config.package_conf_cache_file).st_mtime
        except:
            return 0.0

do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o', '.out') # 12112

def test_common_work(watcher, name, opts, func, args):
    try:
        t.total_tests += 1
        setLocalTestOpts(opts)

        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()

        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']

        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]

        t.total_test_cases += len(all_ways)

        ok_way = lambda way: \
            not getTestOpts().skip \
            and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and (not (config.only_perf_tests and not isStatsTest())) \
            and way not in getTestOpts().omit_ways

        # Which ways we are going to run
        do_ways = list(filter(ok_way, all_ways))

        # Only run all ways in slow mode.
        # See Note [validate and testsuite speed] in toplevel Makefile.
        if config.accept:
            # Only ever run one way
            do_ways = do_ways[:1]
        elif config.speed > 0:
            # However, if we EXPLICITLY asked for a way (with extra_ways)
            # please test it!
            explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
            other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
            do_ways = other_ways[:1] + explicit_ways

        # Find all files in the source directory that this test
        # depends on. Do this only once for all ways.
        # Generously add all filenames that start with the name of
        # the test to this set, as a convenience to test authors.
        # They will have to use the `extra_files` setup function to
        # specify all other files that their test depends on (but
        # this seems to be necessary for only about 10% of all
        # tests).
        files = set(f for f in os.listdir(opts.srcdir)
                    if f.startswith(name) and not f == name and
                       not f.endswith(testdir_suffix) and
                       not os.path.splitext(f)[1] in do_not_copy)
        for filename in (opts.extra_files + extra_src_files.get(name, [])):
            if filename.startswith('/'):
                framework_fail(name, 'whole-test',
                               'no absolute paths in extra_files please: ' + filename)

            elif '*' in filename:
                # Don't use wildcards in extra_files too much, as
                # globbing is slow.
                files.update((os.path.relpath(f, opts.srcdir)
                              for f in glob.iglob(in_srcdir(filename))))

            elif filename:
                files.add(filename)

            else:
                framework_fail(name, 'whole-test', 'extra_file is empty string')

        # Run the required tests...
        for way in do_ways:
            if stopping():
                break
            try:
                do_test(name, way, func, args, files)
            except KeyboardInterrupt:
                stopNow()
            except Exception as e:
                framework_fail(name, way, str(e))
                traceback.print_exc()

        t.n_tests_skipped += len(set(all_ways) - set(do_ways))

        if config.cleanup and do_ways:
            try:
                cleanup()
            except Exception as e:
                framework_fail(name, 'runTest', 'Unhandled exception during cleanup: ' + str(e))

        package_conf_cache_file_end_timestamp = get_package_cache_timestamp()

        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))

    except Exception as e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
    finally:
        watcher.notify()

def do_test(name, way, func, args, files):
    opts = getTestOpts()

    full_name = name + '(' + way + ')'

    progress_args = [ full_name, t.total_tests, len(allTestNames),
                      [len(t.unexpected_passes),
                       len(t.unexpected_failures),
                       len(t.framework_failures)]]
    if_verbose(2, "=====> {0} {1} of {2} {3}".format(*progress_args))

    # Update the terminal title. This is a useful progress indicator
    # even when running 'make test VERBOSE=1'.
    if config.supports_colors:
        print("\033]0;{0} {1} of {2} {3}\007".format(*progress_args), end="")
        sys.stdout.flush()

    # Clean up prior to the test, so that we can't spuriously conclude
    # that it passed on the basis of old run outputs.
    cleanup()
    os.makedirs(opts.testdir)

    # Link all source files for this test into a new directory in
    # /tmp, and run the test in that directory. This makes it
    # possible to run tests in parallel, without modification, that
    # would otherwise (accidentally) write to the same output file.
    # It also makes it easier to keep the testsuite clean.

    for extra_file in files:
        src = in_srcdir(extra_file)
        dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
        if os.path.isfile(src):
            link_or_copy_file(src, dst)
        elif os.path.isdir(src):
            if os.path.exists(dst):
                shutil.rmtree(dst)
            os.mkdir(dst)
            lndir(src, dst)
        else:
            if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
                # When using a ghc built without haddock support, .t
                # files are rightfully missing. Don't
                # framework_fail. Test will be skipped later.
                pass
            else:
                framework_fail(name, way,
                               'extra_file does not exist: ' + extra_file)

    if func.__name__ == 'run_command' or func.__name__ == 'makefile_test' or opts.pre_cmd:
        # When running 'MAKE' make sure 'TOP' still points to the
        # root of the testsuite.
        src_makefile = in_srcdir('Makefile')
        dst_makefile = in_testdir('Makefile')
        if os.path.exists(src_makefile):
            with io.open(src_makefile, 'r', encoding='utf8') as src:
                makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
                with io.open(dst_makefile, 'w', encoding='utf8') as dst:
                    dst.write(makefile)

    if opts.pre_cmd:
        exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, override_options(opts.pre_cmd)),
                           stderr = subprocess.STDOUT,
                           print_output = config.verbose >= 3)

        # If the user used expect_broken, don't record failures of pre_cmd
        if exit_code != 0 and opts.expect not in ['fail']:
            framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
            if_verbose(1, '** pre_cmd was "{0}".'.format(override_options(opts.pre_cmd)))

    result = func(*[name, way] + args)

    if opts.expect not in ['pass', 'fail', 'missing-lib']:
        framework_fail(name, way, 'bad expected ' + opts.expect)

    try:
        passFail = result['passFail']
    except (KeyError, TypeError):
        passFail = 'No passFail found'

    directory = re.sub('^\\.[/\\\\]', '', opts.testdir)

    if passFail == 'pass':
        if _expect_pass(way):
            t.expected_passes.append(TestResult(directory, name, "", way))
            t.n_expected_passes += 1
        else:
            if_verbose(1, '*** unexpected pass for %s' % full_name)
            t.unexpected_passes.append(TestResult(directory, name, 'unexpected', way))
    elif passFail == 'fail':
        if _expect_pass(way):
            reason = result['reason']
            tag = result.get('tag')
            if tag == 'stat':
                if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
                t.unexpected_stat_failures.append(TestResult(directory, name, reason, way))
            else:
                if_verbose(1, '*** unexpected failure for %s' % full_name)
                result = TestResult(directory, name, reason, way, stderr=result.get('stderr'))
                t.unexpected_failures.append(result)
        else:
            if opts.expect == 'missing-lib':
                t.missing_libs.append(TestResult(directory, name, 'missing-lib', way))
            else:
                t.n_expected_failures += 1
    else:
        framework_fail(name, way, 'bad result ' + passFail)

# Make is often invoked with -s, which means if it fails, we get
# no feedback at all. This is annoying. So let's remove the option
# if found and instead have the testsuite decide on what to do
# with the output.
def override_options(pre_cmd):
    if config.verbose >= 5 and bool(re.match(r'\$make', pre_cmd, re.I)):
        return pre_cmd.replace('-s'      , '') \
                      .replace('--silent', '') \
                      .replace('--quiet' , '')

    return pre_cmd

def framework_fail(name, way, reason):
    opts = getTestOpts()
    directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
    t.framework_failures.append(TestResult(directory, name, reason, way))

def framework_warn(name, way, reason):
    opts = getTestOpts()
    directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework warning for %s %s ' % (full_name, reason))
    t.framework_warnings.append(TestResult(directory, name, reason, way))

def badResult(result):
    try:
        if result['passFail'] == 'pass':
            return False
        return True
    except (KeyError, TypeError):
        return True

# -----------------------------------------------------------------------------
# Generic command tests

# A generic command test is expected to run and exit successfully.
#
# The expected exit code can be changed via exit_code() as normal, and
# the expected stdout/stderr are stored in <testname>.stdout and
# <testname>.stderr. The output of the command can be ignored
# altogether by using the setup functions ignore_stdout and
# ignore_stderr.

def run_command( name, way, cmd ):
    return simple_run( name, '', override_options(cmd), '' )

def makefile_test( name, way, target=None ):
    if target is None:
        target = name

    cmd = '$MAKE -s --no-print-directory {target}'.format(target=target)
    return run_command(name, way, cmd)

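# Example (illustrative): a test driven by a Makefile target of the same
# name can be written as
#
#   test('T600', normal, makefile_test, [])
#
# which runs '$MAKE -s --no-print-directory T600' in the test directory.
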
# -----------------------------------------------------------------------------
# GHCi tests

def ghci_script( name, way, script ):
    flags = ' '.join(get_compiler_flags())
    way_flags = ' '.join(config.way_flags[way])

    # We pass HC and HC_OPTS as environment variables, so that the
    # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
    cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {way_flags} {flags}'
          ).format(flags=flags, way_flags=way_flags)
    # NB: put way_flags before flags so that flags in all.T can override others

    getTestOpts().stdin = script
    return simple_run( name, way, cmd, getTestOpts().extra_run_opts )

# -----------------------------------------------------------------------------
# Compile-only tests

def compile( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], extra_hc_opts )

def compile_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], extra_hc_opts )

def backpack_typecheck( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )

def backpack_typecheck_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )

def backpack_compile( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], extra_hc_opts, backpack=True )

def backpack_compile_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], extra_hc_opts, backpack=True )

def backpack_run( name, way, extra_hc_opts ):
    return compile_and_run__( name, way, '', [], extra_hc_opts, backpack=True )

def multimod_compile( name, way, top_mod, extra_hc_opts ):
    return do_compile( name, way, 0, top_mod, [], extra_hc_opts )

def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
    return do_compile( name, way, 1, top_mod, [], extra_hc_opts )

def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
    return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)

def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
    return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)

def do_compile(name, way, should_fail, top_mod, extra_mods, extra_hc_opts, **kwargs):
    # print 'Compile only, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    result = simple_build(name, way, extra_hc_opts, should_fail, top_mod, 0, 1, **kwargs)

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    expected_stderr_file = find_expected_file(name, 'stderr')
    actual_stderr_file = add_suffix(name, 'comp.stderr')
    diff_file_name = in_testdir(add_suffix(name, 'comp.diff'))

    if not compare_outputs(way, 'stderr',
                           join_normalisers(getTestOpts().extra_errmsg_normaliser,
                                            normalise_errmsg),
                           expected_stderr_file, actual_stderr_file,
                           diff_file=diff_file_name,
                           whitespace_normaliser=getattr(getTestOpts(),
                                                         "whitespace_normaliser",
                                                         normalise_whitespace)):
        stderr = open(diff_file_name, 'rb').read()
        os.remove(diff_file_name)
        return failBecauseStderr('stderr mismatch', stderr=stderr)

    # no problems found, this test passed
    return passed()

def compile_cmp_asm( name, way, ext, extra_hc_opts ):
    print('Compile only, extra args = ', extra_hc_opts)
    result = simple_build(name + '.' + ext, way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)

    if badResult(result):
        return result

    # the actual assembly should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    expected_asm_file = find_expected_file(name, 'asm')
    actual_asm_file = add_suffix(name, 's')

    if not compare_outputs(way, 'asm',
                           join_normalisers(normalise_errmsg, normalise_asm),
                           expected_asm_file, actual_asm_file):
        return failBecause('asm mismatch')

    # no problems found, this test passed
    return passed()

def compile_grep_asm( name, way, ext, is_substring, extra_hc_opts ):
    print('Compile only, extra args = ', extra_hc_opts)
    result = simple_build(name + '.' + ext, way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)

    if badResult(result):
        return result

    expected_pat_file = find_expected_file(name, 'asm')
    actual_asm_file = add_suffix(name, 's')

    if not grep_output(join_normalisers(normalise_errmsg),
                       expected_pat_file, actual_asm_file,
                       is_substring):
        return failBecause('asm mismatch')

    # no problems found, this test passed
    return passed()

# -----------------------------------------------------------------------------
# Compile-and-run tests

def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts, backpack=0 ):
    # print 'Compile and run, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    if way.startswith('ghci'): # interpreted...
        return interpreter_run(name, way, extra_hc_opts, top_mod)
    else: # compiled...
        result = simple_build(name, way, extra_hc_opts, 0, top_mod, 1, 1, backpack = backpack)
        if badResult(result):
            return result

        cmd = './' + name

        # we don't check the compiler's stderr for a compile-and-run test
        return simple_run( name, way, cmd, getTestOpts().extra_run_opts )

def compile_and_run( name, way, extra_hc_opts ):
    return compile_and_run__( name, way, '', [], extra_hc_opts)

def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
    return compile_and_run__( name, way, top_mod, [], extra_hc_opts)

def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
    return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)

def stats( name, way, stats_file ):
    opts = getTestOpts()
    return check_stats(name, way, in_testdir(stats_file), opts.stats_range_fields)

def static_stats( name, way, stats_file ):
    opts = getTestOpts()
    return check_stats(name, way, in_statsdir(stats_file), opts.stats_range_fields)

def metric_dict(name, way, metric, value):
    return Perf.PerfStat(
        test_env = config.test_env,
        test     = name,
        way      = way,
        metric   = metric,
        value    = value)

# -----------------------------------------------------------------------------
# Check test stats. This prints the results for the user.
# name: name of the test.
# way: the way.
# stats_file: the path of the stats_file containing the stats for the test.
# range_fields: see TestOptions.stats_range_fields
# Returns a pass/fail object. Passes if the stats are within the expected value ranges.
def check_stats(name, way, stats_file, range_fields):
    head_commit = Perf.commit_hash('HEAD') if Perf.inside_git_repo() else None
    result = passed()
    if range_fields:
        try:
            f = open(stats_file)
        except IOError as e:
            return failBecause(str(e))
        stats_file_contents = f.read()
        f.close()

        for (metric, baseline_and_dev) in range_fields.items():
            field_match = re.search(r'\("' + metric + r'", "([0-9]+)"\)', stats_file_contents)
            if field_match is None:
                print('Failed to find metric: ', metric)
                metric_result = failBecause('no such stats metric')
            else:
                actual_val = int(field_match.group(1))

                # Store the metric so it can later be stored in a git note.
                perf_stat = metric_dict(name, way, metric, actual_val)
                change = None

                # If this is the first time running the benchmark, then pass.
                baseline = baseline_and_dev[0](way, head_commit) \
                    if Perf.inside_git_repo() else None
                if baseline is None:
                    metric_result = passed()
                    change = MetricChange.NewMetric
                else:
                    tolerance_dev = baseline_and_dev[1]
                    (change, metric_result) = Perf.check_stats_change(
                        perf_stat,
                        baseline,
                        tolerance_dev,
                        config.allowed_perf_changes,
                        config.verbose >= 4)
                t.metrics.append((change, perf_stat))

            # If any metric fails then the test fails.
            # Note, the remaining metrics are still run so that
            # a complete list of changes can be presented to the user.
            if metric_result['passFail'] == 'fail':
                result = metric_result

    return result

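# The stats file parsed above is '+RTS -t --machine-readable' output,
# i.e. lines of the shape (values illustrative):
#
#   ("bytes allocated", "506784")
#   ("peak_megabytes_allocated", "2")
#
# from which the regular expression in check_stats extracts each
# metric's integer value.
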
# -----------------------------------------------------------------------------
# Build a single-module program

def extras_build( way, extra_mods, extra_hc_opts ):
    for mod, opts in extra_mods:
        result = simple_build(mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0)
        if not (mod.endswith('.hs') or mod.endswith('.lhs')):
            extra_hc_opts += ' ' + replace_suffix(mod, 'o')
        if badResult(result):
            return result

    return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}

def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, backpack = False):
    opts = getTestOpts()

    # Redirect stdout and stderr to the same file
    stdout = in_testdir(name, 'comp.stderr')
    stderr = subprocess.STDOUT

    if top_mod != '':
        srcname = top_mod
    elif addsuf:
        if backpack:
            srcname = add_suffix(name, 'bkp')
        else:
            srcname = add_hs_lhs_suffix(name)
    else:
        srcname = name

    if top_mod != '':
        to_do = '--make '
        if link:
            to_do = to_do + '-o ' + name
    elif backpack:
        if link:
            to_do = '-o ' + name + ' '
        else:
            to_do = ''
        to_do = to_do + '--backpack '
    elif link:
        to_do = '-o ' + name
    else:
        to_do = '-c' # just compile

    stats_file = name + '.comp.stats'
    if isCompilerStatsTest():
        extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    if backpack:
        extra_hc_opts += ' -outputdir ' + name + '.out'

    # Required by GHC 7.3+, harmless for earlier versions:
    if (getTestOpts().c_src or
        getTestOpts().objc_src or
        getTestOpts().objcpp_src or
        getTestOpts().cmm_src):
        extra_hc_opts += ' -no-hs-main '

    if getTestOpts().compile_cmd_prefix == '':
        cmd_prefix = ''
    else:
        cmd_prefix = getTestOpts().compile_cmd_prefix + ' '

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
           '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())

    exit_code = runCmd(cmd, None, stdout, stderr, opts.compile_timeout_multiplier)

    actual_stderr_path = in_testdir(name, 'comp.stderr')

    if exit_code != 0 and not should_fail:
        if config.verbose >= 1 and _expect_pass(way):
            print('Compile failed (exit code {0}) errors were:'.format(exit_code))
            dump_file(actual_stderr_path)

    # ToDo: if the sub-shell was killed by ^C, then exit

    if isCompilerStatsTest():
        statsResult = check_stats(name, way, in_testdir(stats_file), opts.stats_range_fields)
        if badResult(statsResult):
            return statsResult

    if should_fail:
        if exit_code == 0:
            stderr_contents = open(actual_stderr_path, 'rb').read()
            return failBecauseStderr('exit code 0', stderr_contents)
    else:
        if exit_code != 0:
            stderr_contents = open(actual_stderr_path, 'rb').read()
            return failBecauseStderr('exit code non-0', stderr_contents)

    return passed()

# -----------------------------------------------------------------------------
# Run a program and check its output
#
# If testname.stdin exists, route input from that, else
# from /dev/null. Route output to testname.run.stdout and
# testname.run.stderr. Returns the exit code of the run.

def simple_run(name, way, prog, extra_run_opts):
    opts = getTestOpts()

    # figure out what to use for stdin
    if opts.stdin:
        stdin = in_testdir(opts.stdin)
    elif os.path.exists(in_testdir(name, 'stdin')):
        stdin = in_testdir(name, 'stdin')
    else:
        stdin = None

    stdout = in_testdir(name, 'run.stdout')
    if opts.combined_output:
        stderr = subprocess.STDOUT
    else:
        stderr = in_testdir(name, 'run.stderr')

    my_rts_flags = rts_flags(way)

    # Collect stats if necessary:
    # isStatsTest and not isCompilerStatsTest():
    #   assume we are running a ghc-compiled program. Collect stats.
    # isStatsTest and way == 'ghci':
    #   assume we are running a program via ghci. Collect stats.
    stats_file = name + '.stats'
    if isStatsTest() and (not isCompilerStatsTest() or way == 'ghci'):
        stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    else:
        stats_args = ''

    # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
    cmd = prog + stats_args + ' ' + my_rts_flags + ' ' + extra_run_opts

    if opts.cmd_wrapper is not None:
        cmd = opts.cmd_wrapper(cmd)

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    # run the command
    exit_code = runCmd(cmd, stdin, stdout, stderr, opts.run_timeout_multiplier)

    # check the exit code
    if exit_code != opts.exit_code:
        if config.verbose >= 1 and _expect_pass(way):
            print('Wrong exit code for ' + name + '(' + way + ')' + '(expected', opts.exit_code, ', actual', exit_code, ')')
            dump_stdout(name)
            dump_stderr(name)
        return failBecause('bad exit code (%d)' % exit_code)

    if not (opts.ignore_stderr or stderr_ok(name, way) or opts.combined_output):
        return failBecause('bad stderr')
    if not (opts.ignore_stdout or stdout_ok(name, way)):
        return failBecause('bad stdout')

    check_hp = '-h' in my_rts_flags and opts.check_hp
    check_prof = '-p' in my_rts_flags

    # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
    if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
        return failBecause('bad heap profile')
    if check_prof and not check_prof_ok(name, way):
        return failBecause('bad profile')

    return check_stats(name, way, in_testdir(stats_file), opts.stats_range_fields)

def rts_flags(way):
    args = config.way_rts_flags.get(way, [])
    return '+RTS {0} -RTS'.format(' '.join(args)) if args else ''

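# For example (illustrative): if config.way_rts_flags['threaded2'] were
# ['-N2'], then rts_flags('threaded2') returns '+RTS -N2 -RTS'; a way
# with no RTS flags yields the empty string.
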
1451 # -----------------------------------------------------------------------------
1452 # Run a program in the interpreter and check its output
1453
1454 def interpreter_run(name, way, extra_hc_opts, top_mod):
1455 opts = getTestOpts()
1456
1457 stdout = in_testdir(name, 'interp.stdout')
1458 stderr = in_testdir(name, 'interp.stderr')
1459 script = in_testdir(name, 'genscript')
1460
1461 if opts.combined_output:
1462 framework_fail(name, 'unsupported',
1463 'WAY=ghci and combined_output together is not supported')
1464
1465 if (top_mod == ''):
1466 srcname = add_hs_lhs_suffix(name)
1467 else:
1468 srcname = top_mod
1469
1470 delimiter = '===== program output begins here\n'
1471
1472 with io.open(script, 'w', encoding='utf8') as f:
1473 # set the prog name and command-line args to match the compiled
1474 # environment.
1475 f.write(':set prog ' + name + '\n')
1476 f.write(':set args ' + opts.extra_run_opts + '\n')
1477 # Add marker lines to the stdout and stderr output files, so we
1478 # can separate GHCi's output from the program's.
1479 f.write(':! echo ' + delimiter)
1480 f.write(':! echo 1>&2 ' + delimiter)
1481 # Set stdout to be line-buffered to match the compiled environment.
1482 f.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
1483 # wrapping in GHC.TopHandler.runIO ensures we get the same output
1484 # in the event of an exception as for the compiled program.
1485 f.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
1486
1487 stdin = in_testdir(opts.stdin if opts.stdin else add_suffix(name, 'stdin'))
1488 if os.path.exists(stdin):
1489 os.system('cat "{0}" >> "{1}"'.format(stdin, script))
1490
1491 flags = ' '.join(get_compiler_flags() + config.way_flags[way])
1492
1493 cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts}'
1494 ).format(**locals())
1495
1496 if getTestOpts().cmd_wrapper != None:
1497 cmd = opts.cmd_wrapper(cmd);
1498
1499 cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())
1500
1501 exit_code = runCmd(cmd, script, stdout, stderr, opts.run_timeout_multiplier)
1502
1503 # split the stdout into compilation/program output
1504 split_file(stdout, delimiter,
1505 in_testdir(name, 'comp.stdout'),
1506 in_testdir(name, 'run.stdout'))
1507 split_file(stderr, delimiter,
1508 in_testdir(name, 'comp.stderr'),
1509 in_testdir(name, 'run.stderr'))
1510
1511 # check the exit code
1512 if exit_code != getTestOpts().exit_code:
1513 print('Wrong exit code for ' + name + '(' + way + ') (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
1514 dump_stdout(name)
1515 dump_stderr(name)
1516 return failBecause('bad exit code (%d)' % exit_code)
1517
1518 # ToDo: if the sub-shell was killed by ^C, then exit
1519
1520 if not (opts.ignore_stderr or stderr_ok(name, way)):
1521 return failBecause('bad stderr')
1522 elif not (opts.ignore_stdout or stdout_ok(name, way)):
1523 return failBecause('bad stdout')
1524 else:
1525 return passed()
1526
1527 def split_file(in_fn, delimiter, out1_fn, out2_fn):
1528 # See Note [Universal newlines].
1529 with io.open(in_fn, 'r', encoding='utf8', errors='replace', newline=None) as infile:
1530 with io.open(out1_fn, 'w', encoding='utf8', newline='') as out1:
1531 with io.open(out2_fn, 'w', encoding='utf8', newline='') as out2:
1532 line = infile.readline()
1533 while re.sub('^\s*','',line) != delimiter and line != '':
1534 out1.write(line)
1535 line = infile.readline()
1536
1537 line = infile.readline()
1538 while line != '':
1539 out2.write(line)
1540 line = infile.readline()
1541
1542 # -----------------------------------------------------------------------------
1543 # Utils
1544 def get_compiler_flags():
1545 opts = getTestOpts()
1546
1547 flags = copy.copy(opts.compiler_always_flags)
1548
1549 flags.append(opts.extra_hc_opts)
1550
1551 if opts.outputdir != None:
1552 flags.extend(["-outputdir", opts.outputdir])
1553
1554 return flags
1555
1556 def stdout_ok(name, way):
1557 actual_stdout_file = add_suffix(name, 'run.stdout')
1558 expected_stdout_file = find_expected_file(name, 'stdout')
1559
1560 extra_norm = join_normalisers(normalise_output, getTestOpts().extra_normaliser)
1561
1562 check_stdout = getTestOpts().check_stdout
1563 if check_stdout:
1564 actual_stdout_path = in_testdir(actual_stdout_file)
1565 return check_stdout(actual_stdout_path, extra_norm)
1566
1567 return compare_outputs(way, 'stdout', extra_norm,
1568 expected_stdout_file, actual_stdout_file)
1569
1570 def dump_stdout( name ):
1571 with open(in_testdir(name, 'run.stdout'), encoding='utf8') as f:
1572 str = f.read().strip()
1573 if str:
1574 print("Stdout (", name, "):")
1575 print(str)
1576
1577 def stderr_ok(name, way):
1578 actual_stderr_file = add_suffix(name, 'run.stderr')
1579 expected_stderr_file = find_expected_file(name, 'stderr')
1580
1581 return compare_outputs(way, 'stderr',
1582 join_normalisers(normalise_errmsg, getTestOpts().extra_errmsg_normaliser), \
1583 expected_stderr_file, actual_stderr_file,
1584 whitespace_normaliser=normalise_whitespace)
1585
1586 def dump_stderr( name ):
1587 with open(in_testdir(name, 'run.stderr'), encoding='utf8') as f:
1588 str = f.read().strip()
1589 if str:
1590 print("Stderr (", name, "):")
1591 print(str)
1592
1593 def read_no_crs(file):
1594 str = ''
1595 try:
1596 # See Note [Universal newlines].
1597 with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
1598 str = h.read()
1599 except Exception:
1600 # On Windows, if the program fails very early, it seems the
1601 # files stdout/stderr are redirected to may not get created
1602 pass
1603 return str
1604
1605 def write_file(file, str):
1606 # See Note [Universal newlines].
1607 with io.open(file, 'w', encoding='utf8', newline='') as h:
1608 h.write(str)
1609
1610 # Note [Universal newlines]
1611 #
1612 # We don't want to write any Windows style line endings ever, because
1613 # it would mean that `make accept` would touch every line of the file
1614 # when switching between Linux and Windows.
1615 #
1616 # Furthermore, when reading a file, it is convenient to translate all
1617 # Windows style endings to '\n', as it simplifies searching or massaging
1618 # the content.
1619 #
1620 # Solution: use `io.open` instead of `open`
1621 # * when reading: use newline=None to translate '\r\n' to '\n'
1622 # * when writing: use newline='' to not translate '\n' to '\r\n'
1623 #
1624 # See https://docs.python.org/2/library/io.html#io.open.
1625 #
1626 # This should work with both python2 and python3, and with both mingw*
1627 # as msys2 style Python.
1628 #
1629 # Do note that io.open returns unicode strings. So we have to specify
1630 # the expected encoding. But there is at least one file which is not
1631 # valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
1632 # Another solution would be to open files in binary mode always, and
1633 # operate on bytes.
1634
1635 def check_hp_ok(name):
1636 opts = getTestOpts()
1637
1638 # do not qualify for hp2ps because we should be in the right directory
1639 hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())
1640
1641 hp2psResult = runCmd(hp2psCmd)
1642
1643 actual_ps_path = in_testdir(name, 'ps')
1644
1645 if hp2psResult == 0:
1646 if os.path.exists(actual_ps_path):
1647 if gs_working:
1648 gsResult = runCmd(genGSCmd(actual_ps_path))
1649 if (gsResult == 0):
1650 return (True)
1651 else:
1652 print("hp2ps output for " + name + " is not valid PostScript")
1653 else: return (True) # assume postscript is valid without ghostscript
1654 else:
1655 print("hp2ps did not generate PostScript for " + name)
1656 return (False)
1657 else:
1658 print("hp2ps error when processing heap profile for " + name)
1659 return(False)
1660
1661 def check_prof_ok(name, way):
1662 expected_prof_file = find_expected_file(name, 'prof.sample')
1663 expected_prof_path = in_testdir(expected_prof_file)
1664
1665 # Check actual prof file only if we have an expected prof file to
1666 # compare it with.
1667 if not os.path.exists(expected_prof_path):
1668 return True
1669
1670 actual_prof_file = add_suffix(name, 'prof')
1671 actual_prof_path = in_testdir(actual_prof_file)
1672
1673 if not os.path.exists(actual_prof_path):
1674 print(actual_prof_path + " does not exist")
1675 return(False)
1676
1677 if os.path.getsize(actual_prof_path) == 0:
1678 print(actual_prof_path + " is empty")
1679 return(False)
1680
1681 return compare_outputs(way, 'prof', normalise_prof,
1682 expected_prof_file, actual_prof_file,
1683 whitespace_normaliser=normalise_whitespace)
1684
1685 # Compare expected output to actual output, and optionally accept the
1686 # new output. Returns true if output matched or was accepted, false
1687 # otherwise. See Note [Output comparison] for the meaning of the
1688 # normaliser and whitespace_normaliser parameters.
1689 def compare_outputs(way, kind, normaliser, expected_file, actual_file, diff_file=None,
1690 whitespace_normaliser=lambda x:x):
1691
1692 expected_path = in_srcdir(expected_file)
1693 actual_path = in_testdir(actual_file)
1694
1695 if os.path.exists(expected_path):
1696 expected_str = normaliser(read_no_crs(expected_path))
1697 # Create the .normalised file in the testdir, not in the srcdir.
1698 expected_normalised_file = add_suffix(expected_file, 'normalised')
1699 expected_normalised_path = in_testdir(expected_normalised_file)
1700 else:
1701 expected_str = ''
1702 expected_normalised_path = '/dev/null'
1703
1704 actual_raw = read_no_crs(actual_path)
1705 actual_str = normaliser(actual_raw)
1706
1707 # See Note [Output comparison].
1708 if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
1709 return True
1710 else:
1711 if config.verbose >= 1 and _expect_pass(way):
1712 print('Actual ' + kind + ' output differs from expected:')
1713
1714 if expected_normalised_path != '/dev/null':
1715 write_file(expected_normalised_path, expected_str)
1716
1717 actual_normalised_path = add_suffix(actual_path, 'normalised')
1718 write_file(actual_normalised_path, actual_str)
1719
1720 if config.verbose >= 1 and _expect_pass(way):
1721 # See Note [Output comparison].
1722 r = runCmd('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
1723 actual_normalised_path),
1724 stdout=diff_file,
1725 print_output=True)
1726
1727 # If for some reason there were no non-whitespace differences,
1728 # then do a full diff
1729 if r == 0:
1730 r = runCmd('diff -u "{0}" "{1}"'.format(expected_normalised_path,
1731 actual_normalised_path),
1732 stdout=diff_file,
1733 print_output=True)
1734 elif diff_file: open(diff_file, 'ab').close() # Make sure the file still exists,
1735 # as we will try to read it later
1736
1737 if config.accept and (getTestOpts().expect == 'fail' or
1738 way in getTestOpts().expect_fail_for):
1739 if_verbose(1, 'Test is expected to fail. Not accepting new output.')
1740 return False
1741 elif config.accept and actual_raw:
1742 if config.accept_platform:
1743 if_verbose(1, 'Accepting new output for platform "'
1744 + config.platform + '".')
1745 expected_path += '-' + config.platform
1746 elif config.accept_os:
1747 if_verbose(1, 'Accepting new output for os "'
1748 + config.os + '".')
1749 expected_path += '-' + config.os
1750 else:
1751 if_verbose(1, 'Accepting new output.')
1752
1753 write_file(expected_path, actual_raw)
1754 return True
1755 elif config.accept:
1756 if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
1757 os.remove(expected_path)
1758 return True
1759 else:
1760 return False
1761
1762 # Checks that each line from pattern_file is present in actual_file as
1763 # a substring or regex pattern, depending on is_substring.
1764 def grep_output(normaliser, pattern_file, actual_file, is_substring=True):
1765 expected_path = in_srcdir(pattern_file)
1766 actual_path = in_testdir(actual_file)
1767
1768 expected_patterns = read_no_crs(expected_path).strip().split('\n')
1769 actual_raw = read_no_crs(actual_path)
1770 actual_str = normaliser(actual_raw)
1771
1772 success = True
1773 failed_patterns = []
1774
1775 def regex_match(pat, actual):
1776 return re.search(pat, actual) is not None
1777
1778 def substring_match(pat, actual):
1779 return pat in actual
1780
1781 def is_match(pat, actual):
1782 if is_substring:
1783 return substring_match(pat, actual)
1784 else:
1785 return regex_match(pat, actual)
1786
1787 for pat in expected_patterns:
1788 if not is_match(pat, actual_str):
1789 success = False
1790 failed_patterns.append(pat)
1791
1792 if not success:
1793 print('Actual output does not contain the following patterns:')
1794 for pat in failed_patterns:
1795 print(pat)
1796
1797 return success
1798
1799 # Note [Output comparison]
1800 #
1801 # We do two types of output comparison:
1802 #
1803 # 1. To decide whether a test has failed. We apply a `normaliser` and an
1804 # optional `whitespace_normaliser` to the expected and the actual
1805 # output, before comparing the two.
1806 #
1807 # 2. To show as a diff to the user when the test indeed failed. We apply
1808 # the same `normaliser` function to the outputs, to make the diff as
1809 # small as possible (only showing the actual problem). But we don't
1810 # apply the `whitespace_normaliser` here, because it might completely
1811 # squash all whitespace, making the diff unreadable. Instead we rely
1812 # on the `diff` program to ignore whitespace changes as much as
1813 # possible (#10152).
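
# A tiny sketch (hypothetical normalisers and strings, not part of any
# test) of the two comparisons described above:
def _output_comparison_demo():
    normaliser = lambda s: s.replace('ghc-stage2', 'ghc')
    whitespace_normaliser = lambda s: ' '.join(s.split())
    expected = 'ghc: hello   world'
    actual = 'ghc-stage2: hello world'
    # 1. Pass/fail is decided on the fully normalised strings...
    assert whitespace_normaliser(normaliser(expected)) == \
           whitespace_normaliser(normaliser(actual))
    # 2. ...while a diff shown to the user would compare only
    # normaliser(expected) with normaliser(actual), leaving `diff -uw`
    # to play down whitespace-only differences.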
1814
1815 def normalise_whitespace( str ):
1816 # Merge contiguous whitespace characters into a single space.
1817 return ' '.join(str.split())
1818
1819 callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')
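
# For example (hypothetical input), normalise_callstacks below rewrites
#   'undefined, called at T1.hs:3:7 in main:Main'
# to
#   'undefined, called at T1.hs:<line>:<column> in <package-id>:Main'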
1820
1821 def normalise_callstacks(s):
1822 opts = getTestOpts()
1823 def repl(matches):
1824 location = matches.group(1)
1825 location = normalise_slashes_(location)
1826 return ', called at {0}:<line>:<column> in <package-id>:'.format(location)
1827 # Ignore line number differences in call stacks (#10834).
1828 s = re.sub(callSite_re, repl, s)
1829 # Ignore the change in how we identify implicit call-stacks
1830 s = s.replace('from ImplicitParams', 'from HasCallStack')
1831 if not opts.keep_prof_callstacks:
1832 # Don't output prof callstacks. Test output should be
1833 # independent from the WAY we run the test.
1834 s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
1835 return s
1836
1837 tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)
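# For example (hypothetical fingerprints), normalise_type_reps below rewrites
#   'TyCon 123## 456## Maybe' -> 'TyCon FINGERPRINT FINGERPRINT Maybe'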
1838
1839 def normalise_type_reps(str):
1840 """ Normalise out fingerprints from Typeable TyCon representations """
1841 return re.sub(tyCon_re, 'TyCon FINGERPRINT FINGERPRINT ', str)
1842
1843 def normalise_errmsg( str ):
1844 """Normalise error-messages emitted via stderr"""
1845 # IBM AIX's `ld` is a bit chatty
1846 if opsys('aix'):
1847 str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
1848 # remove " error:" and lower-case " Warning:" to make patch for
1849 # trac issue #10021 smaller
1850 str = modify_lines(str, lambda l: re.sub(' error:', '', l))
1851 str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
1852 str = normalise_callstacks(str)
1853 str = normalise_type_reps(str)
1854
1855 # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
1856 # the colon is there because it appears in error messages; this
1857 # hacky solution is used in place of more sophisticated filename
1858 # mangling
1859 str = re.sub('([^\\s])\\.exe', '\\1', str)
1860
1861 # normalise slashes, minimise Windows/Unix filename differences
1862 str = re.sub('\\\\', '/', str)
1863
1864 # The inplace ghc's are called ghc-stage[123] to avoid filename
1865 # collisions, so we need to normalise that to just "ghc"
1866 str = re.sub('ghc-stage[123]', 'ghc', str)
1867
1868 # Error messages sometimes contain integer implementation package
1869 str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
1870
1871 # Error messages sometimes contain this blurb which can vary
1872 # spuriously depending upon build configuration (e.g. based on integer
1873 # backend)
1874 str = re.sub('...plus ([a-z]+|[0-9]+) instances involving out-of-scope types',
1875 '...plus N instances involving out-of-scope types', str)
1876
1877 # Also filter out bullet characters. This is because bullets are used to
1878 # separate error sections, and tests shouldn't be sensitive to how the
1879 # division happens.
1880 bullet = '•'.encode('utf8') if isinstance(str, bytes) else '•'
1881 str = str.replace(bullet, '')
1882
1883 # Windows only, this is a bug in hsc2hs but it is preventing
1884 # stable output for the testsuite. See #9775. For now we filter out this
1885 # warning message to get clean output.
1886 if config.msys:
1887 str = re.sub('Failed to remove file (.*); error= (.*)$', '', str)
1888 str = re.sub(r'DeleteFile "(.+)": permission denied \(Access is denied\.\)(.*)$', '', str)
1889
1890 return str
1891
1892 # normalise a .prof file, so that we can reasonably compare it against
1893 # a sample. This doesn't compare any of the actual profiling data,
1894 # only the shape of the profile and the number of entries.
1895 def normalise_prof (str):
1896 # strip everything up to the line beginning "COST CENTRE"
1897 str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)
1898
1899 # strip results for CAFs, these tend to change unpredictably
1900 str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)
1901
1902 # XXX Ignore Main.main. Sometimes this appears under CAF, and
1903 # sometimes under MAIN.
1904 str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)
1905
1906 # We have something like this:
1907 #
1908 # MAIN MAIN <built-in> 53 0 0.0 0.2 0.0 100.0
1909 # CAF Main <entire-module> 105 0 0.0 0.3 0.0 62.5
1910 # readPrec Main Main_1.hs:7:13-16 109 1 0.0 0.6 0.0 0.6
1911 # readPrec Main Main_1.hs:4:13-16 107 1 0.0 0.6 0.0 0.6
1912 # main Main Main_1.hs:(10,1)-(20,20) 106 1 0.0 20.2 0.0 61.0
1913 # == Main Main_1.hs:7:25-26 114 1 0.0 0.0 0.0 0.0
1914 # == Main Main_1.hs:4:25-26 113 1 0.0 0.0 0.0 0.0
1915 # showsPrec Main Main_1.hs:7:19-22 112 2 0.0 1.2 0.0 1.2
1916 # showsPrec Main Main_1.hs:4:19-22 111 2 0.0 0.9 0.0 0.9
1917 # readPrec Main Main_1.hs:7:13-16 110 0 0.0 18.8 0.0 18.8
1918 # readPrec Main Main_1.hs:4:13-16 108 0 0.0 19.9 0.0 19.9
1919 #
1920 # then we remove all the specific profiling data, leaving only the cost
1921 # centre name, module, src, and entries, to end up with this: (modulo
1922 # whitespace between columns)
1923 #
1924 # MAIN MAIN <built-in> 0
1925 # readPrec Main Main_1.hs:7:13-16 1
1926 # readPrec Main Main_1.hs:4:13-16 1
1927 # == Main Main_1.hs:7:25-26 1
1928 # == Main Main_1.hs:4:25-26 1
1929 # showsPrec Main Main_1.hs:7:19-22 2
1930 # showsPrec Main Main_1.hs:4:19-22 2
1931 # readPrec Main Main_1.hs:7:13-16 0
1932 # readPrec Main Main_1.hs:4:13-16 0
1933
1934 # Split 9 whitespace-separated groups, take columns 1 (cost-centre), 2
1935 # (module), 3 (src), and 5 (entries). SCC names can't have whitespace, so
1936 # this works fine.
1937 str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
1938 '\\1 \\2 \\3 \\5\n', str)
1939 return str
1940
1941 def normalise_slashes_( str ):
1942 str = re.sub('\\\\', '/', str)
1943 str = re.sub('//', '/', str)
1944 return str
1945
1946 def normalise_exe_( str ):
1947 str = re.sub('\.exe', '', str)
1948 return str
1949
1950 def normalise_output( str ):
1951 # remove " error:" and lower-case " Warning:" to make patch for
1952 # trac issue #10021 smaller
1953 str = modify_lines(str, lambda l: re.sub(' error:', '', l))
1954 str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
1955 # Remove a .exe extension (for Windows)
1956 # This can occur in error messages generated by the program.
1957 str = re.sub('([^\\s])\\.exe', '\\1', str)
1958 str = normalise_callstacks(str)
1959 str = normalise_type_reps(str)
1960 return str
1961
1962 def normalise_asm( str ):
1963 lines = str.split('\n')
1964 # Only keep instructions and labels not starting with a dot.
1965 metadata = re.compile('^[ \t]*\\..*$')
1966 out = []
1967 for line in lines:
1968 # Drop metadata directives (e.g. ".type")
1969 if not metadata.match(line):
1970 line = re.sub('@plt', '', line)
1971 instr = line.lstrip().split()
1972 # Drop empty lines.
1973 if not instr:
1974 continue
1975 # Drop operands, except for call instructions.
1976 elif instr[0] == 'call':
1977 out.append(instr[0] + ' ' + instr[1])
1978 else:
1979 out.append(instr[0])
1980 out = '\n'.join(out)
1981 return out
1982
1983 def if_verbose( n, s ):
1984 if config.verbose >= n:
1985 print(s)
1986
1987 def dump_file(f):
1988 try:
1989 with io.open(f) as file:
1990 print(file.read())
1991 except Exception:
1992 print('')
1993
1994 def runCmd(cmd, stdin=None, stdout=None, stderr=None, timeout_multiplier=1.0, print_output=False):
1995 timeout_prog = strip_quotes(config.timeout_prog)
1996 timeout = str(int(ceil(config.timeout * timeout_multiplier)))
1997
1998 # Format cmd using config. Example: cmd='{hpc} report A.tix'
1999 cmd = cmd.format(**config.__dict__)
2000 if_verbose(3, cmd + ('< ' + os.path.basename(stdin) if stdin else ''))
2001
2002 stdin_file = io.open(stdin, 'rb') if stdin else None
2003 stdout_buffer = b''
2004 stderr_buffer = b''
2005
2006 hStdErr = subprocess.PIPE
2007 if stderr is subprocess.STDOUT:
2008 hStdErr = subprocess.STDOUT
2009
2010 try:
2011 # cmd is a complex command in Bourne-shell syntax
2012 # e.g (cd . && 'C:/users/simonpj/HEAD/inplace/bin/ghc-stage2' ...etc)
2013 # Hence it must ultimately be run by a Bourne shell. It's timeout's job
2014 # to invoke the Bourne shell
2015
2016 r = subprocess.Popen([timeout_prog, timeout, cmd],
2017 stdin=stdin_file,
2018 stdout=subprocess.PIPE,
2019 stderr=hStdErr,
2020 env=ghc_env)
2021
2022 stdout_buffer, stderr_buffer = r.communicate()
2023 finally:
2024 if stdin_file:
2025 stdin_file.close()
2026 if config.verbose >= 1 and print_output:
2027 if stdout_buffer:
2028 sys.stdout.buffer.write(stdout_buffer)
2029 if stderr_buffer:
2030 sys.stderr.buffer.write(stderr_buffer)
2031
2032 if stdout:
2033 with io.open(stdout, 'wb') as f:
2034 f.write(stdout_buffer)
2035 if stderr:
2036 if stderr is not subprocess.STDOUT:
2037 with io.open(stderr, 'wb') as f:
2038 f.write(stderr_buffer)
2039
2040 if r.returncode == 98:
2041 # The python timeout program uses 98 to signal that ^C was pressed
2042 stopNow()
2043 if r.returncode == 99 and getTestOpts().exit_code != 99:
2044 # Only print a message when timeout killed the process unexpectedly.
2045 if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
2046 return r.returncode
2047
2048 # -----------------------------------------------------------------------------
2049 # checking if ghostscript is available for checking the output of hp2ps
2050
2051 def genGSCmd(psfile):
2052 return '{{gs}} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "{0}"'.format(psfile)
2053
2054 def gsNotWorking():
2055 global gs_working
2056 print("GhostScript not available for hp2ps tests")
2057
2058 global gs_working
2059 gs_working = False
2060 if config.have_profiling:
2061 if config.gs != '':
2062 resultGood = runCmd(genGSCmd(config.top + '/config/good.ps'))
2063 if resultGood == 0:
2064 resultBad = runCmd(genGSCmd(config.top + '/config/bad.ps') +
2065 ' >/dev/null 2>&1')
2066 if resultBad != 0:
2067 print("GhostScript available for hp2ps tests")
2068 gs_working = True
2069 else:
2070 gsNotWorking()
2071 else:
2072 gsNotWorking()
2073 else:
2074 gsNotWorking()
2075
2076 def add_suffix( name, suffix ):
2077 if suffix == '':
2078 return name
2079 else:
2080 return name + '.' + suffix
2081
2082 def add_hs_lhs_suffix(name):
2083 if getTestOpts().c_src:
2084 return add_suffix(name, 'c')
2085 elif getTestOpts().cmm_src:
2086 return add_suffix(name, 'cmm')
2087 elif getTestOpts().objc_src:
2088 return add_suffix(name, 'm')
2089 elif getTestOpts().objcpp_src:
2090 return add_suffix(name, 'mm')
2091 elif getTestOpts().literate:
2092 return add_suffix(name, 'lhs')
2093 else:
2094 return add_suffix(name, 'hs')
2095
2096 def replace_suffix( name, suffix ):
2097 base, suf = os.path.splitext(name)
2098 return base + '.' + suffix
2099
2100 def in_testdir(name, suffix=''):
2101 return os.path.join(getTestOpts().testdir, add_suffix(name, suffix))
2102
2103 def in_srcdir(name, suffix=''):
2104 return os.path.join(getTestOpts().srcdir, add_suffix(name, suffix))
2105
2106 def in_statsdir(name, suffix=''):
2107 return os.path.join(config.stats_files_dir, add_suffix(name, suffix))
2108
2109 # Finding the sample output. The filename is of the form
2110 #
2111 # <test>.stdout[-ws-<wordsize>][-<platform>|-<os>]
2112 #
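# For example, with hypothetical config values platform=x86_64-unknown-linux,
# os=linux and wordsize=64, find_expected_file below probes the source dir
# for, in order:
#
#   <test>.stdout-ws-64-x86_64-unknown-linux
#   <test>.stdout-x86_64-unknown-linux
#   <test>.stdout-ws-64-linux
#   <test>.stdout-linux
#   <test>.stdout-ws-64
#   <test>.stdout
#
# falling back to the bare basename if none exists.
#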
2113 def find_expected_file(name, suff):
2114 basename = add_suffix(name, suff)
2115 # Override the basename if the user has specified one, this will then be
2116 # subjected to the same name mangling scheme as normal to allow platform
2117 # specific overrides to work.
2118 basename = getTestOpts().use_specs.get(suff, basename)
2119
2120 files = [basename + ws + plat
2121 for plat in ['-' + config.platform, '-' + config.os, '']
2122 for ws in ['-ws-' + config.wordsize, '']]
2123
2124 for f in files:
2125 if os.path.exists(in_srcdir(f)):
2126 return f
2127
2128 return basename
2129
2130 if config.msys:
2131 import stat
2132 def cleanup():
2133 testdir = getTestOpts().testdir
2134 max_attempts = 5
2135 retries = max_attempts
2136 def on_error(function, path, excinfo):
2137 # At least one test (T11489) removes the write bit from a file it
2138 # produces. Windows refuses to delete read-only files with a
2139 # permission error. Try setting the write bit and try again.
2140 os.chmod(path, stat.S_IWRITE)
2141 function(path)
2142
2143 # On Windows we have to retry the delete a couple of times.
2144 # The reason for this is that a FileDelete command just marks a
2145 # file for deletion. The file is really only removed when the last
2146 # handle to the file is closed. Unfortunately there are a lot of
2147 # system services that can have a file temporarily opened using a shared
2148 # readonly lock, such as the built in AV and search indexer.
2149 #
2150 # We can't really guarantee that these are all off, so whenever the folder
2151 # still exists after an rmtree we wait a bit and try again.
2152 #
2153 # Judging by what I've seen from the tests on the CI server, this is
2154 # relatively rare, so overall we won't be retrying a lot. If the folder is
2155 # still locked after a reasonable amount of time, abort the current test by
2156 # throwing an exception, so that it doesn't fail with an even more cryptic error.
2157 #
2158 # See #13162
2159 exception = None
2160 while retries > 0 and os.path.exists(testdir):
2161 time.sleep((max_attempts-retries)*6)
2162 try:
2163 shutil.rmtree(testdir, onerror=on_error, ignore_errors=False)
2164 except Exception as e:
2165 exception = e
2166 retries -= 1
2167
2168 if retries == 0 and os.path.exists(testdir):
2169 raise Exception("Unable to remove folder '%s': %s\nUnable to start current test."
2170 % (testdir, exception))
2171 else:
2172 def cleanup():
2173 testdir = getTestOpts().testdir
2174 if os.path.exists(testdir):
2175 shutil.rmtree(testdir, ignore_errors=False)
2176
2177
2178 # -----------------------------------------------------------------------------
2179 # Return a list of all the files ending in '.T' below directories roots.
2180
2181 def findTFiles(roots):
2182 for root in roots:
2183 for path, dirs, files in os.walk(root, topdown=True):
2184 # Never pick up .T files in uncleaned .run directories.
2185 dirs[:] = [dir for dir in sorted(dirs)
2186 if not dir.endswith(testdir_suffix)]
2187 for filename in files:
2188 if filename.endswith('.T'):
2189 yield os.path.join(path, filename)
2190
2191 # -----------------------------------------------------------------------------
2192 # Output a test summary to the specified file object
2193
2194 def summary(t, file, short=False, color=False):
2195
2196 file.write('\n')
2197 printUnexpectedTests(file,
2198 [t.unexpected_passes, t.unexpected_failures,
2199 t.unexpected_stat_failures, t.framework_failures])
2200
2201 if short:
2202 # Only print the list of unexpected tests above.
2203 return
2204
2205 colorize = lambda s: s
2206 if color:
2207 if len(t.unexpected_failures) > 0 or \
2208 len(t.unexpected_stat_failures) > 0 or \
2209 len(t.unexpected_passes) > 0 or \
2210 len(t.framework_failures) > 0:
2211 colorize = str_fail
2212 else:
2213 colorize = str_pass
2214
2215 file.write(colorize('SUMMARY') + ' for test run started at '
2216 + time.strftime("%c %Z", t.start_time) + '\n'
2217 + str(datetime.timedelta(seconds=
2218 round(time.time() - time.mktime(t.start_time)))).rjust(8)
2219 + ' spent to go through\n'
2220 + repr(t.total_tests).rjust(8)
2221 + ' total tests, which gave rise to\n'
2222 + repr(t.total_test_cases).rjust(8)
2223 + ' test cases, of which\n'
2224 + repr(t.n_tests_skipped).rjust(8)
2225 + ' were skipped\n'
2226 + '\n'
2227 + repr(len(t.missing_libs)).rjust(8)
2228 + ' had missing libraries\n'
2229 + repr(t.n_expected_passes).rjust(8)
2230 + ' expected passes\n'
2231 + repr(t.n_expected_failures).rjust(8)
2232 + ' expected failures\n'
2233 + '\n'
2234 + repr(len(t.framework_failures)).rjust(8)
2235 + ' caused framework failures\n'
2236 + repr(len(t.framework_warnings)).rjust(8)
2237 + ' caused framework warnings\n'
2238 + repr(len(t.unexpected_passes)).rjust(8)
2239 + ' unexpected passes\n'
2240 + repr(len(t.unexpected_failures)).rjust(8)
2241 + ' unexpected failures\n'
2242 + repr(len(t.unexpected_stat_failures)).rjust(8)
2243 + ' unexpected stat failures\n'
2244 + '\n')
2245
2246 if t.unexpected_passes:
2247 file.write('Unexpected passes:\n')
2248 printTestInfosSummary(file, t.unexpected_passes)
2249
2250 if t.unexpected_failures:
2251 file.write('Unexpected failures:\n')
2252 printTestInfosSummary(file, t.unexpected_failures)
2253
2254 if t.unexpected_stat_failures:
2255 file.write('Unexpected stat failures:\n')
2256 printTestInfosSummary(file, t.unexpected_stat_failures)
2257
2258 if t.framework_failures:
2259 file.write('Framework failures:\n')
2260 printTestInfosSummary(file, t.framework_failures)
2261
2262 if t.framework_warnings:
2263 file.write('Framework warnings:\n')
2264 printTestInfosSummary(file, t.framework_warnings)
2265
2266 if stopping():
2267 file.write('WARNING: Testsuite run was terminated early\n')
2268
2269 def printUnexpectedTests(file, testInfoss):
2270 unexpected = set(result.testname
2271 for testInfos in testInfoss
2272 for result in testInfos
2273 if not result.testname.endswith('.T'))
2274 if unexpected:
2275 file.write('Unexpected results from:\n')
2276 file.write('TEST="' + ' '.join(sorted(unexpected)) + '"\n')
2277 file.write('\n')
2278
2279 def printTestInfosSummary(file, testInfos):
2280 maxDirLen = max(len(tr.directory) for tr in testInfos)
2281 for result in sorted(testInfos, key=lambda r: (r.testname.lower(), r.way, r.directory)):
2282 directory = result.directory.ljust(maxDirLen)
2283 file.write(' {directory} {r.testname} [{r.reason}] ({r.way})\n'.format(
2284 r = result,
2285 directory = directory))
2286 file.write('\n')
2287
2288 def modify_lines(s, f):
2289 s = '\n'.join([f(l) for l in s.splitlines()])
2290 if s and s[-1] != '\n':
2291 # Prevent '\ No newline at end of file' warnings when diffing.
2292 s += '\n'
2293 return s