Testsuite: run tests in <testdir>.run instead of /tmp
[ghc.git] / testsuite / driver / testlib.py
1 # coding=utf8
2 #
3 # (c) Simon Marlow 2002
4 #
5
6 from __future__ import print_function
7
8 import shutil
9 import sys
10 import os
11 import errno
12 import string
13 import re
14 import traceback
15 import time
16 import datetime
17 import copy
18 import glob
19 from math import ceil, trunc
20 import collections
21 import subprocess
22
23 from testglobals import *
24 from testutil import *
25 from extra_files import extra_src_files
26
# Python 2/3 compatibility shim: 'basestring' only exists on Python 2.
# On Python 3 treat both str and bytes as "string-like"; this is used by
# isinstance checks (e.g. in join_normalisers).
try:
    basestring
except NameError: # Python 3
    basestring = (str,bytes)
31
# The low-level 'thread' module was renamed to '_thread' in Python 3.
# Threading support is only needed when running tests concurrently.
if config.use_threads:
    import threading
    try:
        import thread
    except ImportError: # Python 3
        import _thread as thread
38
# Global stop flag: set when the user interrupts the run (Ctrl-C), so
# the driver stops scheduling further tests.
wantToStop = False

def stopNow():
    """Request that the test driver stop running further tests."""
    global wantToStop
    wantToStop = True

def stopping():
    """Has a stop been requested?"""
    return wantToStop
46
# Options valid for the current test only (these get reset to
# testdir_testopts after each test).

global testopts_local
if config.use_threads:
    # One set of test options per worker thread.
    testopts_local = threading.local()
else:
    # Single-threaded: a plain object is enough to hold the 'x' slot.
    class TestOpts_Local:
        pass
    testopts_local = TestOpts_Local()
57
def getTestOpts():
    # Return the options of the test currently being set up/run on this
    # thread (stored in the thread-local slot 'x').
    return testopts_local.x

def setLocalTestOpts(opts):
    # Install 'opts' as the current thread's test options.
    global testopts_local
    testopts_local.x=opts
64
def isStatsTest():
    """Does the current test check compiler or runtime statistics ranges?"""
    opts = getTestOpts()
    has_compiler_stats = len(opts.compiler_stats_range_fields) > 0
    has_runtime_stats = len(opts.stats_range_fields) > 0
    return has_compiler_stats or has_runtime_stats
68
69
# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
    # Chain 'f' onto the directory-wide settings; executeSetups later
    # flattens and applies these nested lists in order.
    global thisdir_settings
    thisdir_settings = [thisdir_settings, f]
75
76 # -----------------------------------------------------------------------------
77 # Canned setup functions for common cases. eg. for a test you might say
78 #
79 # test('test001', normal, compile, [''])
80 #
81 # to run it without any options, but change it to
82 #
83 # test('test001', expect_fail, compile, [''])
84 #
85 # to expect failure for this test.
86
def normal( name, opts ):
    """Default setup function: leaves the test options unchanged."""
    return

def skip( name, opts ):
    """Skip this test entirely."""
    opts.skip = 1

def expect_fail( name, opts ):
    # The compiler, testdriver, OS or platform is missing a certain
    # feature, and we don't plan to or can't fix it now or in the
    # future.
    opts.expect = 'fail'
98
def reqlib( lib ):
    # Setup-function factory: require that package 'lib' is installed.
    return lambda name, opts, l=lib: _reqlib (name, opts, l )
101
# Cache the results of looking to see if we have a library or not.
# This makes quite a difference, especially on Windows.
have_lib = {}

def _reqlib( name, opts, lib ):
    # Ask ghc-pkg whether 'lib' is available; mark the test as
    # 'missing-lib' if it is not. Results are memoised in have_lib.
    if lib in have_lib:
        got_it = have_lib[lib]
    else:
        cmd = strip_quotes(config.ghc_pkg)
        p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # read from stdout and stderr to avoid blocking due to
        # buffers filling
        p.communicate()
        r = p.wait()
        # Exit code 0 means ghc-pkg knows the package.
        got_it = r == 0
        have_lib[lib] = got_it

    if not got_it:
        opts.expect = 'missing-lib'
123
def req_haddock( name, opts ):
    # Require a haddock-capable compiler; otherwise treat as missing-lib.
    if not config.haddock:
        opts.expect = 'missing-lib'

def req_profiling( name, opts ):
    '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
    if not config.have_profiling:
        opts.expect = 'fail'

def req_shared_libs( name, opts ):
    # Require shared-library support in this build.
    if not config.have_shared_libs:
        opts.expect = 'fail'

def req_interp( name, opts ):
    # Require the GHC interpreter.
    if not config.have_interp:
        opts.expect = 'fail'

def req_smp( name, opts ):
    # Require SMP (threaded RTS) support.
    if not config.have_smp:
        opts.expect = 'fail'
144
def ignore_output( name, opts ):
    # Do not compare the test's output against expected files.
    opts.ignore_output = 1

def no_stdin( name, opts ):
    # Run the test without feeding it anything on stdin.
    opts.no_stdin = 1

def combined_output( name, opts ):
    # Merge the test's stderr into its stdout before comparison.
    opts.combined_output = True
153
154 # -----
155
def expect_fail_for( ways ):
    # Expect failure, but only in the given ways.
    return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )

def _expect_fail_for( name, opts, ways ):
    opts.expect_fail_for = ways

def expect_broken( bug ):
    # This test is a expected not to work due to the indicated trac bug
    # number.
    return lambda name, opts, b=bug: _expect_broken (name, opts, b )

def _expect_broken( name, opts, bug ):
    record_broken(name, opts, bug)
    opts.expect = 'fail';

def expect_broken_for( bug, ways ):
    # Like expect_broken, but only for the given ways.
    return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )

def _expect_broken_for( name, opts, bug, ways ):
    record_broken(name, opts, bug)
    opts.expect_fail_for = ways

def record_broken(name, opts, bug):
    # Remember (bug, testdir, name) so broken tests can be listed at the
    # end of the run (see config.list_broken).
    global brokens
    me = (bug, opts.testdir, name)
    if not me in brokens:
        brokens.append(me)
183
def _expect_pass(way):
    # Helper function. Not intended for use in .T files.
    # True iff the current test is expected to pass in the given way.
    opts = getTestOpts()
    return opts.expect == 'pass' and way not in opts.expect_fail_for
188
# -----

def omit_ways( ways ):
    # Do not run this test in any of the given ways.
    return lambda name, opts, w=ways: _omit_ways( name, opts, w )

def _omit_ways( name, opts, ways ):
    opts.omit_ways = ways

# -----

def only_ways( ways ):
    # Run this test only in the given ways.
    return lambda name, opts, w=ways: _only_ways( name, opts, w )

def _only_ways( name, opts, ways ):
    opts.only_ways = ways

# -----

def extra_ways( ways ):
    # Run this test in the given ways in addition to the default ones.
    return lambda name, opts, w=ways: _extra_ways( name, opts, w )

def _extra_ways( name, opts, ways ):
    opts.extra_ways = ways

# -----

def set_stdin( file ):
    # Feed the named file to the test program's stdin.
    return lambda name, opts, f=file: _set_stdin(name, opts, f);

def _set_stdin( name, opts, f ):
    opts.stdin = f

# -----

def exit_code( val ):
    # Expect the test program to exit with the given code.
    return lambda name, opts, v=val: _exit_code(name, opts, v);

def _exit_code( name, opts, v ):
    opts.exit_code = v
228
def signal_exit_code( val ):
    # Expect the test to die from signal 'val'; the observed exit code
    # differs per OS.
    if opsys('solaris2'):
        return exit_code( val );
    else:
        # When application running on Linux receives fatal error
        # signal, then its exit code is encoded as 128 + signal
        # value. See http://www.tldp.org/LDP/abs/html/exitcodes.html
        # I assume that Mac OS X behaves in the same way at least Mac
        # OS X builder behavior suggests this.
        return exit_code( val+128 );
239
240 # -----
241
def compile_timeout_multiplier( val ):
    # Scale the compile-time timeout for this test by 'val'.
    return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)

def _compile_timeout_multiplier( name, opts, v ):
    opts.compile_timeout_multiplier = v

def run_timeout_multiplier( val ):
    # Scale the run-time timeout for this test by 'val'.
    return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)

def _run_timeout_multiplier( name, opts, v ):
    opts.run_timeout_multiplier = v
253
254 # -----
255
def extra_run_opts( val ):
    # Extra command-line arguments when running the test program.
    return lambda name, opts, v=val: _extra_run_opts(name, opts, v);

def _extra_run_opts( name, opts, v ):
    opts.extra_run_opts = v

# -----

def extra_hc_opts( val ):
    # Extra flags to pass to the compiler for this test.
    return lambda name, opts, v=val: _extra_hc_opts(name, opts, v);

def _extra_hc_opts( name, opts, v ):
    opts.extra_hc_opts = v
269
270 # -----
271
def extra_clean( files ):
    # TODO. Remove all calls to extra_clean.
    # Obsolete no-op kept so existing .T files keep working.
    return lambda _name, _opts: None

def extra_files(files):
    # Declare additional source files this test depends on; they get
    # copied/linked into the per-test run directory.
    return lambda name, opts: _extra_files(name, opts, files)

def _extra_files(name, opts, files):
    opts.extra_files.extend(files)
281
282 # -----
283
def stats_num_field( field, expecteds ):
    # Check a runtime-stats field against an expected value +/- deviation.
    return lambda name, opts, f=field, e=expecteds: _stats_num_field(name, opts, f, e);

def _stats_num_field( name, opts, field, expecteds ):
    if field in opts.stats_range_fields:
        framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')

    if type(expecteds) is list:
        # A list of (guard, expected, deviation) triples; the first
        # triple whose guard is true wins.
        for (b, expected, dev) in expecteds:
            if b:
                opts.stats_range_fields[field] = (expected, dev)
                return
        framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')

    else:
        # A single (expected, deviation) pair.
        (expected, dev) = expecteds
        opts.stats_range_fields[field] = (expected, dev)
301
def compiler_stats_num_field( field, expecteds ):
    # Check a compiler-stats field against an expected value +/- deviation.
    return lambda name, opts, f=field, e=expecteds: _compiler_stats_num_field(name, opts, f, e);

def _compiler_stats_num_field( name, opts, field, expecteds ):
    if field in opts.compiler_stats_range_fields:
        framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')

    # Compiler performance numbers change when debugging is on, making the results
    # useless and confusing. Therefore, skip if debugging is on.
    if compiler_debugged():
        skip(name, opts)

    # The first (guard, expected, deviation) triple whose guard holds wins.
    for (b, expected, dev) in expecteds:
        if b:
            opts.compiler_stats_range_fields[field] = (expected, dev)
            return

    framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
320
321 # -----
322
def when(b, f):
    # Apply setup function 'f' only when condition 'b' holds.
    # When list_brokens is on, we want to see all expect_broken calls,
    # so we always do f
    if b or config.list_broken:
        return f
    else:
        return normal

def unless(b, f):
    # Apply setup function 'f' only when condition 'b' does NOT hold.
    return when(not b, f)
333
# Predicates over the global test configuration, for use as conditions
# in when()/unless() in .T files.

def doing_ghci():
    return 'ghci' in config.run_ways

def ghc_dynamic():
    return config.ghc_dynamic

def fast():
    # True when running at maximum speed setting.
    return config.speed == 2

def platform( plat ):
    return config.platform == plat

def opsys( os ):
    return config.os == os

def arch( arch ):
    return config.arch == arch

def wordsize( ws ):
    # 'ws' is an int (e.g. 32 or 64); config stores it as a string.
    return config.wordsize == str(ws)

def msys( ):
    return config.msys

def cygwin( ):
    return config.cygwin

def have_vanilla( ):
    return config.have_vanilla

def have_dynamic( ):
    return config.have_dynamic

def have_profiling( ):
    return config.have_profiling

def in_tree_compiler( ):
    return config.in_tree_compiler

def unregisterised( ):
    return config.unregisterised

def compiler_profiled( ):
    return config.compiler_profiled

def compiler_debugged( ):
    return config.compiler_debugged
381
382 # ---
383
def high_memory_usage(name, opts):
    """Run this test on its own (not in parallel with others)."""
    opts.alone = True

# If a test is for a multi-CPU race, then running the test alone
# increases the chance that we'll actually see it.
def multi_cpu_race(name, opts):
    opts.alone = True
391
392 # ---
def literate( name, opts ):
    # The test source is a literate Haskell (.lhs) file.
    opts.literate = 1;

def c_src( name, opts ):
    # The test source is a C file.
    opts.c_src = 1;

def objc_src( name, opts ):
    # The test source is an Objective-C file.
    opts.objc_src = 1;

def objcpp_src( name, opts ):
    # The test source is an Objective-C++ file.
    opts.objcpp_src = 1;

def cmm_src( name, opts ):
    # The test source is a C-- (.cmm) file.
    opts.cmm_src = 1;

def outputdir( odir ):
    # Use 'odir' as the compiler output directory for this test.
    return lambda name, opts, d=odir: _outputdir(name, opts, d)

def _outputdir( name, opts, odir ):
    opts.outputdir = odir;
413
414 # ----
415
def pre_cmd( cmd ):
    """Run shell command 'cmd' in the test directory before the test.

    Uses the bound default 'c' (not the closed-over 'cmd'), matching the
    pattern used by set_stdin/exit_code; previously the bound default
    was declared but unused.
    """
    return lambda name, opts, c=cmd: _pre_cmd(name, opts, c)

def _pre_cmd( name, opts, cmd ):
    opts.pre_cmd = cmd
421
422 # ----
423
def clean_cmd( cmd ):
    # TODO. Remove all calls to clean_cmd.
    # Obsolete: cleaning is handled by the driver now, so this setup
    # function is a no-op kept for backwards compatibility.
    def no_op(_name, _opts):
        return None
    return no_op
427
428 # ----
429
def cmd_prefix( prefix ):
    """Prefix the command used to run the test with 'prefix'.

    Uses the bound default 'p' (not the closed-over 'prefix'), matching
    the set_stdin/exit_code pattern; the bound default was previously
    declared but unused.
    """
    return lambda name, opts, p=prefix: _cmd_prefix(name, opts, p)

def _cmd_prefix( name, opts, prefix ):
    # Wrap the run command by prepending the prefix and a space.
    opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd;
435
436 # ----
437
def cmd_wrapper( fun ):
    """Transform the command used to run the test with function 'fun'.

    Uses the bound default 'f' (not the closed-over 'fun'), matching the
    set_stdin/exit_code pattern; the bound default was previously
    declared but unused.
    """
    return lambda name, opts, f=fun: _cmd_wrapper(name, opts, f)

def _cmd_wrapper( name, opts, fun ):
    opts.cmd_wrapper = fun
443
444 # ----
445
def compile_cmd_prefix( prefix ):
    """Prefix the compile command for this test with 'prefix'.

    Uses the bound default 'p' (not the closed-over 'prefix'), matching
    the set_stdin/exit_code pattern; the bound default was previously
    declared but unused.
    """
    return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, p)

def _compile_cmd_prefix( name, opts, prefix ):
    opts.compile_cmd_prefix = prefix
451
452 # ----
453
def check_stdout( f ):
    # Install a custom function for checking this test's stdout
    # (stored in opts.check_stdout; consumed elsewhere in the driver).
    return lambda name, opts, f=f: _check_stdout(name, opts, f)

def _check_stdout( name, opts, f ):
    # Record the checker function on the test options.
    opts.check_stdout = f
459
460 # ----
461
def normalise_slashes( name, opts ):
    # Normalise path separators in the test's output before comparing.
    _normalise_fun(name, opts, normalise_slashes_)

def normalise_exe( name, opts ):
    # Normalise executable names (e.g. strip .exe) before comparing.
    _normalise_fun(name, opts, normalise_exe_)

def normalise_fun( *fs ):
    # Add custom normaliser function(s) for the test's output.
    return lambda name, opts: _normalise_fun(name, opts, fs)

def _normalise_fun( name, opts, *fs ):
    opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)

def normalise_errmsg_fun( *fs ):
    # Add custom normaliser function(s) for the test's error output.
    return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)

def _normalise_errmsg_fun( name, opts, *fs ):
    opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)
479
def normalise_version_( *pkgs ):
    """Build a normaliser replacing '<pkg>-<version>' with '<pkg>-<VERSION>'
    for each package named in 'pkgs'."""
    # Build the alternation pattern once, outside the returned closure.
    pattern = '(' + '|'.join(re.escape(p) for p in pkgs) + ')-[0-9.]+'
    def normalise_version__( text ):
        return re.sub(pattern, '\\1-<VERSION>', text)
    return normalise_version__
485
def normalise_version( *pkgs ):
    # Normalise version numbers of the given packages in both stdout
    # and stderr comparisons.
    def normalise_version__( name, opts ):
        _normalise_fun(name, opts, normalise_version_(*pkgs))
        _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
    return normalise_version__
491
def normalise_drive_letter(name, opts):
    # Windows only. Change D:\\ to C:\\.
    _normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))
495
def keep_prof_callstacks(name, opts):
    """Keep profiling callstacks.

    Use together with `only_ways(prof_ways)`.
    """
    # Flag consumed by the output normalisers elsewhere in the driver.
    opts.keep_prof_callstacks = True
502
def join_normalisers(*a):
    """
    Compose functions, flattening sequences.

       join_normalisers(f1,[f2,f3],f4)

    is the same as

       lambda x: f1(f2(f3(f4(x))))
    """

    # 'Iterable' moved to collections.abc in Python 3.3 and the old
    # 'collections.Iterable' alias was removed in Python 3.10; fall
    # back to the old location for Python 2.
    try:
        iterable_type = collections.abc.Iterable
    except AttributeError: # Python 2
        iterable_type = collections.Iterable

    def flatten(l):
        """
        Taken from http://stackoverflow.com/a/2158532/946226
        """
        for el in l:
            # Strings are iterable but must be treated as atoms here.
            if isinstance(el, iterable_type) and not isinstance(el, basestring):
                for sub in flatten(el):
                    yield sub
            else:
                yield el

    a = flatten(a)

    fn = lambda x:x # identity function
    for f in a:
        assert callable(f)
        fn = lambda x,f=f,fn=fn: fn(f(x))
    return fn
532
533 # ----
534 # Function for composing two opt-fns together
535
def executeSetups(fs, name, opts):
    """Apply a setup function, or an arbitrarily nested list of setup
    functions, to the test options 'opts'."""
    if type(fs) is list:
        # A list of setups: execute each one in order (lists may nest).
        for setup in fs:
            executeSetups(setup, name, opts)
    else:
        # A single setup function: apply it directly.
        fs(name, opts)
544
545 # -----------------------------------------------------------------------------
546 # The current directory of tests
547
def newTestDir(tempdir, dir):
    # Called when the driver enters a new test directory: reset the
    # directory-wide default settings for subsequent tests.

    global thisdir_settings
    # reset the options for this test directory
    def settings(name, opts, tempdir=tempdir, dir=dir):
        return _newTestDir(name, opts, tempdir, dir)
    thisdir_settings = settings
555
# Should be equal to entry in toplevel .gitignore.
testdir_suffix = '.run'

def _newTestDir(name, opts, tempdir, dir):
    # Each test runs in its own directory <tempdir>/<dir>/<name>.run so
    # tests can run in parallel without clobbering each other's output.
    opts.srcdir = os.path.join(os.getcwd(), dir)
    opts.testdir = os.path.join(tempdir, dir, name + testdir_suffix)
    opts.compiler_always_flags = config.compiler_always_flags
563
564 # -----------------------------------------------------------------------------
565 # Actually doing tests
566
# Tests that must run alone, tests that may run concurrently, and the
# set of all registered test names (for duplicate detection).
parallelTests = []
aloneTests = []
allTestNames = set([])

def runTest (opts, name, func, args):
    # Run a single test, either on a fresh worker thread (when
    # config.use_threads) or synchronously.
    ok = 0

    if config.use_threads:
        t.thread_pool.acquire()
        try:
            # Wait for a free thread slot.
            while config.threads<(t.running_threads+1):
                t.thread_pool.wait()
            t.running_threads = t.running_threads+1
            ok=1
            t.thread_pool.release()
            thread.start_new_thread(test_common_thread, (name, opts, func, args))
        except:
            if not ok:
                # The thread never started; release the pool lock ourselves.
                t.thread_pool.release()
    else:
        test_common_work (name, opts, func, args)
588
# name  :: String
# setup :: TestOpts -> IO ()
def test (name, setup, func, args):
    # Register one test: validate its name, build its options by running
    # the setup function(s), and queue it for execution.
    if config.run_only_some_tests:
        if name not in config.only:
            return
        else:
            # Note [Mutating config.only]
            # config.only is initially the set of tests requested by
            # the user (via 'make TEST='). We then remove all tests that
            # we've already seen (in .T files), so that we can later
            # report on any tests we couldn't find and error out.
            config.only.remove(name)

    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
    myTestOpts = copy.deepcopy(default_testopts)

    executeSetups([thisdir_settings, setup], name, myTestOpts)

    thisTest = lambda : runTest(myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
    else:
        parallelTests.append(thisTest)
    allTestNames.add(name)
625
if config.use_threads:
    def test_common_thread(name, opts, func, args):
        # Thread entry point: run the test under the driver lock, then
        # give the thread slot back to the pool.
        t.lock.acquire()
        try:
            test_common_work(name,opts,func,args)
        finally:
            t.lock.release()
            t.thread_pool.acquire()
            t.running_threads = t.running_threads - 1
            t.thread_pool.notify()
            t.thread_pool.release()
637
def get_package_cache_timestamp():
    """Return the mtime of the package db cache file, or 0.0 when there
    is no cache file or it cannot be stat'ed."""
    if config.package_conf_cache_file == '':
        return 0.0
    else:
        try:
            return os.stat(config.package_conf_cache_file).st_mtime
        except OSError:
            # os.stat raises OSError for a missing/unreadable file;
            # previously a bare 'except:' also swallowed e.g.
            # KeyboardInterrupt.
            return 0.0
646
647
def test_common_work (name, opts, func, args):
    # Core per-test driver: determine which ways to run, gather the
    # test's source files, run each selected way, and record skips and
    # framework failures.
    try:
        t.total_tests = t.total_tests+1
        setLocalTestOpts(opts)

        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()

        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']

        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]

        t.total_test_cases = t.total_test_cases + len(all_ways)

        # A way is runnable unless the test is skipped, restricted to
        # other ways, excluded on the command line, a perf test while
        # perf tests are disabled, or explicitly omitted.
        ok_way = lambda way: \
            not getTestOpts().skip \
            and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and way not in getTestOpts().omit_ways

        # Which ways we are asked to skip
        do_ways = list(filter (ok_way,all_ways))

        # Only run all ways in slow mode.
        # See Note [validate and testsuite speed] in toplevel Makefile.
        if config.accept:
            # Only ever run one way
            do_ways = do_ways[:1]
        elif config.speed > 0:
            # However, if we EXPLICITLY asked for a way (with extra_ways)
            # please test it!
            explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
            other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
            do_ways = other_ways[:1] + explicit_ways

        # Find all files in the source directory that this test
        # depends on. Do this only once for all ways.
        # Generously add all filenames that start with the name of
        # the test to this set, as a convenience to test authors.
        # They will have to use the `extra_files` setup function to
        # specify all other files that their test depends on (but
        # this seems to be necessary for only about 10% of all
        # tests).
        files = set((f for f in os.listdir(opts.srcdir)
                        if f.startswith(name) and
                           not f.endswith(testdir_suffix)))
        for filename in (opts.extra_files + extra_src_files.get(name, [])):
            if filename.startswith('/'):
                framework_fail(name, 'whole-test',
                    'no absolute paths in extra_files please: ' + filename)

            elif '*' in filename:
                # Don't use wildcards in extra_files too much, as
                # globbing is slow.
                files.update((os.path.relpath(f, opts.srcdir)
                             for f in glob.iglob(in_srcdir(filename))))

            else:
                files.add(filename)

        # Run the required tests...
        for way in do_ways:
            if stopping():
                break
            do_test(name, way, func, args, files)

        for way in all_ways:
            if way not in do_ways:
                skiptest (name,way)

        if config.cleanup and do_ways:
            cleanup()

        package_conf_cache_file_end_timestamp = get_package_cache_timestamp();

        # A test must not modify the package database; a changed cache
        # timestamp indicates that it did.
        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))

        # Best-effort bookkeeping of files the test wrote but did not
        # remove; failures here are deliberately ignored.
        try:
            for f in files_written[name]:
                if os.path.exists(f):
                    try:
                        if not f in files_written_not_removed[name]:
                            files_written_not_removed[name].append(f)
                    except:
                        files_written_not_removed[name] = [f]
        except:
            pass
    except Exception as e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
750
def do_test(name, way, func, args, files):
    # Run one test in one way: populate the per-test run directory, run
    # any pre-command, invoke the test function, and account the result
    # against the expectation.
    opts = getTestOpts()

    full_name = name + '(' + way + ')'

    try:
        if_verbose(2, "=====> %s %d of %d %s " % \
                    (full_name, t.total_tests, len(allTestNames), \
                    [t.n_unexpected_passes, \
                     t.n_unexpected_failures, \
                     t.n_framework_failures]))

        # Clean up prior to the test, so that we can't spuriously conclude
        # that it passed on the basis of old run outputs.
        cleanup()

        # Link all source files for this test into a new directory in
        # /tmp, and run the test in that directory. This makes it
        # possible to run tests in parallel, without modification, that
        # would otherwise (accidentally) write to the same output file.
        # It also makes it easier to keep the testsuite clean.

        for extra_file in files:
            src = in_srcdir(extra_file)
            if extra_file.startswith('..'):
                # In case the extra_file is a file in an ancestor
                # directory (e.g. extra_files(['../shell.hs'])), make
                # sure it is copied to the test directory
                # (testdir/shell.hs), instead of ending up somewhere
                # else in the tree (testdir/../shell.hs)
                filename = os.path.basename(extra_file)
            else:
                filename = extra_file
            assert not '..' in filename # no funny stuff (foo/../../bar)
            dst = in_testdir(filename)

            if os.path.isfile(src):
                dirname = os.path.dirname(dst)
                if dirname:
                    mkdirp(dirname)
                try:
                    link_or_copy_file(src, dst)
                except OSError as e:
                    if e.errno == errno.EEXIST and os.path.isfile(dst):
                        # Some tests depend on files from ancestor
                        # directories (e.g. '../shell.hs'). It is
                        # possible such a file was already copied over
                        # for another test, since cleanup() doesn't
                        # delete them.
                        pass
                    else:
                        raise
            elif os.path.isdir(src):
                os.makedirs(dst)
                lndir(src, dst)
            else:
                if not config.haddock and os.path.splitext(filename)[1] == '.t':
                    # When using a ghc built without haddock support, .t
                    # files are rightfully missing. Don't
                    # framework_fail. Test will be skipped later.
                    pass
                else:
                    framework_fail(name, way,
                        'extra_file does not exist: ' + extra_file)

        if not files:
            # Always create the testdir, even when no files were copied
            # (because user forgot to specify extra_files setup function), to
            # prevent the confusing error: can't cd to <testdir>.
            os.makedirs(opts.testdir)

        if func.__name__ == 'run_command' or opts.pre_cmd:
            # When running 'MAKE' make sure 'TOP' still points to the
            # root of the testsuite.
            src_makefile = in_srcdir('Makefile')
            dst_makefile = in_testdir('Makefile')
            if os.path.exists(src_makefile):
                with open(src_makefile, 'r') as src:
                    makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
                with open(dst_makefile, 'w') as dst:
                    dst.write(makefile)

        if config.use_threads:
            # Release the driver lock while the (possibly slow) test runs.
            t.lock.release()

        try:
            preCmd = getTestOpts().pre_cmd
            if preCmd != None:
                result = runCmdFor(name, 'cd "{opts.testdir}" && {preCmd}'.format(**locals()))
                if result != 0:
                    framework_fail(name, way, 'pre-command failed: ' + str(result))
        except:
            framework_fail(name, way, 'pre-command exception')

        try:
            result = func(*[name,way] + args)
        finally:
            if config.use_threads:
                t.lock.acquire()

        if getTestOpts().expect != 'pass' and \
           getTestOpts().expect != 'fail' and \
           getTestOpts().expect != 'missing-lib':
            framework_fail(name, way, 'bad expected ' + getTestOpts().expect)

        try:
            passFail = result['passFail']
        except:
            passFail = 'No passFail found'

        # Accounting: compare the actual result with the expectation.
        if passFail == 'pass':
            if _expect_pass(way):
                t.n_expected_passes = t.n_expected_passes + 1
                if name in t.expected_passes:
                    t.expected_passes[name].append(way)
                else:
                    t.expected_passes[name] = [way]
            else:
                if_verbose(1, '*** unexpected pass for %s' % full_name)
                t.n_unexpected_passes = t.n_unexpected_passes + 1
                addPassingTestInfo(t.unexpected_passes, getTestOpts().testdir, name, way)
        elif passFail == 'fail':
            if _expect_pass(way):
                reason = result['reason']
                tag = result.get('tag')
                if tag == 'stat':
                    if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
                    t.n_unexpected_stat_failures = t.n_unexpected_stat_failures + 1
                    addFailingTestInfo(t.unexpected_stat_failures, getTestOpts().testdir, name, reason, way)
                else:
                    if_verbose(1, '*** unexpected failure for %s' % full_name)
                    t.n_unexpected_failures = t.n_unexpected_failures + 1
                    addFailingTestInfo(t.unexpected_failures, getTestOpts().testdir, name, reason, way)
            else:
                if getTestOpts().expect == 'missing-lib':
                    t.n_missing_libs = t.n_missing_libs + 1
                    if name in t.missing_libs:
                        t.missing_libs[name].append(way)
                    else:
                        t.missing_libs[name] = [way]
                else:
                    t.n_expected_failures = t.n_expected_failures + 1
                    if name in t.expected_failures:
                        t.expected_failures[name].append(way)
                    else:
                        t.expected_failures[name] = [way]
        else:
            framework_fail(name, way, 'bad result ' + passFail)
    except KeyboardInterrupt:
        stopNow()
    except:
        framework_fail(name, way, 'do_test exception')
        traceback.print_exc()
904
def addPassingTestInfo (testInfos, directory, name, way):
    """Record an unexpected pass: testInfos maps directory -> test name
    -> list of ways."""
    # Strip a leading './' or '.\' from the directory name.
    directory = re.sub('^\\.[/\\\\]', '', directory)

    ways = testInfos.setdefault(directory, {}).setdefault(name, [])
    ways.append(way)
915
def addFailingTestInfo (testInfos, directory, name, reason, way):
    """Record an unexpected failure: testInfos maps directory -> test
    name -> failure reason -> list of ways."""
    # Strip a leading './' or '.\' from the directory name.
    directory = re.sub('^\\.[/\\\\]', '', directory)

    ways = testInfos.setdefault(directory, {}) \
                    .setdefault(name, {}) \
                    .setdefault(reason, [])
    ways.append(way)
929
def skiptest (name, way):
    # Record that 'name' was skipped in 'way' (counter plus per-test
    # list of skipped ways).
    t.n_tests_skipped = t.n_tests_skipped + 1
    if name in t.tests_skipped:
        t.tests_skipped[name].append(way)
    else:
        t.tests_skipped[name] = [way]
937
def framework_fail( name, way, reason ):
    # Record a failure of the test framework itself (as opposed to the
    # test failing) and report it at verbosity 1.
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
    t.n_framework_failures = t.n_framework_failures + 1
    if name in t.framework_failures:
        t.framework_failures[name].append(way)
    else:
        t.framework_failures[name] = [way]
946
def badResult(result):
    """True unless 'result' is a result dict whose 'passFail' entry is
    'pass'; malformed results count as bad."""
    try:
        return result['passFail'] != 'pass'
    except:
        # Not a dict, or no 'passFail' key: treat as bad.
        return True
954
def passed():
    """A successful test result."""
    return {'passFail': 'pass'}

def failBecause(reason, tag=None):
    """A failing test result with a human-readable 'reason' and an
    optional 'tag' (e.g. 'stat' for statistics failures)."""
    return {'passFail': 'fail', 'reason': reason, 'tag': tag}
960
961 # -----------------------------------------------------------------------------
962 # Generic command tests
963
964 # A generic command test is expected to run and exit successfully.
965 #
966 # The expected exit code can be changed via exit_code() as normal, and
967 # the expected stdout/stderr are stored in <testname>.stdout and
968 # <testname>.stderr. The output of the command can be ignored
969 # altogether by using run_command_ignore_output instead of
970 # run_command.
971
def run_command( name, way, cmd ):
    # Run an arbitrary shell command as the test body.
    return simple_run( name, '', cmd, '' )
974
975 # -----------------------------------------------------------------------------
976 # GHCi tests
977
def ghci_script( name, way, script, override_flags = None ):
    # Run GHCi with 'script' as its stdin.
    # filter out -fforce-recomp from compiler_always_flags, because we're
    # actually testing the recompilation behaviour in the GHCi tests.
    flags = ' '.join(get_compiler_flags(override_flags, noforce=True))

    way_flags = ' '.join(config.way_flags(name)[way])

    # We pass HC and HC_OPTS as environment variables, so that the
    # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
    cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {flags} {way_flags}'
          ).format(flags=flags, way_flags=way_flags)

    getTestOpts().stdin = script
    return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
992
993 # -----------------------------------------------------------------------------
994 # Compile-only tests
995
def compile_override_default_flags(overrides):
    # Compile (expecting success) with 'overrides' replacing the default
    # compiler flags.
    def apply(name, way, extra_opts):
        return do_compile(name, way, 0, '', [], extra_opts, overrides)

    return apply

def compile_fail_override_default_flags(overrides):
    # Compile (expecting failure) with 'overrides' replacing the default
    # compiler flags.
    def apply(name, way, extra_opts):
        return do_compile(name, way, 1, '', [], extra_opts, overrides)

    return apply
1007
def compile_without_flag(flag):
    # Compile (expecting success) with 'flag' removed from the default
    # compiler flags.
    def apply(name, way, extra_opts):
        overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
        return compile_override_default_flags(overrides)(name, way, extra_opts)

    return apply
1014
def compile_fail_without_flag(flag):
    # Compile (expecting failure) with 'flag' removed from the default
    # compiler flags.
    def apply(name, way, extra_opts):
        # Bug fix: 'getTestOpts' was referenced without calling it
        # ('getTestOpts.compiler_always_flags'), which raises
        # AttributeError at run time. Mirrors compile_without_flag.
        overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
        return compile_fail_override_default_flags(overrides)(name, way, extra_opts)

    return apply
1021
def compile( name, way, extra_hc_opts ):
    # Compile a single module; expect success.
    return do_compile( name, way, 0, '', [], extra_hc_opts )

def compile_fail( name, way, extra_hc_opts ):
    # Compile a single module; expect the compilation to fail.
    return do_compile( name, way, 1, '', [], extra_hc_opts )

def multimod_compile( name, way, top_mod, extra_hc_opts ):
    # Compile a multi-module program rooted at 'top_mod'; expect success.
    return do_compile( name, way, 0, top_mod, [], extra_hc_opts )

def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
    # Compile a multi-module program rooted at 'top_mod'; expect failure.
    return do_compile( name, way, 1, top_mod, [], extra_hc_opts )

def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Compile with extra prerequisite modules; expect success.
    return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)

def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Compile with extra prerequisite modules; expect failure.
    return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
1039
def do_compile( name, way, should_fail, top_mod, extra_mods, extra_hc_opts, override_flags = None ):
    # Compile-only test driver: build extra modules, build the test
    # itself, then compare the compiler's stderr with the expected file.
    # print 'Compile only, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    # Force recompilation when extra modules were built first.
    force = 0
    if extra_mods:
        force = 1
    result = simple_build( name, way, extra_hc_opts, should_fail, top_mod, 0, 1, force, override_flags )

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    expected_stderr_file = find_expected_file(name, 'stderr')
    actual_stderr_file = add_suffix(name, 'comp.stderr')

    if not compare_outputs(way, 'stderr',
                           join_normalisers(getTestOpts().extra_errmsg_normaliser,
                                            normalise_errmsg),
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=normalise_whitespace):
        return failBecause('stderr mismatch')

    # no problems found, this test passed
    return passed()
1072
def compile_cmp_asm(name, way, extra_hc_opts):
    """Compile a .cmm file with -keep-s-files and compare the generated
    assembly against the expected .asm sample."""
    print('Compile only, extra args = ', extra_hc_opts)

    build_result = simple_build(name + '.cmm', way,
                                '-keep-s-files -O ' + extra_hc_opts,
                                0, '', 0, 0, 0)
    if badResult(build_result):
        return build_result

    # Compare the produced .s file with the expected assembly sample;
    # warnings during compilation do not matter here.
    expected_asm_file = find_expected_file(name, 'asm')
    actual_asm_file = add_suffix(name, 's')
    norm = join_normalisers(normalise_errmsg, normalise_asm)

    if compare_outputs(way, 'asm', norm, expected_asm_file, actual_asm_file):
        return passed()
    return failBecause('asm mismatch')
1094
1095 # -----------------------------------------------------------------------------
1096 # Compile-and-run tests
1097
def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts ):
    """Compile a test and run the resulting program (or run it under
    GHCi for 'ghci*' ways), then check exit code and stdout/stderr."""
    # print 'Compile and run, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    # extras_build may have extended the options (e.g. with .o files).
    extra_hc_opts = result['hc_opts']

    if way.startswith('ghci'): # interpreted...
        return interpreter_run( name, way, extra_hc_opts, 0, top_mod )
    else: # compiled...
        force = 0
        if extra_mods:
            # Passed to simple_build as `noforce` (drops -fforce-recomp).
            force = 1

        result = simple_build( name, way, extra_hc_opts, 0, top_mod, 1, 1, force)
        if badResult(result):
            return result

        cmd = './' + name;

        # we don't check the compiler's stderr for a compile-and-run test
        return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1121
# Convenience wrappers around compile_and_run__.

def compile_and_run(name, way, extra_hc_opts):
    """Compile-and-run a single-module program."""
    return compile_and_run__(name, way, '', [], extra_hc_opts)

def multimod_compile_and_run(name, way, top_mod, extra_hc_opts):
    """Compile-and-run a multi-module program rooted at top_mod."""
    return compile_and_run__(name, way, top_mod, [], extra_hc_opts)

def multi_compile_and_run(name, way, top_mod, extra_mods, extra_hc_opts):
    """Compile-and-run with extra modules built beforehand."""
    return compile_and_run__(name, way, top_mod, extra_mods, extra_hc_opts)
1130
def stats(name, way, stats_file):
    # A 'stats' test only checks the runtime -t statistics against the
    # ranges configured for this test.
    return checkStats(name, way, stats_file, getTestOpts().stats_range_fields)
1134
1135 # -----------------------------------------------------------------------------
1136 # Check -t stats info
1137
def checkStats(name, way, stats_file, range_fields):
    """Check machine-readable RTS stats in `stats_file` against
    `range_fields`, a dict mapping a field name to (expected value,
    allowed percentage deviation).  Returns passed()/failBecause(...)."""
    full_name = name + '(' + way + ')'

    result = passed()
    if len(range_fields) > 0:
        try:
            # Close the handle deterministically (previously leaked until GC).
            with open(in_testdir(stats_file)) as f:
                contents = f.read()
        except IOError as e:
            return failBecause(str(e))

        for (field, (expected, dev)) in range_fields.items():
            m = re.search('\("' + field + '", "([0-9]+)"\)', contents)
            if m is None:
                print('Failed to find field: ', field)
                result = failBecause('no such stats field')
                # BUG FIX: previously fell through and crashed with
                # AttributeError on m.group(1); skip to the next field.
                continue
            val = int(m.group(1))

            # Allowed window around the expected value, in whole units.
            lowerBound = trunc(           expected * ((100 - float(dev))/100))
            upperBound = trunc(0.5 + ceil(expected * ((100 + float(dev))/100)))

            deviation = round(((float(val) * 100)/ expected) - 100, 1)

            if val < lowerBound:
                print(field, 'value is too low:')
                print('(If this is because you have improved GHC, please')
                print('update the test so that GHC doesn\'t regress again)')
                result = failBecause('stat too good', tag='stat')
            if val > upperBound:
                print(field, 'value is too high:')
                result = failBecause('stat not good enough', tag='stat')

            if val < lowerBound or val > upperBound or config.verbose >= 4:
                # Column width that fits the widest of the numbers shown.
                length = max(len(str(x)) for x in [expected, lowerBound, upperBound, val])

                def display(descr, val, extra):
                    print(descr, str(val).rjust(length), extra)

                display('    Expected    ' + full_name + ' ' + field + ':', expected, '+/-' + str(dev) + '%')
                display('    Lower bound ' + full_name + ' ' + field + ':', lowerBound, '')
                display('    Upper bound ' + full_name + ' ' + field + ':', upperBound, '')
                display('    Actual      ' + full_name + ' ' + field + ':', val, '')
                if val != expected:
                    display('    Deviation   ' + full_name + ' ' + field + ':', deviation, '%')

    return result
1189
1190 # -----------------------------------------------------------------------------
1191 # Build a single-module program
1192
def extras_build(way, extra_mods, extra_hc_opts):
    # Build each auxiliary module first.  For non-Haskell sources, the
    # resulting object file is appended to the compile options so the
    # main build links against it.
    for mod, mod_opts in extra_mods:
        build_result = simple_build(mod, way, mod_opts + ' ' + extra_hc_opts, 0, '', 0, 0, 0)
        is_haskell = mod.endswith('.hs') or mod.endswith('.lhs')
        if not is_haskell:
            extra_hc_opts += ' ' + replace_suffix(mod, 'o')
        if badResult(build_result):
            return build_result

    return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1203
1204
def simple_build( name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, noforce, override_flags = None ):
    """Run the compiler once over a test.

    top_mod != ''  -> compile with --make rooted at top_mod
    link           -> produce an executable (-o name)
    addsuf         -> derive the source file name from `name` by adding
                      the extension implied by the test options
    noforce        -> drop -fforce-recomp from the default flags
    should_fail    -> expect a non-zero compiler exit code
    Compiler output is captured in <name>.comp.stderr in the test
    directory.  Returns passed() / failBecause(...).
    """
    opts = getTestOpts()
    errname = add_suffix(name, 'comp.stderr')

    # Work out the source file to hand to the compiler.
    if top_mod != '':
        srcname = top_mod
        base, suf = os.path.splitext(top_mod)
    elif addsuf:
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = name

    # Work out the compilation mode flags.
    to_do = ''
    if top_mod != '':
        to_do = '--make '
        if link:
            to_do = to_do + '-o ' + name
    elif link:
        to_do = '-o ' + name
    elif opts.compile_to_hc:
        to_do = '-C'
    else:
        to_do = '-c' # just compile

    # Ask the RTS for machine-readable stats when this test checks them.
    stats_file = name + '.comp.stats'
    if len(opts.compiler_stats_range_fields) > 0:
        extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'

    # Required by GHC 7.3+, harmless for earlier versions:
    if (getTestOpts().c_src or
        getTestOpts().objc_src or
        getTestOpts().objcpp_src or
        getTestOpts().cmm_src):
        extra_hc_opts += ' -no-hs-main '

    if getTestOpts().compile_cmd_prefix == '':
        cmd_prefix = ''
    else:
        cmd_prefix = getTestOpts().compile_cmd_prefix + ' '

    flags = ' '.join(get_compiler_flags(override_flags, noforce) +
                     config.way_flags(name)[way])

    # '{compiler}' is left as a placeholder; runCmdFor formats the command
    # against config before executing it.
    cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
           '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts} '
           '> {errname} 2>&1'
          ).format(**locals())

    result = runCmdFor(name, cmd, timeout_multiplier=opts.compile_timeout_multiplier)

    if result != 0 and not should_fail:
        if config.verbose >= 1 and _expect_pass(way):
            print('Compile failed (status ' + repr(result) + ') errors were:')
            actual_stderr_path = in_testdir(name, 'comp.stderr')
            if_verbose_dump(1, actual_stderr_path)

    # ToDo: if the sub-shell was killed by ^C, then exit

    statsResult = checkStats(name, way, stats_file, opts.compiler_stats_range_fields)

    if badResult(statsResult):
        return statsResult

    if should_fail:
        if result == 0:
            return failBecause('exit code 0')
    else:
        if result != 0:
            return failBecause('exit code non-0')

    return passed()
1276
1277 # -----------------------------------------------------------------------------
1278 # Run a program and check its output
1279 #
1280 # If testname.stdin exists, route input from that, else
1281 # from /dev/null. Route output to testname.run.stdout and
1282 # testname.run.stderr. Returns the exit code of the run.
1283
def simple_run(name, way, prog, extra_run_opts):
    """Run the compiled program `prog` in the test directory and check
    its exit code and stdout/stderr.  Also validates heap/time profiles
    and RTS stats when the way's RTS flags / test options enable them."""
    opts = getTestOpts()

    # figure out what to use for stdin
    if opts.stdin != '':
        use_stdin = opts.stdin
    else:
        stdin_file = add_suffix(name, 'stdin')
        if os.path.exists(in_srcdir(stdin_file)):
            use_stdin = stdin_file
        else:
            use_stdin = '/dev/null'

    run_stdout = add_suffix(name,'run.stdout')
    run_stderr = add_suffix(name,'run.stderr')

    my_rts_flags = rts_flags(way)

    # Collect machine-readable '+RTS -t' stats when the test checks them.
    stats_file = name + '.stats'
    if len(opts.stats_range_fields) > 0:
        stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    else:
        stats_args = ''

    if opts.no_stdin:
        stdin_comes_from = ''
    else:
        stdin_comes_from = ' <' + use_stdin

    if opts.combined_output:
        redirection = ' > {0} 2>&1'.format(run_stdout)
        redirection_append = ' >> {0} 2>&1'.format(run_stdout)
    else:
        redirection = ' > {0} 2> {1}'.format(run_stdout, run_stderr)
        redirection_append = ' >> {0} 2>> {1}'.format(run_stdout, run_stderr)

    # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
    cmd = prog + stats_args + ' ' \
        + my_rts_flags + ' ' \
        + extra_run_opts + ' ' \
        + stdin_comes_from \
        + redirection

    if opts.cmd_wrapper != None:
        cmd = opts.cmd_wrapper(cmd) + redirection_append

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    # run the command
    result = runCmdFor(name, cmd, timeout_multiplier=opts.run_timeout_multiplier)

    # runCmdFor yields a wait-status style value: exit code in the high
    # byte, terminating signal (if any) in the low byte.
    exit_code = result >> 8
    signal = result & 0xff

    # check the exit code
    if exit_code != opts.exit_code:
        if config.verbose >= 1 and _expect_pass(way):
            print('Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')')
            dump_stdout(name)
            dump_stderr(name)
        return failBecause('bad exit code')

    # Heap/time profile checks are triggered by the way's RTS flags.
    check_hp = my_rts_flags.find("-h") != -1
    check_prof = my_rts_flags.find("-p") != -1

    if not opts.ignore_output:
        bad_stderr = not opts.combined_output and not check_stderr_ok(name, way)
        bad_stdout = not check_stdout_ok(name, way)
        if bad_stderr:
            return failBecause('bad stderr')
        if bad_stdout:
            return failBecause('bad stdout')
        # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
        if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
            return failBecause('bad heap profile')
        if check_prof and not check_prof_ok(name, way):
            return failBecause('bad profile')

    return checkStats(name, way, stats_file, opts.stats_range_fields)
1363
def rts_flags(way):
    # Render the per-way RTS arguments as a '+RTS ... -RTS' chunk,
    # or '' when there is nothing to pass.
    if way == '':
        return ''
    args = config.way_rts_flags[way]
    if not args:
        return ''
    return '+RTS ' + ' '.join(args) + ' -RTS'
1374
1375 # -----------------------------------------------------------------------------
1376 # Run a program in the interpreter and check its output
1377
def interpreter_run( name, way, extra_hc_opts, compile_only, top_mod ):
    """Run a test under GHCi.  A generated script sets the program
    name/args to match the compiled environment, writes delimiter lines
    so GHCi's own output can be separated from the program's, then runs
    Main.main.  The combined output is split afterwards and checked like
    a compiled run."""
    opts = getTestOpts()

    outname = add_suffix(name, 'interp.stdout')
    errname = add_suffix(name, 'interp.stderr')

    if (top_mod == ''):
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = top_mod

    scriptname = add_suffix(name, 'genscript')
    qscriptname = in_testdir(scriptname)

    delimiter = '===== program output begins here\n'

    script = open(qscriptname, 'w')
    if not compile_only:
        # set the prog name and command-line args to match the compiled
        # environment.
        script.write(':set prog ' + name + '\n')
        script.write(':set args ' + getTestOpts().extra_run_opts + '\n')
        # Add marker lines to the stdout and stderr output files, so we
        # can separate GHCi's output from the program's.
        script.write(':! echo ' + delimiter)
        script.write(':! echo 1>&2 ' + delimiter)
        # Set stdout to be line-buffered to match the compiled environment.
        script.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
        # wrapping in GHC.TopHandler.runIO ensures we get the same output
        # in the event of an exception as for the compiled program.
        script.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
    script.close()

    # figure out what to use for stdin
    if getTestOpts().stdin != '':
        stdin_file = in_srcdir(opts.stdin)
    else:
        stdin_file = in_srcdir(name, 'stdin')

    if os.path.exists(stdin_file):
        # GHCi reads the script on stdin, so the test's stdin is simply
        # appended to the script file.
        os.system('cat ' + stdin_file + ' >>' + qscriptname)

    flags = ' '.join(get_compiler_flags(override_flags=None, noforce=False) +
                     config.way_flags(name)[way])

    if getTestOpts().combined_output:
        redirection = ' > {0} 2>&1'.format(outname)
        redirection_append = ' >> {0} 2>&1'.format(outname)
    else:
        redirection = ' > {0} 2> {1}'.format(outname, errname)
        redirection_append = ' >> {0} 2>> {1}'.format(outname, errname)

    # '{compiler}' is left as a placeholder; runCmdFor formats the
    # command against config before executing it.
    cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts} '
           '< {scriptname} {redirection}'
          ).format(**locals())

    if getTestOpts().cmd_wrapper != None:
        cmd = getTestOpts().cmd_wrapper(cmd) + redirection_append;

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    result = runCmdFor(name, cmd, timeout_multiplier=opts.run_timeout_multiplier)

    # Wait-status style value: exit code in the high byte, signal low.
    exit_code = result >> 8
    signal = result & 0xff

    # split the stdout into compilation/program output
    split_file(in_testdir(outname), delimiter,
               in_testdir(name, 'comp.stdout'),
               in_testdir(name, 'run.stdout'))
    split_file(in_testdir(errname), delimiter,
               in_testdir(name, 'comp.stderr'),
               in_testdir(name, 'run.stderr'))

    # check the exit code
    if exit_code != getTestOpts().exit_code:
        print('Wrong exit code (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
        dump_stdout(name)
        dump_stderr(name)
        return failBecause('bad exit code')

    # ToDo: if the sub-shell was killed by ^C, then exit

    if getTestOpts().ignore_output or (check_stderr_ok(name, way) and
                                       check_stdout_ok(name, way)):
        return passed()
    else:
        return failBecause('bad stdout or stderr')
1466
1467
def split_file(in_fn, delimiter, out1_fn, out2_fn):
    """Split the file `in_fn` at the first line equal to `delimiter`
    (after stripping leading whitespace and Windows CRs): everything
    before the delimiter goes to `out1_fn`, everything after it to
    `out2_fn`.  The delimiter line itself is dropped."""
    # FIX: use context managers so all three handles are closed even on
    # error (the input file was previously never closed at all).
    with open(in_fn) as infile:
        with open(out1_fn, 'w') as out1:
            line = infile.readline()
            line = re.sub('\r', '', line) # ignore Windows EOL
            while (re.sub('^\s*', '', line) != delimiter and line != ''):
                out1.write(line)
                line = infile.readline()
                line = re.sub('\r', '', line)

        with open(out2_fn, 'w') as out2:
            line = infile.readline()
            while (line != ''):
                out2.write(line)
                line = infile.readline()
1486
1487 # -----------------------------------------------------------------------------
1488 # Utils
def get_compiler_flags(override_flags, noforce):
    """Assemble the compiler flag list for the current test.
    `override_flags` (when not None) replaces the default always-flags;
    `noforce` removes -fforce-recomp."""
    opts = getTestOpts()

    base = opts.compiler_always_flags if override_flags is None else override_flags
    flags = list(base)  # copy: we must not mutate the caller's list

    if noforce:
        flags = [f for f in flags if f != '-fforce-recomp']

    flags.append(opts.extra_hc_opts)

    if opts.outputdir is not None:
        flags.extend(["-outputdir", opts.outputdir])

    return flags
1506
def check_stdout_ok(name, way):
    # Compare the program's stdout against the expected sample after
    # normalisation; a test may install its own checker via check_stdout.
    expected_stdout_file = find_expected_file(name, 'stdout')
    actual_stdout_file = add_suffix(name, 'run.stdout')
    extra_norm = join_normalisers(normalise_output, getTestOpts().extra_normaliser)

    custom_check = getTestOpts().check_stdout
    if custom_check:
        return custom_check(in_testdir(actual_stdout_file), extra_norm)

    return compare_outputs(way, 'stdout', extra_norm,
                           expected_stdout_file, actual_stdout_file)

def dump_stdout(name):
    # Echo the program's captured stdout to the console.
    print('Stdout:')
    print(read_no_crs(in_testdir(name, 'run.stdout')))

def check_stderr_ok(name, way):
    # Same idea as check_stdout_ok, but for stderr with the error-message
    # normalisers and whitespace squashing.
    norm = join_normalisers(normalise_errmsg, getTestOpts().extra_errmsg_normaliser)
    return compare_outputs(way, 'stderr', norm,
                           find_expected_file(name, 'stderr'),
                           add_suffix(name, 'run.stderr'),
                           whitespace_normaliser=normalise_whitespace)

def dump_stderr(name):
    # Echo the program's captured stderr to the console.
    print("Stderr:")
    print(read_no_crs(in_testdir(name, 'run.stderr')))
1537
1538 def read_no_crs(file):
1539 str = ''
1540 try:
1541 h = open(file)
1542 str = h.read()
1543 h.close
1544 except:
1545 # On Windows, if the program fails very early, it seems the
1546 # files stdout/stderr are redirected to may not get created
1547 pass
1548 return re.sub('\r', '', str)
1549
def write_file(file, str):
    """Write `str` to `file`, replacing any existing contents."""
    # FIX: the original ended with 'h.close' (missing parentheses), so
    # the handle was only closed when garbage-collected; the with-block
    # closes (and flushes) it deterministically.
    with open(file, 'w') as h:
        h.write(str)
1554
def check_hp_ok(name):
    """Validate the test's heap profile: run hp2ps over it and, when
    ghostscript is available, check the resulting PostScript is valid.
    Returns a truthy value on success."""
    opts = getTestOpts()

    # do not qualify for hp2ps because we should be in the right directory
    hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())

    hp2psResult = runCmdExitCode(hp2psCmd)

    actual_ps_path = in_testdir(name, 'ps')

    if(hp2psResult == 0):
        if (os.path.exists(actual_ps_path)):
            if gs_working:
                gsResult = runCmdExitCode(genGSCmd(actual_ps_path))
                if (gsResult == 0):
                    return (True)
                else:
                    # NOTE(review): after this print the function falls
                    # through and returns None (falsy) — verify that is
                    # intended rather than an explicit False.
                    print("hp2ps output for " + name + "is not valid PostScript")
            else: return (True) # assume postscript is valid without ghostscript
        else:
            print("hp2ps did not generate PostScript for " + name)
            return (False)
    else:
        print("hp2ps error when processing heap profile for " + name)
        return(False)
1580
def check_prof_ok(name, way):
    # Only validate the .prof output when the test ships a sample to
    # compare against.
    expected_prof_file = find_expected_file(name, 'prof.sample')
    if not os.path.exists(in_testdir(expected_prof_file)):
        return True

    actual_prof_file = add_suffix(name, 'prof')
    actual_prof_path = in_testdir(actual_prof_file)

    if not os.path.exists(actual_prof_path):
        print(actual_prof_path + " does not exist")
        return False

    if os.path.getsize(actual_prof_path) == 0:
        print(actual_prof_path + " is empty")
        return False

    return compare_outputs(way, 'prof', normalise_prof,
                           expected_prof_file, actual_prof_file,
                           whitespace_normaliser=normalise_whitespace)
1604
1605 # Compare expected output to actual output, and optionally accept the
1606 # new output. Returns true if output matched or was accepted, false
1607 # otherwise. See Note [Output comparison] for the meaning of the
1608 # normaliser and whitespace_normaliser parameters.
def compare_outputs(way, kind, normaliser, expected_file, actual_file,
                    whitespace_normaliser=lambda x:x):
    """Compare the expected sample (from the source dir) with the actual
    output (from the test dir) after normalisation.  Returns 1 on match
    or when --accept updated the sample, 0 otherwise."""

    expected_path = in_srcdir(expected_file)
    actual_path = in_testdir(actual_file)

    if os.path.exists(expected_path):
        expected_str = normaliser(read_no_crs(expected_path))
        # Create the .normalised file in the testdir, not in the srcdir.
        expected_normalised_file = add_suffix(expected_file, 'normalised')
        expected_normalised_path = in_testdir(expected_normalised_file)
    else:
        # Missing sample means "expect no output".
        expected_str = ''
        expected_normalised_path = '/dev/null'

    actual_raw = read_no_crs(actual_path)
    actual_str = normaliser(actual_raw)

    # See Note [Output comparison].
    if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
        return 1
    else:
        if config.verbose >= 1 and _expect_pass(way):
            print('Actual ' + kind + ' output differs from expected:')

        # Write the normalised strings out so they can be diffed.
        if expected_normalised_path != '/dev/null':
            write_file(expected_normalised_path, expected_str)

        actual_normalised_path = add_suffix(actual_path, 'normalised')
        write_file(actual_normalised_path, actual_str)

        if config.verbose >= 1 and _expect_pass(way):
            # See Note [Output comparison].
            r = os.system('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
                                                        actual_normalised_path))

            # If for some reason there were no non-whitespace differences,
            # then do a full diff
            if r == 0:
                r = os.system('diff -u "{0}" "{1}"'.format(expected_normalised_path,
                                                           actual_normalised_path))

        # With --accept, update (or delete) the expected sample in the
        # source tree — unless this test is expected to fail anyway.
        if config.accept and (getTestOpts().expect == 'fail' or
                              way in getTestOpts().expect_fail_for):
            if_verbose(1, 'Test is expected to fail. Not accepting new output.')
            return 0
        elif config.accept and actual_raw:
            if_verbose(1, 'Accepting new output.')
            write_file(expected_path, actual_raw)
            return 1
        elif config.accept:
            if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
            os.remove(expected_path)
            return 1
        else:
            return 0
1665
1666 # Note [Output comparison]
1667 #
1668 # We do two types of output comparison:
1669 #
1670 # 1. To decide whether a test has failed. We apply a `normaliser` and an
1671 # optional `whitespace_normaliser` to the expected and the actual
1672 # output, before comparing the two.
1673 #
1674 # 2. To show as a diff to the user when the test indeed failed. We apply
1675 # the same `normaliser` function to the outputs, to make the diff as
1676 # small as possible (only showing the actual problem). But we don't
1677 # apply the `whitespace_normaliser` here, because it might completely
1678 # squash all whitespace, making the diff unreadable. Instead we rely
1679 # on the `diff` program to ignore whitespace changes as much as
1680 # possible (#10152).
1681
def normalise_whitespace( str ):
    # Collapse every run of whitespace to a single space and trim both ends.
    return ' '.join(str.split())
1685
callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')

def normalise_callstacks(s):
    # Scrub run-to-run variation out of GHC call-stack output.
    def repl(matches):
        loc = normalise_slashes_(matches.group(1))
        return ', called at {0}:<line>:<column> in <package-id>:'.format(loc)

    # Ignore line number differences in call stacks (#10834).
    s = re.sub(callSite_re, repl, s)
    # Ignore the change in how we identify implicit call-stacks
    s = s.replace('from ImplicitParams', 'from HasCallStack')
    if not getTestOpts().keep_prof_callstacks:
        # Don't output prof callstacks. Test output should be
        # independent from the WAY we run the test.
        s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
    return s
1703
tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)

def normalise_type_reps(str):
    """ Normalise out fingerprints from Typeable TyCon representations """
    return tyCon_re.sub('TyCon FINGERPRINT FINGERPRINT ', str)
1709
def normalise_errmsg( str ):
    """Normalise error-messages emitted via stderr"""
    # IBM AIX's `ld` is a bit chatty
    if opsys('aix'):
        str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    str = normalise_callstacks(str)
    str = normalise_type_reps(str)

    # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
    # the colon is there because it appears in error messages; this
    # hacky solution is used in place of more sophisticated filename
    # mangling
    str = re.sub('([^\\s])\\.exe', '\\1', str)
    # normalise slashes, minimise Windows/Unix filename differences
    str = re.sub('\\\\', '/', str)
    # The inplace ghc's are called ghc-stage[123] to avoid filename
    # collisions, so we need to normalise that to just "ghc"
    str = re.sub('ghc-stage[123]', 'ghc', str)
    # Error messages sometimes contain integer implementation package
    str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
    # Also filter out bullet characters. This is because bullets are used to
    # separate error sections, and tests shouldn't be sensitive to how the
    # the division happens.
    bullet = u'•'.encode('utf8') if isinstance(str, bytes) else u'•'
    str = str.replace(bullet, '')
    return str
1740
1741 # normalise a .prof file, so that we can reasonably compare it against
1742 # a sample. This doesn't compare any of the actual profiling data,
1743 # only the shape of the profile and the number of entries.
def normalise_prof (str):
    """Normalise a .prof file so it can reasonably be compared against a
    sample: keep only the profile's shape (cost centre, module, src) and
    the entry counts, dropping all timing/allocation figures."""
    # strip everything up to the line beginning "COST CENTRE"
    str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)

    # strip results for CAFs, these tend to change unpredictably
    str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)

    # XXX Ignore Main.main.  Sometimes this appears under CAF, and
    # sometimes under MAIN.
    str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)

    # We have something like this:
    #
    # MAIN      MAIN <built-in>      53  0  0.0   0.2  0.0 100.0
    #  CAF      Main <entire-module> 105 0  0.0   0.3  0.0  62.5
    #   readPrec  Main Main_1.hs:7:13-16 109 1  0.0 0.6 0.0 0.6
    #   readPrec  Main Main_1.hs:4:13-16 107 1  0.0 0.6 0.0 0.6
    #   main      Main Main_1.hs:(10,1)-(20,20) 106 1 0.0 20.2 0.0 61.0
    #    ==        Main Main_1.hs:7:25-26 114 1 0.0 0.0 0.0 0.0
    #    ==        Main Main_1.hs:4:25-26 113 1 0.0 0.0 0.0 0.0
    #    showsPrec Main Main_1.hs:7:19-22 112 2 0.0 1.2 0.0 1.2
    #    showsPrec Main Main_1.hs:4:19-22 111 2 0.0 0.9 0.0 0.9
    #    readPrec  Main Main_1.hs:7:13-16 110 0 0.0 18.8 0.0 18.8
    #    readPrec  Main Main_1.hs:4:13-16 108 0 0.0 19.9 0.0 19.9
    #
    # then we remove all the specific profiling data, leaving only the cost
    # centre name, module, src, and entries, to end up with this: (modulo
    # whitespace between columns)
    #
    # MAIN      MAIN <built-in>         0
    # readPrec  Main Main_1.hs:7:13-16  1
    # readPrec  Main Main_1.hs:4:13-16  1
    # ==        Main Main_1.hs:7:25-26  1
    # ==        Main Main_1.hs:4:25-26  1
    # showsPrec Main Main_1.hs:7:19-22  2
    # showsPrec Main Main_1.hs:4:19-22  2
    # readPrec  Main Main_1.hs:7:13-16  0
    # readPrec  Main Main_1.hs:4:13-16  0

    # Split 9 whitespace-separated groups, take columns 1 (cost-centre), 2
    # (module), 3 (src), and 5 (entries). SCC names can't have whitespace, so
    # this works fine.
    str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
            '\\1 \\2 \\3 \\5\n', str)
    return str
1789
def normalise_slashes_( str ):
    # Minimise Windows/Unix path differences: backslash -> forward slash.
    return str.replace('\\', '/')
1793
def normalise_exe_( str ):
    # Drop Windows '.exe' suffixes wherever they appear in the text.
    return str.replace('.exe', '')
1797
def normalise_output( str ):
    """Normalise program output before comparison with a sample."""
    # Remove " error:" and lower-case " Warning:" to keep the patch for
    # trac issue #10021 small.
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    # Remove a .exe extension (for Windows); this can occur in error
    # messages generated by the program.
    str = re.sub('([^\\s])\\.exe', '\\1', str)
    # Scrub call stacks and Typeable fingerprints (same order as before:
    # callstacks first, then type reps).
    str = normalise_type_reps(normalise_callstacks(str))
    return str
1809
def normalise_asm( str ):
    """Reduce assembly output to a comparable skeleton: keep mnemonics
    and labels, drop assembler directives, all operands except call
    targets, and blank lines."""
    directive = re.compile('^[ \t]*\\..*$')
    kept = []
    for line in str.split('\n'):
        # Drop metadata directives (e.g. ".type", ".text").
        if directive.match(line):
            continue
        tokens = re.sub('@plt', '', line).lstrip().split()
        # Drop empty lines.
        if not tokens:
            continue
        if tokens[0] == 'call':
            # Keep the call target; it is significant for the comparison.
            kept.append(tokens[0] + ' ' + tokens[1])
        else:
            kept.append(tokens[0])
    return '\n'.join(kept)
1830
def if_verbose(n, s):
    # Print s only at verbosity level n or above.
    if config.verbose >= n:
        print(s)

def if_verbose_dump(n, f):
    # At verbosity level n or above, dump the contents of file f;
    # print an empty line if the file cannot be read.
    if config.verbose < n:
        return
    try:
        print(open(f).read())
    except:
        print('')
1841
def rawSystem(cmd_and_args):
    # We prefer subprocess.call to os.spawnv as the latter
    # seems to send its arguments through a shell or something
    # with the Windows (non-cygwin) python. An argument "a b c"
    # turns into three arguments ["a", "b", "c"].
    prog = cmd_and_args[0]
    args = cmd_and_args[1:]
    return subprocess.call([strip_quotes(prog)] + args)
1850
# Note that this doesn't handle the timeout itself; it is just used for
# commands that have timeout handling built-in.
def rawSystemWithTimeout(cmd_and_args):
    # Run via rawSystem and interpret the timeout program's special exit
    # codes: 98 means ^C was pressed (stop the whole run), 99 means the
    # command timed out.
    r = rawSystem(cmd_and_args)
    if r == 98:
        # The python timeout program uses 98 to signal that ^C was pressed
        stopNow()
    if r == 99 and getTestOpts().exit_code != 99:
        # Only print a message when timeout killed the process unexpectedly.
        cmd = cmd_and_args[-1]
        if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
    return r
1863
1864 # cmd is a complex command in Bourne-shell syntax
1865 # e.g (cd . && 'c:/users/simonpj/darcs/HEAD/compiler/stage1/ghc-inplace' ...etc)
1866 # Hence it must ultimately be run by a Bourne shell
1867 #
1868 # Mostly it invokes the command wrapped in 'timeout' thus
1869 # timeout 300 'cd . && ...blah blah'
1870 # so it's timeout's job to invoke the Bourne shell
1871 #
1872 # But watch out for the case when there is no timeout program!
1873 # Then, when using the native Python, os.system will invoke the cmd shell
1874
def runCmd( cmd ):
    """Run `cmd` (Bourne-shell syntax), wrapped in the timeout program
    when one is configured.  '{name}' placeholders in cmd are filled in
    from config first.  Returns the status shifted left 8 bits."""
    # Format cmd using config. Example: cmd='{hpc} report A.tix'
    cmd = cmd.format(**config.__dict__)

    if_verbose( 3, cmd )
    r = 0
    if config.os == 'mingw32':
        # On MinGW, we will always have timeout
        assert config.timeout_prog!=''

    if config.timeout_prog != '':
        r = rawSystemWithTimeout([config.timeout_prog, str(config.timeout), cmd])
    else:
        # NOTE(review): os.system already returns a wait-status on POSIX,
        # so the '<< 8' below assumes the plain-exit-code convention
        # (true for the timeout path) — verify the no-timeout POSIX case.
        r = os.system(cmd)
    return r << 8

def runCmdFor( name, cmd, timeout_multiplier=1.0 ):
    """Like runCmd, but scales the configured timeout by
    `timeout_multiplier` (per-test compile/run multipliers).
    `name` is accepted but not used in the command construction."""
    # Format cmd using config. Example: cmd='{hpc} report A.tix'
    cmd = cmd.format(**config.__dict__)

    if_verbose( 3, cmd )
    r = 0
    if config.os == 'mingw32':
        # On MinGW, we will always have timeout
        assert config.timeout_prog!=''
    # Round the scaled timeout up to a whole number of seconds.
    timeout = int(ceil(config.timeout * timeout_multiplier))

    if config.timeout_prog != '':
        r = rawSystemWithTimeout([config.timeout_prog, str(timeout), cmd])
    else:
        r = os.system(cmd)
    return r << 8

def runCmdExitCode( cmd ):
    # Recover the plain exit code from runCmd's shifted status value.
    return (runCmd(cmd) >> 8);
1910
1911 # -----------------------------------------------------------------------------
1912 # checking if ghostscript is available for checking the output of hp2ps
1913
def genGSCmd(psfile):
    # Command used to check that ghostscript can consume `psfile`; the
    # '{gs}' placeholder is later filled in from config by runCmd.
    template = '{{gs}} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "{0}"'
    return template.format(psfile)
1916
def gsNotWorking():
    # Record (and report) that ghostscript cannot be used.
    global gs_working
    print("GhostScript not available for hp2ps tests")

# Module-level probe, run at import time: decide whether ghostscript can
# validate hp2ps output.  gs is deemed working only if it accepts a
# known-good .ps file AND rejects a known-bad one.
global gs_working
gs_working = 0
if config.have_profiling:
  if config.gs != '':
    resultGood = runCmdExitCode(genGSCmd(config.confdir + '/good.ps'));
    if resultGood == 0:
        resultBad = runCmdExitCode(genGSCmd(config.confdir + '/bad.ps') +
                                   ' >/dev/null 2>&1')
        if resultBad != 0:
            print("GhostScript available for hp2ps tests")
            gs_working = 1;
        else:
            gsNotWorking();
    else:
        gsNotWorking();
  else:
    gsNotWorking();
1938
def add_suffix( name, suffix ):
    # Append '.suffix' to name; an empty suffix leaves name unchanged.
    return name if suffix == '' else name + '.' + suffix
1944
def add_hs_lhs_suffix(name):
    # Pick the source-file extension implied by the test's options.
    opts = getTestOpts()
    if opts.c_src:
        ext = 'c'
    elif opts.cmm_src:
        ext = 'cmm'
    elif opts.objc_src:
        ext = 'm'
    elif opts.objcpp_src:
        ext = 'mm'
    elif opts.literate:
        ext = 'lhs'
    else:
        ext = 'hs'
    return add_suffix(name, ext)
1958
def replace_suffix( name, suffix ):
    """Return 'name' with its extension (if any) replaced by 'suffix'."""
    root = os.path.splitext(name)[0]
    return root + '.' + suffix
1962
def in_testdir(name, suffix=''):
    """Path of 'name' (with optional 'suffix' appended) inside the
    current test's working directory."""
    opts = getTestOpts()
    return os.path.join(opts.testdir, add_suffix(name, suffix))
1965
def in_srcdir(name, suffix=''):
    """Path of 'name' (with optional 'suffix' appended) inside the
    current test's source directory."""
    full_name = add_suffix(name, suffix)
    return os.path.join(getTestOpts().srcdir, full_name)
1968
1969 # Finding the sample output. The filename is of the form
1970 #
1971 # <test>.stdout[-ws-<wordsize>][-<platform>]
1972 #
def find_expected_file(name, suff):
    """Locate the sample-output file for a test, trying the most specific
    candidate first: <name>.<suff>[-ws-<wordsize>][-<platform>|-<os>].
    Falls back to the plain '<name>.<suff>' name when no candidate exists
    in the source directory."""
    basename = add_suffix(name, suff)

    # Same search order as the old comprehension: platform variants outer,
    # wordsize variants inner, most specific first.
    for plat in ('-' + config.platform, '-' + config.os, ''):
        for ws in ('-ws-' + config.wordsize, ''):
            candidate = basename + ws + plat
            if os.path.exists(in_srcdir(candidate)):
                return candidate

    return basename
1985
def cleanup():
    """Delete the test's scratch directory; a missing directory or other
    removal failure is silently ignored."""
    scratch = getTestOpts().testdir
    shutil.rmtree(scratch, ignore_errors=True)
1988
1989
1990 # -----------------------------------------------------------------------------
1991 # Return a list of all the files ending in '.T' below directories roots.
1992
def findTFiles(roots):
    """Return every '.T' file found at or below the given root paths.
    os.walk would be simpler, but it yields backslashes on Windows,
    which trips up the testsuite later."""
    found = []
    for root in roots:
        found.extend(findTFiles_(root))
    return found
1998
def findTFiles_(path):
    """Return the '.T' files at or below 'path': recurse into directories,
    keep a '.T' file itself, and ignore anything else."""
    if os.path.isdir(path):
        paths = [os.path.join(path, x) for x in os.listdir(path)]
        return findTFiles(paths)
    elif path.endswith('.T'):
        # endswith instead of the old 'path[-2:]' slice comparison --
        # same result, clearer intent.
        return [path]
    else:
        return []
2007
2008 # -----------------------------------------------------------------------------
2009 # Output a test summary to the specified file object
2010
def summary(t, file, short=False):
    """Write a human-readable summary of test-run 't' to 'file'.
    With short=True, only the TEST="..." line of unexpected results is
    printed (via printUnexpectedTests)."""

    file.write('\n')
    printUnexpectedTests(file, [t.unexpected_passes, t.unexpected_failures,
                                t.unexpected_stat_failures])

    if short:
        # Only print the list of unexpected tests above.
        return

    # Wall-clock time since the run started, right-justified like the counts.
    elapsed = str(datetime.timedelta(seconds=
                  round(time.time() - time.mktime(t.start_time))))

    file.write('OVERALL SUMMARY for test run started at '
               + time.strftime("%c %Z", t.start_time) + '\n')
    file.write(elapsed.rjust(8) + ' spent to go through\n')
    file.write(repr(t.total_tests).rjust(8) + ' total tests, which gave rise to\n')
    file.write(repr(t.total_test_cases).rjust(8) + ' test cases, of which\n')
    file.write(repr(t.n_tests_skipped).rjust(8) + ' were skipped\n')
    file.write('\n')
    file.write(repr(t.n_missing_libs).rjust(8) + ' had missing libraries\n')
    file.write(repr(t.n_expected_passes).rjust(8) + ' expected passes\n')
    file.write(repr(t.n_expected_failures).rjust(8) + ' expected failures\n')
    file.write('\n')
    file.write(repr(t.n_framework_failures).rjust(8) + ' caused framework failures\n')
    file.write(repr(t.n_unexpected_passes).rjust(8) + ' unexpected passes\n')
    file.write(repr(t.n_unexpected_failures).rjust(8) + ' unexpected failures\n')
    file.write(repr(t.n_unexpected_stat_failures).rjust(8) + ' unexpected stat failures\n')
    file.write('\n')

    if t.n_unexpected_passes > 0:
        file.write('Unexpected passes:\n')
        printPassingTestInfosSummary(file, t.unexpected_passes)

    if t.n_unexpected_failures > 0:
        file.write('Unexpected failures:\n')
        printFailingTestInfosSummary(file, t.unexpected_failures)

    if t.n_unexpected_stat_failures > 0:
        file.write('Unexpected stat failures:\n')
        printFailingTestInfosSummary(file, t.unexpected_stat_failures)

    if t.n_framework_failures > 0:
        file.write('Test framework failures:\n')
        printFrameworkFailureSummary(file, t.framework_failures)

    if stopping():
        file.write('WARNING: Testsuite run was terminated early\n')
2067
def printUnexpectedTests(file, testInfoss):
    """Write a shell-pasteable TEST="..." line listing every test that had
    an unexpected result; writes nothing when there were none.
    'testInfoss' is a list of {directory: {test: ...}} mappings."""
    unexpected = [test
                  for testInfos in testInfoss
                  for directory in testInfos.keys()
                  for test in testInfos[directory].keys()]
    if unexpected != []:
        file.write('Unexpected results from:\n')
        file.write('TEST="' + ' '.join(unexpected) + '"\n')
        file.write('\n')
2079
def printPassingTestInfosSummary(file, testInfos):
    """Write one aligned line per passing test: directory, test name and
    the ways it passed.  'testInfos' maps directory -> {test: [way, ...]}."""
    dirs = sorted(testInfos.keys())
    # Pad directory names so the test names line up.
    width = max(len(d) for d in dirs)
    for d in dirs:
        for test in sorted(testInfos[d].keys()):
            file.write('   ' + d.ljust(width + 2) + test +
                       ' (' + ','.join(testInfos[d][test]) + ')\n')
    file.write('\n')
2091
def printFailingTestInfosSummary(file, testInfos):
    """Write one aligned line per failing test and failure reason, with the
    ways that failed.  'testInfos' maps
    directory -> {test: {reason: [way, ...]}}."""
    dirs = sorted(testInfos.keys())
    # Pad directory names so the test names line up.
    width = max(len(d) for d in dirs)
    for d in dirs:
        for test in sorted(testInfos[d].keys()):
            for reason in testInfos[d][test].keys():
                file.write('   ' + d.ljust(width + 2) + test +
                           ' [' + reason + ']' +
                           ' (' + ','.join(testInfos[d][test][reason]) + ')\n')
    file.write('\n')
2106
def printFrameworkFailureSummary(file, testInfos):
    """Write one aligned line per framework failure: test name and the
    ways affected.  'testInfos' maps name -> [way, ...]."""
    names = sorted(testInfos.keys())
    width = max(len(n) for n in names)
    for n in names:
        file.write('   ' + n.ljust(width + 2) +
                   ' (' + ','.join(testInfos[n]) + ')\n')
    file.write('\n')
2116
def modify_lines(s, f):
    """Return 's' with 'f' applied to each of its lines.  A non-empty
    result always ends in a newline."""
    out = '\n'.join(f(line) for line in s.splitlines())
    if out and not out.endswith('\n'):
        # Prevent '\ No newline at end of file' warnings when diffing.
        out += '\n'
    return out