f6db8288faffc846afbec2352c264907d2dee9b4
[ghc.git] / testsuite / driver / testlib.py
1 # coding=utf8
2 #
3 # (c) Simon Marlow 2002
4 #
5
6 from __future__ import print_function
7
8 import shutil
9 import sys
10 import os
11 import errno
12 import string
13 import re
14 import traceback
15 import time
16 import datetime
17 import copy
18 import glob
19 from math import ceil, trunc
20 import collections
21 import subprocess
22
23 from testglobals import *
24 from testutil import *
25 from extra_files import extra_src_files
26
27 try:
28 basestring
29 except: # Python 3
30 basestring = (str,bytes)
31
32 if config.use_threads:
33 import threading
34 try:
35 import thread
36 except ImportError: # Python 3
37 import _thread as thread
38
global wantToStop
# Set to True when a stop is requested (e.g. KeyboardInterrupt in do_test);
# test_common_work polls stopping() between ways and stops scheduling.
wantToStop = False
def stopNow():
    # Request that the driver stop running further tests.
    global wantToStop
    wantToStop = True
def stopping():
    # Has a stop been requested?
    return wantToStop
46
47 # Options valid for the current test only (these get reset to
48 # testdir_testopts after each test).
49
global testopts_local
if config.use_threads:
    # Each worker thread sees its own current-test options.
    testopts_local = threading.local()
else:
    class TestOpts_Local:
        # Plain attribute holder standing in for threading.local in
        # single-threaded runs.
        pass
    testopts_local = TestOpts_Local()
57
def getTestOpts():
    """Return the options of the test currently being set up / run."""
    return testopts_local.x

def setLocalTestOpts(opts):
    """Install *opts* as the current (thread-local) test options."""
    global testopts_local
    testopts_local.x = opts

def isStatsTest():
    """True if the current test checks compiler or runtime stats fields."""
    opts = getTestOpts()
    return bool(opts.compiler_stats_range_fields) or bool(opts.stats_range_fields)
68
69
# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
    # Chain f onto the directory-wide settings; executeSetups later walks
    # this nested-list structure in order, so earlier setups run first.
    global thisdir_settings
    thisdir_settings = [thisdir_settings, f]
75
76 # -----------------------------------------------------------------------------
77 # Canned setup functions for common cases. eg. for a test you might say
78 #
79 # test('test001', normal, compile, [''])
80 #
81 # to run it without any options, but change it to
82 #
83 # test('test001', expect_fail, compile, [''])
84 #
85 # to expect failure for this test.
86
def normal(name, opts):
    """Default setup function: leave the test options untouched."""
    pass

def skip(name, opts):
    """Skip this test entirely."""
    opts.skip = 1

def expect_fail(name, opts):
    """Mark the test as expected to fail.

    The compiler, testdriver, OS or platform is missing a certain
    feature, and we don't plan to or can't fix it now or in the future.
    """
    opts.expect = 'fail'
98
def reqlib(lib):
    """Setup function: expect 'missing-lib' unless *lib* is installed."""
    return lambda name, opts, l=lib: _reqlib(name, opts, l)

# Cache of library-availability probes, keyed by library name.  Asking
# ghc-pkg is slow (especially on Windows), so probe each library once.
have_lib = {}

def _reqlib(name, opts, lib):
    if lib not in have_lib:
        cmd = strip_quotes(config.ghc_pkg)
        p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # Drain stdout and stderr so a filled pipe buffer cannot
        # deadlock the child.
        p.communicate()
        have_lib[lib] = p.wait() == 0

    if not have_lib[lib]:
        opts.expect = 'missing-lib'
123
def req_haddock(name, opts):
    """Expect 'missing-lib' when the compiler was built without haddock."""
    if not config.haddock:
        opts.expect = 'missing-lib'

def req_profiling(name, opts):
    '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
    if not config.have_profiling:
        opts.expect = 'fail'

def req_shared_libs(name, opts):
    """Expect failure unless shared libraries are available."""
    if not config.have_shared_libs:
        opts.expect = 'fail'

def req_interp(name, opts):
    """Expect failure unless the interpreter is available."""
    if not config.have_interp:
        opts.expect = 'fail'

def req_smp(name, opts):
    """Expect failure unless the SMP runtime is available."""
    if not config.have_smp:
        opts.expect = 'fail'

def ignore_output(name, opts):
    """Don't compare the test's output against the expected files."""
    opts.ignore_output = 1

def no_stdin(name, opts):
    """Don't feed anything to the test program's stdin."""
    opts.no_stdin = 1

def combined_output(name, opts):
    """Compare stdout and stderr as one combined stream."""
    opts.combined_output = True
153
154 # -----
155
def expect_fail_for(ways):
    """Setup: the test is expected to fail in each way in *ways*."""
    return lambda name, opts, w=ways: _expect_fail_for(name, opts, w)

def _expect_fail_for(name, opts, ways):
    opts.expect_fail_for = ways

def expect_broken(bug):
    """Setup: the test is known broken because of Trac ticket *bug*."""
    return lambda name, opts, b=bug: _expect_broken(name, opts, b)

def _expect_broken(name, opts, bug):
    record_broken(name, opts, bug)
    opts.expect = 'fail'

def expect_broken_for(bug, ways):
    """Setup: broken in *ways* because of Trac ticket *bug*."""
    return lambda name, opts, b=bug, w=ways: _expect_broken_for(name, opts, b, w)

def _expect_broken_for(name, opts, bug, ways):
    record_broken(name, opts, bug)
    opts.expect_fail_for = ways

def record_broken(name, opts, bug):
    """Remember (bug, testdir, name) once, for the broken-tests report."""
    global brokens
    me = (bug, opts.testdir, name)
    if me not in brokens:
        brokens.append(me)

def _expect_pass(way):
    # Helper function. Not intended for use in .T files.
    opts = getTestOpts()
    return opts.expect == 'pass' and way not in opts.expect_fail_for
188
189 # -----
190
def omit_ways(ways):
    """Setup: never run the test in any way listed in *ways*."""
    return lambda name, opts, w=ways: _omit_ways(name, opts, w)

def _omit_ways(name, opts, ways):
    opts.omit_ways = ways

# -----

def only_ways(ways):
    """Setup: run the test only in the ways listed in *ways*."""
    return lambda name, opts, w=ways: _only_ways(name, opts, w)

def _only_ways(name, opts, ways):
    opts.only_ways = ways

# -----

def extra_ways(ways):
    """Setup: run the test in *ways* in addition to the default ways."""
    return lambda name, opts, w=ways: _extra_ways(name, opts, w)

def _extra_ways(name, opts, ways):
    opts.extra_ways = ways

# -----

def set_stdin(file):
    """Setup: feed *file* to the test program's stdin."""
    return lambda name, opts, f=file: _set_stdin(name, opts, f)

def _set_stdin(name, opts, f):
    opts.stdin = f

# -----

def exit_code(val):
    """Setup: the program is expected to exit with code *val*."""
    return lambda name, opts, v=val: _exit_code(name, opts, v)

def _exit_code(name, opts, v):
    opts.exit_code = v
228
def signal_exit_code(val):
    """Setup: expect the program to die from fatal signal *val*.

    Solaris reports the raw signal number as the exit code.  On Linux a
    process killed by a fatal signal exits with 128 + signal number (see
    http://www.tldp.org/LDP/abs/html/exitcodes.html); Mac OS X builder
    behaviour suggests it does the same.
    """
    if opsys('solaris2'):
        return exit_code(val)
    return exit_code(val + 128)
239
240 # -----
241
def compile_timeout_multiplier(val):
    """Setup: scale the compile timeout by *val* for this test."""
    return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)

def _compile_timeout_multiplier(name, opts, v):
    opts.compile_timeout_multiplier = v

def run_timeout_multiplier(val):
    """Setup: scale the run timeout by *val* for this test."""
    return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)

def _run_timeout_multiplier(name, opts, v):
    opts.run_timeout_multiplier = v

# -----

def extra_run_opts(val):
    """Setup: pass *val* as extra options when running the program."""
    return lambda name, opts, v=val: _extra_run_opts(name, opts, v)

def _extra_run_opts(name, opts, v):
    opts.extra_run_opts = v

# -----

def extra_hc_opts(val):
    """Setup: pass *val* as extra compiler options."""
    return lambda name, opts, v=val: _extra_hc_opts(name, opts, v)

def _extra_hc_opts(name, opts, v):
    opts.extra_hc_opts = v

# -----

def extra_clean(files):
    # TODO. Remove all calls to extra_clean.
    return lambda _name, _opts: None

def extra_files(files):
    """Setup: the test depends on *files* from its source directory."""
    return lambda name, opts: _extra_files(name, opts, files)

def _extra_files(name, opts, files):
    opts.extra_files.extend(files)
281
282 # -----
283
284 def stats_num_field( field, expecteds ):
285 return lambda name, opts, f=field, e=expecteds: _stats_num_field(name, opts, f, e);
286
287 def _stats_num_field( name, opts, field, expecteds ):
288 if field in opts.stats_range_fields:
289 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
290
291 if type(expecteds) is list:
292 for (b, expected, dev) in expecteds:
293 if b:
294 opts.stats_range_fields[field] = (expected, dev)
295 return
296 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
297
298 else:
299 (expected, dev) = expecteds
300 opts.stats_range_fields[field] = (expected, dev)
301
302 def compiler_stats_num_field( field, expecteds ):
303 return lambda name, opts, f=field, e=expecteds: _compiler_stats_num_field(name, opts, f, e);
304
305 def _compiler_stats_num_field( name, opts, field, expecteds ):
306 if field in opts.compiler_stats_range_fields:
307 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
308
309 # Compiler performance numbers change when debugging is on, making the results
310 # useless and confusing. Therefore, skip if debugging is on.
311 if compiler_debugged():
312 skip(name, opts)
313
314 for (b, expected, dev) in expecteds:
315 if b:
316 opts.compiler_stats_range_fields[field] = (expected, dev)
317 return
318
319 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
320
321 # -----
322
def when(b, f):
    """Apply setup *f* only when condition *b* holds.

    When list_broken is on we always apply *f*, so that every
    expect_broken call is visible in the report.
    """
    return f if (b or config.list_broken) else normal

def unless(b, f):
    """Apply setup *f* only when condition *b* does not hold."""
    return when(not b, f)
333
def doing_ghci():
    """True if the 'ghci' way is among the configured run ways."""
    return 'ghci' in config.run_ways

def ghc_dynamic():
    return config.ghc_dynamic

def fast():
    """True when running at the fastest (least thorough) speed setting."""
    return config.speed == 2

def platform(plat):
    return plat == config.platform

def opsys(os):
    return os == config.os

def arch(arch):
    return arch == config.arch

def wordsize(ws):
    return str(ws) == config.wordsize

def msys():
    return config.msys

def cygwin():
    return config.cygwin

def have_vanilla():
    return config.have_vanilla

def have_dynamic():
    return config.have_dynamic

def have_profiling():
    return config.have_profiling

def in_tree_compiler():
    return config.in_tree_compiler

def unregisterised():
    return config.unregisterised

def compiler_profiled():
    return config.compiler_profiled

def compiler_debugged():
    return config.compiler_debugged
381
382 # ---
383
def high_memory_usage(name, opts):
    """Run this test alone: it needs a lot of memory."""
    opts.alone = True

def multi_cpu_race(name, opts):
    """Run this test alone.

    For a multi-CPU race, running without competing tests increases the
    chance that we'll actually see it.
    """
    opts.alone = True
391
392 # ---
def literate(name, opts):
    """The test source is a literate (.lhs) file."""
    opts.literate = 1

def c_src(name, opts):
    """The test source is C."""
    opts.c_src = 1

def objc_src(name, opts):
    """The test source is Objective-C."""
    opts.objc_src = 1

def objcpp_src(name, opts):
    """The test source is Objective-C++."""
    opts.objcpp_src = 1

def cmm_src(name, opts):
    """The test source is C--."""
    opts.cmm_src = 1

def outputdir(odir):
    """Setup: place compilation outputs in *odir*."""
    return lambda name, opts, d=odir: _outputdir(name, opts, d)

def _outputdir(name, opts, odir):
    opts.outputdir = odir
413
414 # ----
415
def pre_cmd(cmd):
    """Setup: run shell command *cmd* in the test dir before the test."""
    return lambda name, opts, c=cmd: _pre_cmd(name, opts, c)

def _pre_cmd(name, opts, cmd):
    opts.pre_cmd = cmd

# ----

def clean_cmd(cmd):
    # TODO. Remove all calls to clean_cmd.
    return lambda _name, _opts: None

# ----

def cmd_prefix(prefix):
    """Setup: prepend *prefix* to the command used to run the test."""
    return lambda name, opts, p=prefix: _cmd_prefix(name, opts, p)

def _cmd_prefix(name, opts, prefix):
    opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd

# ----

def cmd_wrapper(fun):
    """Setup: transform the test's run command through *fun*."""
    return lambda name, opts, f=fun: _cmd_wrapper(name, opts, f)

def _cmd_wrapper(name, opts, fun):
    opts.cmd_wrapper = fun

# ----

def compile_cmd_prefix(prefix):
    """Setup: prepend *prefix* to the compilation command."""
    return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, p)

def _compile_cmd_prefix(name, opts, prefix):
    opts.compile_cmd_prefix = prefix

# ----

def check_stdout(f):
    """Setup: validate the test's stdout with predicate *f*."""
    return lambda name, opts, f=f: _check_stdout(name, opts, f)

def _check_stdout(name, opts, f):
    opts.check_stdout = f
459
460 # ----
461
def normalise_slashes(name, opts):
    """Normalise path separators in the test's output."""
    _normalise_fun(name, opts, normalise_slashes_)

def normalise_exe(name, opts):
    """Normalise executable names in the test's output."""
    _normalise_fun(name, opts, normalise_exe_)

def normalise_fun(*fs):
    """Setup: apply *fs* as extra output normalisers."""
    return lambda name, opts: _normalise_fun(name, opts, fs)

def _normalise_fun(name, opts, *fs):
    opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)

def normalise_errmsg_fun(*fs):
    """Setup: apply *fs* as extra error-message normalisers."""
    return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)

def _normalise_errmsg_fun(name, opts, *fs):
    opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)

def normalise_version_(*pkgs):
    """Return a normaliser mapping '<pkg>-<version>' to '<pkg>-<VERSION>'."""
    # Build the alternation once, not on every invocation.
    pattern = '(' + '|'.join(map(re.escape, pkgs)) + ')-[0-9.]+'
    def normalise_version__(str):
        return re.sub(pattern, '\\1-<VERSION>', str)
    return normalise_version__

def normalise_version(*pkgs):
    """Setup: normalise package versions in both output and errors."""
    def normalise_version__(name, opts):
        _normalise_fun(name, opts, normalise_version_(*pkgs))
        _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
    return normalise_version__

def normalise_drive_letter(name, opts):
    # Windows only. Change D:\\ to C:\\.
    _normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))

def keep_prof_callstacks(name, opts):
    """Keep profiling callstacks.

    Use together with `only_ways(prof_ways)`.
    """
    opts.keep_prof_callstacks = True
502
def join_normalisers(*a):
    """
    Compose functions, flattening sequences.

       join_normalisers(f1,[f2,f3],f4)

    is the same as

       lambda x: f1(f2(f3(f4(x))))
    """
    # collections.Iterable moved to collections.abc in Python 3.3 and was
    # removed from the collections namespace in Python 3.10, so import it
    # from the right place with a Python-2 fallback.
    try:
        from collections.abc import Iterable
    except ImportError:  # Python 2
        from collections import Iterable

    def flatten(l):
        """
        Taken from http://stackoverflow.com/a/2158532/946226
        """
        for el in l:
            if isinstance(el, Iterable) and not isinstance(el, basestring):
                for sub in flatten(el):
                    yield sub
            else:
                yield el

    a = flatten(a)

    fn = lambda x: x  # identity function
    for f in a:
        assert callable(f)
        # Wrap so the previously-accumulated chain runs *after* f.
        fn = lambda x, f=f, fn=fn: fn(f(x))
    return fn
532
533 # ----
534 # Function for composing two opt-fns together
535
def executeSetups(fs, name, opts):
    """Apply a setup function, or a (possibly nested) list of them, in order."""
    if type(fs) is list:
        for setup in fs:
            executeSetups(setup, name, opts)
    else:
        # A single setup function: apply it directly.
        fs(name, opts)
544
545 # -----------------------------------------------------------------------------
546 # The current directory of tests
547
def newTestDir(tempdir, dir):
    """Make *dir* the current test directory, working under *tempdir*."""
    # opts.testdir should be quoted when used, to make sure the testsuite
    # keeps working when it contains backward slashes, for example from
    # using os.path.join. Windows native and mingw* python
    # (/mingw64/bin/python) set `os.path.sep = '\\'`, while msys2 python
    # (/bin/python, /usr/bin/python or /usr/local/bin/python) sets
    # `os.path.sep = '/'`.
    # To catch usage of unquoted opts.testdir early, insert some spaces into
    # tempdir.
    tempdir = os.path.join(tempdir, 'test spaces')

    # Hack. A few tests depend on files in ancestor directories
    # (e.g. extra_files(['../../../../libraries/base/dist-install/haddock.t']))
    # Make sure tempdir is sufficiently "deep", such that copying/linking those
    # files won't cause any problems.
    #
    # If you received a framework failure about adding an extra level:
    #  * add one extra '../' to the startswith('../../../../../') in do_test
    #  * add one more number here:
    tempdir = os.path.join(tempdir, '1', '2', '3')

    global thisdir_settings
    # reset the options for this test directory
    def settings(name, opts, tempdir=tempdir, dir=dir):
        return _newTestDir(name, opts, tempdir, dir)
    thisdir_settings = settings


def _newTestDir(name, opts, tempdir, dir):
    # Record source and working directories for one test in this dir.
    opts.srcdir = os.path.join(os.getcwd(), dir)
    opts.testdir = os.path.join(tempdir, dir, name)
    opts.compiler_always_flags = config.compiler_always_flags
579 opts.compiler_always_flags = config.compiler_always_flags
580
581 # -----------------------------------------------------------------------------
582 # Actually doing tests
583
# Tests that may run concurrently, tests that must run alone, and the set
# of all registered test names (used to detect duplicates).
parallelTests = []
aloneTests = []
allTestNames = set([])

def runTest (opts, name, func, args):
    """Run one registered test, either on a worker thread or inline."""
    # ok tracks whether we got as far as releasing the pool lock; if an
    # exception fired before that, we must release it ourselves.
    ok = 0

    if config.use_threads:
        t.thread_pool.acquire()
        try:
            # Wait until a thread slot is free.
            while config.threads<(t.running_threads+1):
                t.thread_pool.wait()
            t.running_threads = t.running_threads+1
            ok=1
            t.thread_pool.release()
            thread.start_new_thread(test_common_thread, (name, opts, func, args))
        except:
            # NOTE(review): bare except — the original exception is
            # swallowed here; only the lock is cleaned up.
            if not ok:
                t.thread_pool.release()
    else:
        test_common_work (name, opts, func, args)
605
# name :: String
# setup :: TestOpts -> IO ()
def test (name, setup, func, args):
    """Register test *name*: apply setup, then queue it for running."""
    if config.run_only_some_tests:
        if name not in config.only:
            return
        else:
            # Note [Mutating config.only]
            # config.only is initially the set of tests requested by
            # the user (via 'make TEST='). We then remove all tests that
            # we've already seen (in .T files), so that we can later
            # report on any tests we couldn't find and error out.
            config.only.remove(name)

    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
    myTestOpts = copy.deepcopy(default_testopts)

    # Directory-wide settings run first, then this test's own setup.
    executeSetups([thisdir_settings, setup], name, myTestOpts)

    thisTest = lambda : runTest(myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
    else:
        parallelTests.append(thisTest)
    allTestNames.add(name)
642
if config.use_threads:
    def test_common_thread(name, opts, func, args):
        # Worker-thread entry point: run the test under the global lock,
        # then return this thread's slot to the pool and wake a waiter.
        t.lock.acquire()
        try:
            test_common_work(name,opts,func,args)
        finally:
            t.lock.release()
            t.thread_pool.acquire()
            t.running_threads = t.running_threads - 1
            t.thread_pool.notify()
            t.thread_pool.release()
654
def get_package_cache_timestamp():
    """Return the mtime of the package db cache file, or 0.0 if unset/unreadable.

    Used to detect tests that modify the package database behind our back.
    """
    if config.package_conf_cache_file == '':
        return 0.0
    try:
        return os.stat(config.package_conf_cache_file).st_mtime
    except OSError:
        # Only stat failures (missing/unreadable file) mean "no cache";
        # the original bare `except:` also swallowed KeyboardInterrupt
        # and SystemExit.
        return 0.0
663
664
def test_common_work (name, opts, func, args):
    """Run test *name* in every applicable way, skipping the rest.

    Determines the candidate ways from *func*, filters them via the test
    options and command line, collects the test's source files, runs each
    selected way, and records skipped ways.  Any unhandled exception is
    reported as a framework failure rather than aborting the run.
    """
    try:
        t.total_tests = t.total_tests+1
        setLocalTestOpts(opts)

        # Snapshot the package db cache mtime so we can detect tests that
        # modify the package database (checked again at the end).
        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()

        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']

        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]

        t.total_test_cases = t.total_test_cases + len(all_ways)

        # A way is run unless the test is skipped, the way is filtered out
        # by only_ways/omit_ways/cmdline ways, or perf tests are disabled.
        ok_way = lambda way: \
            not getTestOpts().skip \
            and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and way not in getTestOpts().omit_ways

        # Which ways we are asked to skip
        do_ways = list(filter (ok_way,all_ways))

        # Only run all ways in slow mode.
        # See Note [validate and testsuite speed] in toplevel Makefile.
        if config.accept:
            # Only ever run one way
            do_ways = do_ways[:1]
        elif config.speed > 0:
            # However, if we EXPLICITLY asked for a way (with extra_ways)
            # please test it!
            explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
            other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
            do_ways = other_ways[:1] + explicit_ways

        # Find all files in the source directory that this test
        # depends on. Do this only once for all ways.
        # Generously add all filenames that start with the name of
        # the test to this set, as a convenience to test authors.
        # They will have to use the `extra_files` setup function to
        # specify all other files that their test depends on (but
        # this seems to be necessary for only about 10% of all
        # tests).
        files = set((f for f in os.listdir(opts.srcdir)
                        if f.startswith(name)))
        for filename in (opts.extra_files + extra_src_files.get(name, [])):
            if filename.startswith('../../../../../../'):
                framework_fail(name, 'whole-test',
                    'add extra level to testlib.py:newTestDir for: ' + filename)
            elif filename.startswith('/'):
                framework_fail(name, 'whole-test',
                    'no absolute paths in extra_files please: ' + filename)
            elif '*' in filename:
                # Don't use wildcards in extra_files too much, as
                # globbing is slow.
                files.update((os.path.relpath(f, opts.srcdir)
                            for f in glob.iglob(in_srcdir(filename))))
            else:
                files.add(filename)

        # Run the required tests...
        for way in do_ways:
            if stopping():
                break
            do_test(name, way, func, args, files)

        for way in all_ways:
            if way not in do_ways:
                skiptest (name,way)

        if config.cleanup and do_ways:
            cleanup()

        package_conf_cache_file_end_timestamp = get_package_cache_timestamp();

        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))

        # Record any files the test wrote but did not remove, for later
        # reporting.  Best-effort: the tracking dicts may not have entries
        # for this test, hence the broad exception handlers.
        try:
            for f in files_written[name]:
                if os.path.exists(f):
                    try:
                        if not f in files_written_not_removed[name]:
                            files_written_not_removed[name].append(f)
                    except:
                        files_written_not_removed[name] = [f]
        except:
            pass
    except Exception as e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
770
def do_test(name, way, func, args, files):
    """Run a single (test, way) pair and record the outcome in `t`.

    Copies/links the test's *files* into a fresh test directory, runs an
    optional pre-command, invokes *func*, then classifies the result as an
    (un)expected pass or failure.  KeyboardInterrupt requests a global stop;
    any other exception is a framework failure.
    """
    opts = getTestOpts()

    full_name = name + '(' + way + ')'

    try:
        if_verbose(2, "=====> %s %d of %d %s " % \
                    (full_name, t.total_tests, len(allTestNames), \
                    [t.n_unexpected_passes, \
                     t.n_unexpected_failures, \
                     t.n_framework_failures]))

        # Clean up prior to the test, so that we can't spuriously conclude
        # that it passed on the basis of old run outputs.
        cleanup()

        # Link all source files for this test into a new directory in
        # /tmp, and run the test in that directory. This makes it
        # possible to run tests in parallel, without modification, that
        # would otherwise (accidentally) write to the same output file.
        # It also makes it easier to keep the testsuite clean.

        for filename in files:
            src = in_srcdir(filename)
            dst = in_testdir(filename)

            if os.path.isfile(src):
                dirname = os.path.dirname(dst)
                if dirname:
                    mkdirp(dirname)
                try:
                    link_or_copy_file(src, dst)
                except OSError as e:
                    if e.errno == errno.EEXIST and os.path.isfile(dst):
                        # Some tests depend on files from ancestor
                        # directories (e.g. '../shell.hs'). It is
                        # possible such a file was already copied over
                        # for another test, since cleanup() doesn't
                        # delete them.
                        pass
                    else:
                        raise
            elif os.path.isdir(src):
                os.makedirs(dst)
                lndir(src, dst)
            else:
                if not config.haddock and os.path.splitext(filename)[1] == '.t':
                    # When using a ghc built without haddock support, .t
                    # files are rightfully missing. Don't
                    # framework_fail. Test will be skipped later.
                    pass
                else:
                    framework_fail(name, way,
                        'extra_file does not exist: ' + filename)

        if not files:
            # Always create the testdir, even when no files were copied
            # (because user forgot to specify extra_files setup function), to
            # prevent the confusing error: can't cd to <testdir>.
            os.makedirs(opts.testdir)

        if func.__name__ == 'run_command' or opts.pre_cmd:
            # When running 'MAKE' make sure 'TOP' still points to the
            # root of the testsuite.
            src_makefile = in_srcdir('Makefile')
            dst_makefile = in_testdir('Makefile')
            if os.path.exists(src_makefile):
                with open(src_makefile, 'r') as src:
                    makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
                    with open(dst_makefile, 'w') as dst:
                        dst.write(makefile)

        # Release the global lock while the (possibly slow) external
        # commands run; reacquired in the finally below.
        if config.use_threads:
            t.lock.release()

        try:
            preCmd = getTestOpts().pre_cmd
            if preCmd != None:
                result = runCmdFor(name, 'cd "{opts.testdir}" && {preCmd}'.format(**locals()))
                if result != 0:
                    framework_fail(name, way, 'pre-command failed: ' + str(result))
        except:
            framework_fail(name, way, 'pre-command exception')

        try:
            result = func(*[name,way] + args)
        finally:
            if config.use_threads:
                t.lock.acquire()

        if getTestOpts().expect != 'pass' and \
           getTestOpts().expect != 'fail' and \
           getTestOpts().expect != 'missing-lib':
            framework_fail(name, way, 'bad expected ' + getTestOpts().expect)

        try:
            passFail = result['passFail']
        except:
            passFail = 'No passFail found'

        # Classify the outcome against the expectation for this way.
        if passFail == 'pass':
            if _expect_pass(way):
                t.n_expected_passes = t.n_expected_passes + 1
                if name in t.expected_passes:
                    t.expected_passes[name].append(way)
                else:
                    t.expected_passes[name] = [way]
            else:
                if_verbose(1, '*** unexpected pass for %s' % full_name)
                t.n_unexpected_passes = t.n_unexpected_passes + 1
                addPassingTestInfo(t.unexpected_passes, getTestOpts().testdir, name, way)
        elif passFail == 'fail':
            if _expect_pass(way):
                reason = result['reason']
                tag = result.get('tag')
                if tag == 'stat':
                    if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
                    t.n_unexpected_stat_failures = t.n_unexpected_stat_failures + 1
                    addFailingTestInfo(t.unexpected_stat_failures, getTestOpts().testdir, name, reason, way)
                else:
                    if_verbose(1, '*** unexpected failure for %s' % full_name)
                    t.n_unexpected_failures = t.n_unexpected_failures + 1
                    addFailingTestInfo(t.unexpected_failures, getTestOpts().testdir, name, reason, way)
            else:
                if getTestOpts().expect == 'missing-lib':
                    t.n_missing_libs = t.n_missing_libs + 1
                    if name in t.missing_libs:
                        t.missing_libs[name].append(way)
                    else:
                        t.missing_libs[name] = [way]
                else:
                    t.n_expected_failures = t.n_expected_failures + 1
                    if name in t.expected_failures:
                        t.expected_failures[name].append(way)
                    else:
                        t.expected_failures[name] = [way]
        else:
            framework_fail(name, way, 'bad result ' + passFail)
    except KeyboardInterrupt:
        stopNow()
    except:
        framework_fail(name, way, 'do_test exception')
        traceback.print_exc()
914
def addPassingTestInfo(testInfos, directory, name, way):
    """Record an unexpected pass: testInfos[directory][name] gains *way*."""
    # Strip a leading './' or '.\' from the directory key.
    directory = re.sub('^\\.[/\\\\]', '', directory)
    testInfos.setdefault(directory, {}).setdefault(name, []).append(way)

def addFailingTestInfo(testInfos, directory, name, reason, way):
    """Record an unexpected failure, keyed by directory, name and reason."""
    directory = re.sub('^\\.[/\\\\]', '', directory)
    byName = testInfos.setdefault(directory, {})
    byName.setdefault(name, {}).setdefault(reason, []).append(way)
939
def skiptest(name, way):
    """Count (name, way) as skipped."""
    t.n_tests_skipped = t.n_tests_skipped + 1
    t.tests_skipped.setdefault(name, []).append(way)

def framework_fail(name, way, reason):
    """Record a failure of the test framework itself (not of the test)."""
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
    t.n_framework_failures = t.n_framework_failures + 1
    t.framework_failures.setdefault(name, []).append(way)
956
def badResult(result):
    """True unless *result* is a dict whose 'passFail' entry is 'pass'."""
    try:
        return result['passFail'] != 'pass'
    except (KeyError, TypeError):
        # Malformed result (missing key, or not a dict at all) counts as
        # a failure.  The original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit.
        return True

def passed():
    """The result value for a successful test."""
    return {'passFail': 'pass'}

def failBecause(reason, tag=None):
    """A failing result with a human-readable *reason* and optional *tag*
    (e.g. 'stat' for stats-range failures)."""
    return {'passFail': 'fail', 'reason': reason, 'tag': tag}
970
971 # -----------------------------------------------------------------------------
972 # Generic command tests
973
974 # A generic command test is expected to run and exit successfully.
975 #
976 # The expected exit code can be changed via exit_code() as normal, and
977 # the expected stdout/stderr are stored in <testname>.stdout and
978 # <testname>.stderr. The output of the command can be ignored
979 # altogether by using run_command_ignore_output instead of
980 # run_command.
981
def run_command( name, way, cmd ):
    # Generic command test: run *cmd* and compare exit code and output
    # against <name>.stdout / <name>.stderr (way is deliberately '').
    return simple_run( name, '', cmd, '' )
984
985 # -----------------------------------------------------------------------------
986 # GHCi tests
987
def ghci_script( name, way, script, override_flags = None ):
    """Run *script* through GHCi and compare its output as a run test."""
    # filter out -fforce-recomp from compiler_always_flags, because we're
    # actually testing the recompilation behaviour in the GHCi tests.
    flags = ' '.join(get_compiler_flags(override_flags, noforce=True))

    way_flags = ' '.join(config.way_flags(name)[way])

    # We pass HC and HC_OPTS as environment variables, so that the
    # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
    # ({{compiler}} is left as a literal {compiler} placeholder here,
    # to be filled in later).
    cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {flags} {way_flags}'
          ).format(flags=flags, way_flags=way_flags)

    getTestOpts().stdin = script
    return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1002
1003 # -----------------------------------------------------------------------------
1004 # Compile-only tests
1005
def compile_override_default_flags(overrides):
    """Compile test using *overrides* instead of the default compiler flags."""
    def apply(name, way, extra_opts):
        return do_compile(name, way, 0, '', [], extra_opts, overrides)

    return apply

def compile_fail_override_default_flags(overrides):
    """Like compile_override_default_flags, but compilation must fail."""
    def apply(name, way, extra_opts):
        return do_compile(name, way, 1, '', [], extra_opts, overrides)

    return apply

def compile_without_flag(flag):
    """Compile test using the default flags with *flag* removed."""
    def apply(name, way, extra_opts):
        overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
        return compile_override_default_flags(overrides)(name, way, extra_opts)

    return apply

def compile_fail_without_flag(flag):
    """Failing-compile test using the default flags with *flag* removed."""
    def apply(name, way, extra_opts):
        # BUG FIX: getTestOpts must be *called*; the original read
        # `getTestOpts.compiler_always_flags`, an attribute access on the
        # function object itself, which raises AttributeError at run time.
        overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
        return compile_fail_override_default_flags(overrides)(name, way, extra_opts)

    return apply
1031
def compile( name, way, extra_hc_opts ):
    # Single-target compile that must succeed (should_fail=0).
    return do_compile( name, way, 0, '', [], extra_hc_opts )

def compile_fail( name, way, extra_hc_opts ):
    # Single-target compile that must fail (should_fail=1).
    return do_compile( name, way, 1, '', [], extra_hc_opts )

def multimod_compile( name, way, top_mod, extra_hc_opts ):
    # Compile starting from top_mod; must succeed.
    return do_compile( name, way, 0, top_mod, [], extra_hc_opts )

def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
    # Compile starting from top_mod; must fail.
    return do_compile( name, way, 1, top_mod, [], extra_hc_opts )

def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Compile with separately-built extra_mods; must succeed.
    return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)

def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Compile with separately-built extra_mods; must fail.
    return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
1049
def do_compile( name, way, should_fail, top_mod, extra_mods, extra_hc_opts, override_flags = None ):
    """Compile-only test: build extra modules, build the test, then compare
    the compiler's stderr against the expected file."""
    # print 'Compile only, extra args = ', extra_hc_opts

    # Build any extra modules first; they may contribute extra hc options.
    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    # Force recompilation when extra modules were built.
    force = 0
    if extra_mods:
        force = 1
    result = simple_build( name, way, extra_hc_opts, should_fail, top_mod, 0, 1, force, override_flags )

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    expected_stderr_file = find_expected_file(name, 'stderr')
    actual_stderr_file = add_suffix(name, 'comp.stderr')

    if not compare_outputs(way, 'stderr',
                           join_normalisers(getTestOpts().extra_errmsg_normaliser,
                                            normalise_errmsg),
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=normalise_whitespace):
        return failBecause('stderr mismatch')

    # no problems found, this test passed
    return passed()
1082
def compile_cmp_asm( name, way, extra_hc_opts ):
    """Compile <name>.cmm with -keep-s-files and compare the generated
    assembly (normalised) against the expected .asm sample."""
    print('Compile only, extra args = ', extra_hc_opts)
    result = simple_build( name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0, 0)

    if badResult(result):
        return result

    # Compare the produced assembly with the sample; both sides are run
    # through the asm normaliser so that directives, operands and other
    # platform noise are ignored (see normalise_asm).

    expected_asm_file = find_expected_file(name, 'asm')
    actual_asm_file = add_suffix(name, 's')

    if not compare_outputs(way, 'asm',
                           join_normalisers(normalise_errmsg, normalise_asm),
                           expected_asm_file, actual_asm_file):
        return failBecause('asm mismatch')

    # no problems found, this test passed
    return passed()
1104
1105 # -----------------------------------------------------------------------------
1106 # Compile-and-run tests
1107
def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts ):
    """Core compile-and-run test: build extra_mods, then either run the
    test in GHCi (for 'ghci*' ways) or compile, link and execute it.

    Returns a passed()/failBecause(...) result dict.
    """
    # print 'Compile and run, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    if way.startswith('ghci'): # interpreted...
        return interpreter_run( name, way, extra_hc_opts, 0, top_mod )
    else: # compiled...
        # Extra modules mean the main build must not skip recompilation.
        force = 0
        if extra_mods:
            force = 1

        result = simple_build( name, way, extra_hc_opts, 0, top_mod, 1, 1, force)
        if badResult(result):
            return result

        cmd = './' + name;

        # we don't check the compiler's stderr for a compile-and-run test
        return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1131
def compile_and_run( name, way, extra_hc_opts ):
    """Compile a single-module program and run it."""
    return compile_and_run__( name, way, '', [], extra_hc_opts)
1134
def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
    """Compile a multi-module program rooted at top_mod and run it."""
    return compile_and_run__( name, way, top_mod, [], extra_hc_opts)
1137
def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
    """Compile extra_mods, then the program rooted at top_mod, and run it."""
    return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
1140
def stats( name, way, stats_file ):
    """Test action that only checks an existing RTS stats file against the
    test's stats_range_fields (no compilation or run is performed here)."""
    opts = getTestOpts()
    return checkStats(name, way, stats_file, opts.stats_range_fields)
1144
1145 # -----------------------------------------------------------------------------
1146 # Check -t stats info
1147
def checkStats(name, way, stats_file, range_fields):
    """Check a '+RTS -t --machine-readable' stats file against expectations.

    Args:
        name, way:    test identity, used only for messages.
        stats_file:   stats file name, resolved relative to the test dir.
        range_fields: dict mapping field name -> (expected, deviation%)

    Returns passed() if every field is present and within bounds, else a
    failBecause(...) result (the last failing field wins).

    Bug fix: when a field was missing from the stats file the original
    recorded the failure but then fell through to ``m.group(1)`` and
    crashed with AttributeError; we now skip to the next field.
    """
    full_name = name + '(' + way + ')'

    result = passed()
    if len(range_fields) > 0:
        try:
            f = open(in_testdir(stats_file))
        except IOError as e:
            return failBecause(str(e))
        contents = f.read()
        f.close()

        for (field, (expected, dev)) in range_fields.items():
            # The machine-readable stats file contains ("field", "1234") pairs.
            m = re.search(r'\("' + field + r'", "([0-9]+)"\)', contents)
            if m is None:
                print('Failed to find field: ', field)
                result = failBecause('no such stats field')
                continue   # nothing to compare for this field
            val = int(m.group(1))

            # Accept any value within dev% of the expected value.
            lowerBound = trunc(           expected * ((100 - float(dev))/100))
            upperBound = trunc(0.5 + ceil(expected * ((100 + float(dev))/100)))

            deviation = round(((float(val) * 100)/ expected) - 100, 1)

            if val < lowerBound:
                print(field, 'value is too low:')
                print('(If this is because you have improved GHC, please')
                print('update the test so that GHC doesn\'t regress again)')
                result = failBecause('stat too good', tag='stat')
            if val > upperBound:
                print(field, 'value is too high:')
                result = failBecause('stat not good enough', tag='stat')

            # Show the numbers on failure, or always at high verbosity.
            if val < lowerBound or val > upperBound or config.verbose >= 4:
                length = max(len(str(x)) for x in [expected, lowerBound, upperBound, val])

                def display(descr, val, extra):
                    print(descr, str(val).rjust(length), extra)

                display(' Expected ' + full_name + ' ' + field + ':', expected, '+/-' + str(dev) + '%')
                display(' Lower bound ' + full_name + ' ' + field + ':', lowerBound, '')
                display(' Upper bound ' + full_name + ' ' + field + ':', upperBound, '')
                display(' Actual ' + full_name + ' ' + field + ':', val, '')
                if val != expected:
                    display(' Deviation ' + full_name + ' ' + field + ':', deviation, '%')

    return result
1199
1200 # -----------------------------------------------------------------------------
1201 # Build a single-module program
1202
def extras_build( way, extra_mods, extra_hc_opts ):
    """Compile each (module, options) pair in extra_mods with simple_build.

    For non-Haskell sources the resulting object file is appended to the
    flag string, so the main build links against it.  Returns a result
    dict; on success it carries the (possibly extended) flags under
    'hc_opts'.
    """
    for modopts in extra_mods:
        mod, opts = modopts
        result = simple_build( mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0, 0)
        if not (mod.endswith('.hs') or mod.endswith('.lhs')):
            # e.g. a .c file: pass its object file to the main compilation
            extra_hc_opts += ' ' + replace_suffix(mod, 'o')
        if badResult(result):
            return result

    return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1213
1214
def simple_build( name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, noforce, override_flags = None ):
    """Run a single compiler invocation for a test.

    Args:
        name:           test name; source and output files derive from it.
        way:            the test way; contributes way-specific flags.
        extra_hc_opts:  extra compiler flags.
        should_fail:    non-zero if the compilation is expected to fail.
        top_mod:        root module for a --make build, or ''.
        link:           non-zero to produce an executable (-o name).
        addsuf:         non-zero to append the .hs/.lhs/... suffix to name.
        noforce:        non-zero to drop -fforce-recomp from the flags.
        override_flags: if not None, replaces the default compiler flags.

    Compiler stderr goes to <name>.comp.stderr.  Returns passed() or a
    failBecause(...) result.
    """
    opts = getTestOpts()
    errname = add_suffix(name, 'comp.stderr')

    # Work out the source file name.
    if top_mod != '':
        srcname = top_mod
        base, suf = os.path.splitext(top_mod)
    elif addsuf:
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = name

    # Work out the compiler mode: --make for multi-module builds,
    # otherwise -o/-C/-c for link / compile-to-C / compile-only.
    to_do = ''
    if top_mod != '':
        to_do = '--make '
        if link:
            to_do = to_do + '-o ' + name
    elif link:
        to_do = '-o ' + name
    elif opts.compile_to_hc:
        to_do = '-C'
    else:
        to_do = '-c' # just compile

    # Collect compile-time RTS stats when the test asks for them.
    stats_file = name + '.comp.stats'
    if len(opts.compiler_stats_range_fields) > 0:
        extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'

    # Required by GHC 7.3+, harmless for earlier versions:
    if (getTestOpts().c_src or
        getTestOpts().objc_src or
        getTestOpts().objcpp_src or
        getTestOpts().cmm_src):
        extra_hc_opts += ' -no-hs-main '

    if getTestOpts().compile_cmd_prefix == '':
        cmd_prefix = ''
    else:
        cmd_prefix = getTestOpts().compile_cmd_prefix + ' '

    flags = ' '.join(get_compiler_flags(override_flags, noforce) +
                     config.way_flags(name)[way])

    # '{compiler}' is left for runCmdFor to substitute from config.
    cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
           '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts} '
           '> {errname} 2>&1'
          ).format(**locals())

    result = runCmdFor(name, cmd, timeout_multiplier=opts.compile_timeout_multiplier)

    if result != 0 and not should_fail:
        if config.verbose >= 1 and _expect_pass(way):
            print('Compile failed (status ' + repr(result) + ') errors were:')
            actual_stderr_path = in_testdir(name, 'comp.stderr')
            if_verbose_dump(1, actual_stderr_path)

    # ToDo: if the sub-shell was killed by ^C, then exit

    statsResult = checkStats(name, way, stats_file, opts.compiler_stats_range_fields)

    if badResult(statsResult):
        return statsResult

    # The exit status must agree with should_fail.
    if should_fail:
        if result == 0:
            return failBecause('exit code 0')
    else:
        if result != 0:
            return failBecause('exit code non-0')

    return passed()
1286
1287 # -----------------------------------------------------------------------------
1288 # Run a program and check its output
1289 #
1290 # If testname.stdin exists, route input from that, else
1291 # from /dev/null. Route output to testname.run.stdout and
1292 # testname.run.stderr. Returns the exit code of the run.
1293
def simple_run(name, way, prog, extra_run_opts):
    """Run the compiled program *prog* and check exit code and output.

    stdin comes from opts.stdin, <name>.stdin, or /dev/null; output goes
    to <name>.run.stdout / <name>.run.stderr.  Also checks heap/time
    profiles and RTS stats where the way or options request them.
    Returns passed() or a failBecause(...) result.
    """
    opts = getTestOpts()

    # figure out what to use for stdin
    if opts.stdin != '':
        use_stdin = opts.stdin
    else:
        stdin_file = add_suffix(name, 'stdin')
        if os.path.exists(in_srcdir(stdin_file)):
            use_stdin = stdin_file
        else:
            use_stdin = '/dev/null'

    run_stdout = add_suffix(name,'run.stdout')
    run_stderr = add_suffix(name,'run.stderr')

    my_rts_flags = rts_flags(way)

    # Collect runtime stats when the test asks for them.
    stats_file = name + '.stats'
    if len(opts.stats_range_fields) > 0:
        stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    else:
        stats_args = ''

    if opts.no_stdin:
        stdin_comes_from = ''
    else:
        stdin_comes_from = ' <' + use_stdin

    if opts.combined_output:
        redirection        = ' > {0} 2>&1'.format(run_stdout)
        redirection_append = ' >> {0} 2>&1'.format(run_stdout)
    else:
        redirection        = ' > {0} 2> {1}'.format(run_stdout, run_stderr)
        redirection_append = ' >> {0} 2>> {1}'.format(run_stdout, run_stderr)

    # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
    cmd = prog + stats_args + ' ' \
        + my_rts_flags + ' ' \
        + extra_run_opts + ' ' \
        + stdin_comes_from \
        + redirection

    if opts.cmd_wrapper != None:
        cmd = opts.cmd_wrapper(cmd) + redirection_append

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    # run the command
    result = runCmdFor(name, cmd, timeout_multiplier=opts.run_timeout_multiplier)

    # os.system-style status: high byte is the exit code, low byte the signal.
    exit_code = result >> 8
    signal    = result & 0xff

    # check the exit code
    if exit_code != opts.exit_code:
        if config.verbose >= 1 and _expect_pass(way):
            print('Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')')
            dump_stdout(name)
            dump_stderr(name)
        return failBecause('bad exit code')

    # Heap/time profile checks apply when the way passes -h / -p RTS flags.
    check_hp = my_rts_flags.find("-h") != -1
    check_prof = my_rts_flags.find("-p") != -1

    if not opts.ignore_output:
        bad_stderr = not opts.combined_output and not check_stderr_ok(name, way)
        bad_stdout = not check_stdout_ok(name, way)
        if bad_stderr:
            return failBecause('bad stderr')
        if bad_stdout:
            return failBecause('bad stdout')
        # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
        if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
            return failBecause('bad heap profile')
        if check_prof and not check_prof_ok(name, way):
            return failBecause('bad profile')

    return checkStats(name, way, stats_file, opts.stats_range_fields)
1373
def rts_flags(way):
    """Return the '+RTS ... -RTS' argument string configured for *way*,
    or '' when the way is empty or has no RTS flags."""
    if way == '':
        return ''
    args = config.way_rts_flags[way]
    return '+RTS ' + ' '.join(args) + ' -RTS' if args else ''
1384
1385 # -----------------------------------------------------------------------------
1386 # Run a program in the interpreter and check its output
1387
def interpreter_run( name, way, extra_hc_opts, compile_only, top_mod ):
    """Run a test inside GHCi and check exit code and output.

    A GHCi script is generated that mimics the compiled environment
    (prog name, args, line buffering, runIO top handler), echoes a
    delimiter to stdout/stderr, and then runs Main.main.  The combined
    GHCi output is split at the delimiter into comp.* and run.* files,
    so the program's output can be compared as usual.
    Returns passed() or a failBecause(...) result.
    """
    opts = getTestOpts()

    outname = add_suffix(name, 'interp.stdout')
    errname = add_suffix(name, 'interp.stderr')

    if (top_mod == ''):
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = top_mod

    scriptname = add_suffix(name, 'genscript')
    qscriptname = in_testdir(scriptname)

    delimiter = '===== program output begins here\n'

    script = open(qscriptname, 'w')
    if not compile_only:
        # set the prog name and command-line args to match the compiled
        # environment.
        script.write(':set prog ' + name + '\n')
        script.write(':set args ' + getTestOpts().extra_run_opts + '\n')
        # Add marker lines to the stdout and stderr output files, so we
        # can separate GHCi's output from the program's.
        script.write(':! echo ' + delimiter)
        script.write(':! echo 1>&2 ' + delimiter)
        # Set stdout to be line-buffered to match the compiled environment.
        script.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
        # wrapping in GHC.TopHandler.runIO ensures we get the same output
        # in the event of an exception as for the compiled program.
        script.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
    script.close()

    # figure out what to use for stdin
    # (opts is the same object as getTestOpts() here)
    if getTestOpts().stdin != '':
        stdin_file = in_srcdir(opts.stdin)
    else:
        stdin_file = in_srcdir(name, 'stdin')

    if os.path.exists(stdin_file):
        # append the test's stdin to the script, so GHCi feeds it to main
        os.system('cat ' + stdin_file + ' >>' + qscriptname)

    flags = ' '.join(get_compiler_flags(override_flags=None, noforce=False) +
                     config.way_flags(name)[way])

    if getTestOpts().combined_output:
        redirection        = ' > {0} 2>&1'.format(outname)
        redirection_append = ' >> {0} 2>&1'.format(outname)
    else:
        redirection        = ' > {0} 2> {1}'.format(outname, errname)
        redirection_append = ' >> {0} 2>> {1}'.format(outname, errname)

    # '{compiler}' is left for runCmdFor to substitute from config.
    cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts} '
           '< {scriptname} {redirection}'
          ).format(**locals())

    if getTestOpts().cmd_wrapper != None:
        cmd = getTestOpts().cmd_wrapper(cmd) + redirection_append;

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    result = runCmdFor(name, cmd, timeout_multiplier=opts.run_timeout_multiplier)

    # os.system-style status: high byte is the exit code, low byte the signal.
    exit_code = result >> 8
    signal    = result & 0xff

    # split the stdout into compilation/program output
    split_file(in_testdir(outname), delimiter,
               in_testdir(name, 'comp.stdout'),
               in_testdir(name, 'run.stdout'))
    split_file(in_testdir(errname), delimiter,
               in_testdir(name, 'comp.stderr'),
               in_testdir(name, 'run.stderr'))

    # check the exit code
    if exit_code != getTestOpts().exit_code:
        print('Wrong exit code (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
        dump_stdout(name)
        dump_stderr(name)
        return failBecause('bad exit code')

    # ToDo: if the sub-shell was killed by ^C, then exit

    if getTestOpts().ignore_output or (check_stderr_ok(name, way) and
                                       check_stdout_ok(name, way)):
        return passed()
    else:
        return failBecause('bad stdout or stderr')
1476
1477
def split_file(in_fn, delimiter, out1_fn, out2_fn):
    """Split *in_fn* into two files at the first line equal to *delimiter*.

    Lines before the delimiter go to *out1_fn* (with Windows '\\r'
    stripped, and leading whitespace ignored for the comparison); the
    delimiter line itself is dropped; everything after goes verbatim to
    *out2_fn*.

    Fix: the original never closed the input file handle; 'with' now
    guarantees all three handles are closed, even on error.
    """
    with open(in_fn) as infile, \
         open(out1_fn, 'w') as out1, \
         open(out2_fn, 'w') as out2:
        line = infile.readline()
        line = re.sub('\r', '', line) # ignore Windows EOL
        while (re.sub(r'^\s*', '', line) != delimiter and line != ''):
            out1.write(line)
            line = infile.readline()
            line = re.sub('\r', '', line)

        line = infile.readline()
        while (line != ''):
            out2.write(line)
            line = infile.readline()
1496
1497 # -----------------------------------------------------------------------------
1498 # Utils
def get_compiler_flags(override_flags, noforce):
    """Return the list of base compiler flags for the current test.

    Starts from override_flags (if given) or the test's default
    compiler_always_flags; optionally drops -fforce-recomp; then appends
    the test's extra_hc_opts and any -outputdir setting.
    """
    opts = getTestOpts()

    # Copy so we never mutate the shared defaults.
    if override_flags is not None:
        flags = copy.copy(override_flags)
    else:
        flags = copy.copy(opts.compiler_always_flags)

    if noforce:
        flags = [f for f in flags if f != '-fforce-recomp']

    flags.append(opts.extra_hc_opts)

    if opts.outputdir != None:
        flags.extend(["-outputdir", opts.outputdir])

    return flags
1516
def check_stdout_ok(name, way):
    """Compare the program's run.stdout against the expected stdout sample,
    after normalisation.  A test-supplied check_stdout hook, when present,
    replaces the standard comparison."""
    actual_stdout_file = add_suffix(name, 'run.stdout')
    expected_stdout_file = find_expected_file(name, 'stdout')

    extra_norm = join_normalisers(normalise_output, getTestOpts().extra_normaliser)

    check_stdout = getTestOpts().check_stdout
    if check_stdout:
        actual_stdout_path = in_testdir(actual_stdout_file)
        return check_stdout(actual_stdout_path, extra_norm)

    return compare_outputs(way, 'stdout', extra_norm,
                           expected_stdout_file, actual_stdout_file)
1530
def dump_stdout( name ):
    """Print the test's run.stdout file (CRs stripped) to our stdout."""
    print('Stdout:')
    print(read_no_crs(in_testdir(name, 'run.stdout')))
1534
def check_stderr_ok(name, way):
    """Compare the program's run.stderr against the expected stderr sample,
    after error-message normalisation."""
    actual_stderr_file = add_suffix(name, 'run.stderr')
    expected_stderr_file = find_expected_file(name, 'stderr')

    return compare_outputs(way, 'stderr',
                           join_normalisers(normalise_errmsg, getTestOpts().extra_errmsg_normaliser), \
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=normalise_whitespace)
1543
def dump_stderr( name ):
    """Print the test's run.stderr file (CRs stripped) to our stdout."""
    print("Stderr:")
    print(read_no_crs(in_testdir(name, 'run.stderr')))
1547
def read_no_crs(file):
    """Return the contents of *file* with all '\\r' characters removed,
    or '' if the file cannot be read.

    Fixes: the original called ``h.close`` without parentheses, so the
    handle was never explicitly closed, and shadowed the builtin ``str``.
    """
    contents = ''
    try:
        # 'with' guarantees the handle is closed.
        with open(file) as h:
            contents = h.read()
    except Exception:
        # On Windows, if the program fails very early, it seems the
        # files stdout/stderr are redirected to may not get created
        pass
    return re.sub('\r', '', contents)
1559
def write_file(file, str):
    """Write *str* to *file*, truncating any existing contents.

    Fix: the original called ``h.close`` without parentheses, so the
    file was never explicitly flushed/closed; 'with' guarantees both.
    """
    with open(file, 'w') as h:
        h.write(str)
1564
def check_hp_ok(name):
    """Run hp2ps on the test's heap profile and, when ghostscript is
    available, validate the generated PostScript.  Returns True on
    success, False (or falls through returning None, which is falsy)
    on failure."""
    opts = getTestOpts()

    # do not qualify for hp2ps because we should be in the right directory
    hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())

    hp2psResult = runCmdExitCode(hp2psCmd)

    actual_ps_path = in_testdir(name, 'ps')

    if(hp2psResult == 0):
        if (os.path.exists(actual_ps_path)):
            if gs_working:
                gsResult = runCmdExitCode(genGSCmd(actual_ps_path))
                if (gsResult == 0):
                    return (True)
                else:
                    # NOTE(review): this branch prints and then falls off the
                    # end, implicitly returning None (treated as failure).
                    print("hp2ps output for " + name + "is not valid PostScript")
            else: return (True) # assume postscript is valid without ghostscript
        else:
            print("hp2ps did not generate PostScript for " + name)
            return (False)
    else:
        print("hp2ps error when processing heap profile for " + name)
        return(False)
1590
def check_prof_ok(name, way):
    """Compare the test's .prof output against <name>.prof.sample, if such
    a sample exists.  Returns True when no sample is present; False when
    the actual profile is missing or empty."""
    expected_prof_file = find_expected_file(name, 'prof.sample')
    expected_prof_path = in_testdir(expected_prof_file)

    # Check actual prof file only if we have an expected prof file to
    # compare it with.
    if not os.path.exists(expected_prof_path):
        return True

    actual_prof_file = add_suffix(name, 'prof')
    actual_prof_path = in_testdir(actual_prof_file)

    if not os.path.exists(actual_prof_path):
        print(actual_prof_path + " does not exist")
        return(False)

    if os.path.getsize(actual_prof_path) == 0:
        print(actual_prof_path + " is empty")
        return(False)

    return compare_outputs(way, 'prof', normalise_prof,
                           expected_prof_file, actual_prof_file,
                           whitespace_normaliser=normalise_whitespace)
1614
1615 # Compare expected output to actual output, and optionally accept the
1616 # new output. Returns true if output matched or was accepted, false
1617 # otherwise. See Note [Output comparison] for the meaning of the
1618 # normaliser and whitespace_normaliser parameters.
# Compare expected output to actual output, and optionally accept the
# new output. Returns true if output matched or was accepted, false
# otherwise. See Note [Output comparison] for the meaning of the
# normaliser and whitespace_normaliser parameters.
def compare_outputs(way, kind, normaliser, expected_file, actual_file,
                    whitespace_normaliser=lambda x:x):
    # The expected sample lives in the source dir; the actual output and
    # all .normalised debris live in the test dir.
    expected_path = in_srcdir(expected_file)
    actual_path = in_testdir(actual_file)

    if os.path.exists(expected_path):
        expected_str = normaliser(read_no_crs(expected_path))
        # Create the .normalised file in the testdir, not in the srcdir.
        expected_normalised_file = add_suffix(expected_file, 'normalised')
        expected_normalised_path = in_testdir(expected_normalised_file)
    else:
        # No sample at all: treat the expected output as empty.
        expected_str = ''
        expected_normalised_path = '/dev/null'

    actual_raw = read_no_crs(actual_path)
    actual_str = normaliser(actual_raw)

    # See Note [Output comparison].
    if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
        return 1
    else:
        if config.verbose >= 1 and _expect_pass(way):
            print('Actual ' + kind + ' output differs from expected:')

        if expected_normalised_path != '/dev/null':
            write_file(expected_normalised_path, expected_str)

        actual_normalised_path = add_suffix(actual_path, 'normalised')
        write_file(actual_normalised_path, actual_str)

        if config.verbose >= 1 and _expect_pass(way):
            # See Note [Output comparison].
            r = os.system('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
                                                        actual_normalised_path))

            # If for some reason there were no non-whitespace differences,
            # then do a full diff
            if r == 0:
                r = os.system('diff -u "{0}" "{1}"'.format(expected_normalised_path,
                                                           actual_normalised_path))

        # With --accept, overwrite (or delete) the sample — unless the test
        # is expected to fail, in which case the new output is not trusted.
        if config.accept and (getTestOpts().expect == 'fail' or
                              way in getTestOpts().expect_fail_for):
            if_verbose(1, 'Test is expected to fail. Not accepting new output.')
            return 0
        elif config.accept and actual_raw:
            if_verbose(1, 'Accepting new output.')
            write_file(expected_path, actual_raw)
            return 1
        elif config.accept:
            if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
            os.remove(expected_path)
            return 1
        else:
            return 0
1675
1676 # Note [Output comparison]
1677 #
1678 # We do two types of output comparison:
1679 #
1680 # 1. To decide whether a test has failed. We apply a `normaliser` and an
1681 # optional `whitespace_normaliser` to the expected and the actual
1682 # output, before comparing the two.
1683 #
1684 # 2. To show as a diff to the user when the test indeed failed. We apply
1685 # the same `normaliser` function to the outputs, to make the diff as
1686 # small as possible (only showing the actual problem). But we don't
1687 # apply the `whitespace_normaliser` here, because it might completely
1688 # squash all whitespace, making the diff unreadable. Instead we rely
1689 # on the `diff` program to ignore whitespace changes as much as
1690 # possible (#10152).
1691
def normalise_whitespace( str ):
    """Collapse every run of whitespace into a single space and trim the
    ends, so comparisons ignore layout differences."""
    return ' '.join(str.split())
1695
# Matches the location part of a call-stack entry, e.g.
# ", called at Foo.hs:12:34 in pkg-1.0:Foo:"
callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')

def normalise_callstacks(s):
    """Normalise GHC call-stack output so tests are insensitive to line
    numbers, package ids, and (unless requested) -prof call stacks."""
    opts = getTestOpts()
    def repl(matches):
        location = matches.group(1)
        location = normalise_slashes_(location)
        return ', called at {0}:<line>:<column> in <package-id>:'.format(location)
    # Ignore line number differences in call stacks (#10834).
    s = re.sub(callSite_re, repl, s)
    # Ignore the change in how we identify implicit call-stacks
    s = s.replace('from ImplicitParams', 'from HasCallStack')
    if not opts.keep_prof_callstacks:
        # Don't output prof callstacks. Test output should be
        # independent from the WAY we run the test.
        s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
    return s
1713
# Matches the two fingerprint words of a Typeable TyCon representation,
# e.g. "TyCon 123## 456## ".
tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)

def normalise_type_reps(str):
    """Normalise out fingerprints from Typeable TyCon representations"""
    return tyCon_re.sub('TyCon FINGERPRINT FINGERPRINT ', str)
1719
def normalise_errmsg( str ):
    """Normalise error-messages emitted via stderr"""
    # IBM AIX's `ld` is a bit chatty
    if opsys('aix'):
        str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    str = normalise_callstacks(str)
    str = normalise_type_reps(str)

    # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
    # the colon is there because it appears in error messages; this
    # hacky solution is used in place of more sophisticated filename
    # mangling
    str = re.sub('([^\\s])\\.exe', '\\1', str)
    # normalise slashes, minimise Windows/Unix filename differences
    str = re.sub('\\\\', '/', str)
    # The inplace ghc's are called ghc-stage[123] to avoid filename
    # collisions, so we need to normalise that to just "ghc"
    str = re.sub('ghc-stage[123]', 'ghc', str)
    # Error messages sometimes contain integer implementation package
    str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
    # Also filter out bullet characters. This is because bullets are used to
    # separate error sections, and tests shouldn't be sensitive to how
    # the division happens.
    bullet = u'•'.encode('utf8') if isinstance(str, bytes) else u'•'
    str = str.replace(bullet, '')
    return str
1750
1751 # normalise a .prof file, so that we can reasonably compare it against
1752 # a sample. This doesn't compare any of the actual profiling data,
1753 # only the shape of the profile and the number of entries.
# normalise a .prof file, so that we can reasonably compare it against
# a sample. This doesn't compare any of the actual profiling data,
# only the shape of the profile and the number of entries.
def normalise_prof (str):
    # strip everything up to the line beginning "COST CENTRE"
    str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)

    # strip results for CAFs, these tend to change unpredictably
    str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)

    # XXX Ignore Main.main.  Sometimes this appears under CAF, and
    # sometimes under MAIN.
    str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)

    # We have something like this:
    #
    # MAIN         MAIN  <built-in>          53  0  0.0   0.2  0.0 100.0
    #  CAF         Main  <entire-module>    105  0  0.0   0.3  0.0  62.5
    #   readPrec   Main  Main_1.hs:7:13-16  109  1  0.0   0.6  0.0   0.6
    #   readPrec   Main  Main_1.hs:4:13-16  107  1  0.0   0.6  0.0   0.6
    #   main       Main  Main_1.hs:(10,1)-(20,20) 106 1 0.0 20.2 0.0 61.0
    #    ==        Main  Main_1.hs:7:25-26  114  1  0.0   0.0  0.0   0.0
    #    ==        Main  Main_1.hs:4:25-26  113  1  0.0   0.0  0.0   0.0
    #    showsPrec Main  Main_1.hs:7:19-22  112  2  0.0   1.2  0.0   1.2
    #    showsPrec Main  Main_1.hs:4:19-22  111  2  0.0   0.9  0.0   0.9
    #    readPrec  Main  Main_1.hs:7:13-16  110  0  0.0  18.8  0.0  18.8
    #    readPrec  Main  Main_1.hs:4:13-16  108  0  0.0  19.9  0.0  19.9
    #
    # then we remove all the specific profiling data, leaving only the cost
    # centre name, module, src, and entries, to end up with this: (modulo
    # whitespace between columns)
    #
    # MAIN      MAIN <built-in>        0
    # readPrec  Main Main_1.hs:7:13-16 1
    # readPrec  Main Main_1.hs:4:13-16 1
    # ==        Main Main_1.hs:7:25-26 1
    # ==        Main Main_1.hs:4:25-26 1
    # showsPrec Main Main_1.hs:7:19-22 2
    # showsPrec Main Main_1.hs:4:19-22 2
    # readPrec  Main Main_1.hs:7:13-16 0
    # readPrec  Main Main_1.hs:4:13-16 0

    # Split 9 whitespace-separated groups, take columns 1 (cost-centre), 2
    # (module), 3 (src), and 5 (entries). SCC names can't have whitespace, so
    # this works fine.
    str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
            '\\1 \\2 \\3 \\5\n', str)
    return str
1799
def normalise_slashes_( str ):
    """Turn backslashes into forward slashes, minimising Windows/Unix
    path differences."""
    return str.replace('\\', '/')
1803
def normalise_exe_( str ):
    """Drop every '.exe' occurrence (Windows executable suffix)."""
    return re.sub(r'\.exe', '', str)
1807
def normalise_output( str ):
    """Normalise a program's stdout/stderr output before comparison."""
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    # Remove a .exe extension (for Windows)
    # This can occur in error messages generated by the program.
    str = re.sub('([^\\s])\\.exe', '\\1', str)
    str = normalise_callstacks(str)
    str = normalise_type_reps(str)
    return str
1819
def normalise_asm( str ):
    """Reduce assembly output to a comparable skeleton: keep only the
    mnemonic of each instruction (plus the target of 'call'), dropping
    assembler directives (lines whose first token starts with '.'),
    '@plt' decorations, and blank lines."""
    directive = re.compile('^[ \t]*\\..*$')
    kept = []
    for raw in str.split('\n'):
        # Drop metadata directives (e.g. ".type")
        if directive.match(raw):
            continue
        tokens = raw.replace('@plt', '').lstrip().split()
        # Drop empty lines.
        if not tokens:
            continue
        # Keep the operand only for call instructions.
        if tokens[0] == 'call':
            kept.append(tokens[0] + ' ' + tokens[1])
        else:
            kept.append(tokens[0])
    return '\n'.join(kept)
1840
def if_verbose( n, s ):
    """Print s only when the configured verbosity is at least n."""
    if config.verbose >= n:
        print(s)
1844
def if_verbose_dump( n, f ):
    """Print the contents of file f when verbosity is at least n; prints
    an empty line if the file cannot be read (best-effort)."""
    if config.verbose >= n:
        try:
            print(open(f).read())
        except:
            print('')
1851
def rawSystem(cmd_and_args):
    """Run cmd_and_args (argv-style list) directly and return its exit
    status, without going through a shell."""
    # We prefer subprocess.call to os.spawnv as the latter
    # seems to send its arguments through a shell or something
    # with the Windows (non-cygwin) python. An argument "a b c"
    # turns into three arguments ["a", "b", "c"].

    cmd = cmd_and_args[0]
    return subprocess.call([strip_quotes(cmd)] + cmd_and_args[1:])
1860
1861 # Note that this doesn't handle the timeout itself; it is just used for
1862 # commands that have timeout handling built-in.
# Note that this doesn't handle the timeout itself; it is just used for
# commands that have timeout handling built-in.
def rawSystemWithTimeout(cmd_and_args):
    """Run a command via rawSystem, interpreting the timeout program's
    special exit codes: 98 means ^C was pressed (stop the whole run),
    99 means the command was killed by the timeout."""
    r = rawSystem(cmd_and_args)
    if r == 98:
        # The python timeout program uses 98 to signal that ^C was pressed
        stopNow()
    if r == 99 and getTestOpts().exit_code != 99:
        # Only print a message when timeout killed the process unexpectedly.
        cmd = cmd_and_args[-1]
        if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
    return r
1873
1874 # cmd is a complex command in Bourne-shell syntax
1875 # e.g (cd . && 'c:/users/simonpj/darcs/HEAD/compiler/stage1/ghc-inplace' ...etc)
1876 # Hence it must ultimately be run by a Bourne shell
1877 #
1878 # Mostly it invokes the command wrapped in 'timeout' thus
1879 # timeout 300 'cd . && ...blah blah'
1880 # so it's timeout's job to invoke the Bourne shell
1881 #
1882 # But watch out for the case when there is no timeout program!
1883 # Then, when using the native Python, os.system will invoke the cmd shell
1884
def runCmd( cmd ):
    """Run a Bourne-shell command (with config placeholders like '{hpc}'
    substituted), wrapped in the timeout program when one is configured.
    Returns an os.system-style status (exit code shifted into the high
    byte)."""
    # Format cmd using config. Example: cmd='{hpc} report A.tix'
    cmd = cmd.format(**config.__dict__)

    if_verbose( 3, cmd )
    r = 0
    if config.os == 'mingw32':
        # On MinGW, we will always have timeout
        assert config.timeout_prog!=''

    if config.timeout_prog != '':
        r = rawSystemWithTimeout([config.timeout_prog, str(config.timeout), cmd])
    else:
        r = os.system(cmd)
    return r << 8
1900
def runCmdFor( name, cmd, timeout_multiplier=1.0 ):
    """Like runCmd, but scales the configured timeout by
    timeout_multiplier.  The 'name' parameter is currently unused but
    kept for call-site symmetry.  Returns an os.system-style status."""
    # Format cmd using config. Example: cmd='{hpc} report A.tix'
    cmd = cmd.format(**config.__dict__)

    if_verbose( 3, cmd )
    r = 0
    if config.os == 'mingw32':
        # On MinGW, we will always have timeout
        assert config.timeout_prog!=''
    # Round the scaled timeout up to a whole number of seconds.
    timeout = int(ceil(config.timeout * timeout_multiplier))

    if config.timeout_prog != '':
        r = rawSystemWithTimeout([config.timeout_prog, str(timeout), cmd])
    else:
        r = os.system(cmd)
    return r << 8
1917
def runCmdExitCode( cmd ):
    """Run cmd via runCmd and return just the process exit code."""
    return runCmd(cmd) >> 8
1920
1921 # -----------------------------------------------------------------------------
1922 # checking if ghostscript is available for checking the output of hp2ps
1923
def genGSCmd(psfile):
    """Build the ghostscript command line used to validate *psfile*.
    The '{gs}' placeholder is later substituted from config by runCmd."""
    return '{gs} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "' + psfile + '"'
1926
def gsNotWorking():
    """Record (and report) that ghostscript is unusable for hp2ps checks."""
    global gs_working
    print("GhostScript not available for hp2ps tests")
1930
# Module-load-time probe: ghostscript is usable for hp2ps checks only if
# it accepts a known-good .ps file AND rejects a known-bad one.
# (The 'global' statement at module level is a no-op, kept from the original.)
global gs_working
gs_working = 0
if config.have_profiling:
    if config.gs != '':
        resultGood = runCmdExitCode(genGSCmd(config.confdir + '/good.ps'));
        if resultGood == 0:
            resultBad = runCmdExitCode(genGSCmd(config.confdir + '/bad.ps') +
                                       ' >/dev/null 2>&1')
            if resultBad != 0:
                print("GhostScript available for hp2ps tests")
                gs_working = 1;
            else:
                gsNotWorking();
        else:
            gsNotWorking();
    else:
        gsNotWorking();
1948
def add_suffix( name, suffix ):
    """Return 'name' unchanged when 'suffix' is empty, else 'name.suffix'."""
    return name if suffix == '' else '%s.%s' % (name, suffix)
1954
def add_hs_lhs_suffix(name):
    """Append the source-file extension implied by the current test options.

    The first matching source-kind flag wins; plain Haskell ('hs') is the
    default when no flag is set.
    """
    opts = getTestOpts()
    kind_table = [(opts.c_src,      'c'),
                  (opts.cmm_src,    'cmm'),
                  (opts.objc_src,   'm'),
                  (opts.objcpp_src, 'mm'),
                  (opts.literate,   'lhs')]
    for enabled, ext in kind_table:
        if enabled:
            return add_suffix(name, ext)
    return add_suffix(name, 'hs')
1968
def replace_suffix( name, suffix ):
    """Swap the extension of 'name' for 'suffix' (always appends '.suffix')."""
    root = os.path.splitext(name)[0]
    return '%s.%s' % (root, suffix)
1972
def in_testdir(name, suffix=''):
    """Path of 'name' (with optional suffix) in the test's working directory."""
    filename = add_suffix(name, suffix)
    return os.path.join(getTestOpts().testdir, filename)
1975
def in_srcdir(name, suffix=''):
    """Path of 'name' (with optional suffix) in the test's source directory."""
    filename = add_suffix(name, suffix)
    return os.path.join(getTestOpts().srcdir, filename)
1978
1979 # Finding the sample output. The filename is of the form
1980 #
1981 # <test>.stdout[-ws-<wordsize>][-<platform>]
1982 #
def find_expected_file(name, suff):
    """Locate the sample-output file for test 'name' with suffix 'suff'.

    Candidates are tried from most to least specific: for each platform
    qualifier (-<platform>, -<os>, none, in that order) the word-size
    qualified form (-ws-<wordsize>) is tried before the unqualified one.
    Returns the first candidate present in the source dir, otherwise the
    bare '<name>.<suff>' filename.
    """
    basename = add_suffix(name, suff)

    for plat in ('-' + config.platform, '-' + config.os, ''):
        for ws in ('-ws-' + config.wordsize, ''):
            candidate = basename + ws + plat
            if os.path.exists(in_srcdir(candidate)):
                return candidate

    return basename
1995
def cleanup():
    """Delete the test's working directory, ignoring errors (e.g. if gone)."""
    testdir = getTestOpts().testdir
    shutil.rmtree(testdir, ignore_errors=True)
1998
1999
2000 # -----------------------------------------------------------------------------
2001 # Return a list of all the files ending in '.T' below directories roots.
2002
def findTFiles(roots):
    """All '.T' files at or below the paths in 'roots'.

    os.walk is deliberately avoided: it produces backslashes on Windows,
    which trip the testsuite later.
    """
    found = []
    for root in roots:
        found.extend(findTFiles_(root))
    return found
2008
def findTFiles_(path):
    """'.T' files at or below 'path': recurse into directories, keep files
    whose name ends in '.T', drop everything else."""
    if os.path.isdir(path):
        children = [os.path.join(path, entry) for entry in os.listdir(path)]
        return findTFiles(children)
    if path.endswith('.T'):
        return [path]
    return []
2017
2018 # -----------------------------------------------------------------------------
2019 # Output a test summary to the specified file object
2020
def summary(t, file, short=False):
    # Write a human-readable summary of the test-run results 't' to the
    # file object 'file'.  When 'short' is True, only the TEST="..." list
    # of unexpectedly-behaving tests is printed (handy for re-running them).

    file.write('\n')
    printUnexpectedTests(file, [t.unexpected_passes, t.unexpected_failures, t.unexpected_stat_failures])

    if short:
        # Only print the list of unexpected tests above.
        return

    # Overall counts.  The elapsed time is reconstructed from the recorded
    # start time; every count is right-justified to 8 columns so the
    # numbers line up.
    file.write('OVERALL SUMMARY for test run started at '
               + time.strftime("%c %Z", t.start_time) + '\n'
               + str(datetime.timedelta(seconds=
                     round(time.time() - time.mktime(t.start_time)))).rjust(8)
               + ' spent to go through\n'
               + repr(t.total_tests).rjust(8)
               + ' total tests, which gave rise to\n'
               + repr(t.total_test_cases).rjust(8)
               + ' test cases, of which\n'
               + repr(t.n_tests_skipped).rjust(8)
               + ' were skipped\n'
               + '\n'
               + repr(t.n_missing_libs).rjust(8)
               + ' had missing libraries\n'
               + repr(t.n_expected_passes).rjust(8)
               + ' expected passes\n'
               + repr(t.n_expected_failures).rjust(8)
               + ' expected failures\n'
               + '\n'
               + repr(t.n_framework_failures).rjust(8)
               + ' caused framework failures\n'
               + repr(t.n_unexpected_passes).rjust(8)
               + ' unexpected passes\n'
               + repr(t.n_unexpected_failures).rjust(8)
               + ' unexpected failures\n'
               + repr(t.n_unexpected_stat_failures).rjust(8)
               + ' unexpected stat failures\n'
               + '\n')

    # Detail sections: one per category of unexpected result, printed only
    # when the category is non-empty.
    if t.n_unexpected_passes > 0:
        file.write('Unexpected passes:\n')
        printPassingTestInfosSummary(file, t.unexpected_passes)

    if t.n_unexpected_failures > 0:
        file.write('Unexpected failures:\n')
        printFailingTestInfosSummary(file, t.unexpected_failures)

    if t.n_unexpected_stat_failures > 0:
        file.write('Unexpected stat failures:\n')
        printFailingTestInfosSummary(file, t.unexpected_stat_failures)

    if t.n_framework_failures > 0:
        file.write('Test framework failures:\n')
        printFrameworkFailureSummary(file, t.framework_failures)

    # stopping() reports whether the run was asked to stop early (stopNow).
    if stopping():
        file.write('WARNING: Testsuite run was terminated early\n')
2077
def printUnexpectedTests(file, testInfoss):
    """Emit a TEST="..." line naming every test appearing in any of the
    directory->test mappings in 'testInfoss'; print nothing when empty."""
    unexpected = [test
                  for testInfos in testInfoss
                  for directory in testInfos
                  for test in testInfos[directory]]
    if unexpected:
        file.write('Unexpected results from:\n')
        file.write('TEST="' + ' '.join(unexpected) + '"\n')
        file.write('\n')
2089
def printPassingTestInfosSummary(file, testInfos):
    """Per-directory listing of unexpectedly-passing tests and their ways.

    'testInfos' maps directory -> test name -> list of ways.  Directory
    names are padded so the test names line up in a column.
    """
    directories = sorted(testInfos.keys())
    width = max(len(d) for d in directories)
    for directory in directories:
        for test in sorted(testInfos[directory].keys()):
            ways = ','.join(testInfos[directory][test])
            file.write('   %s%s (%s)\n'
                       % (directory.ljust(width + 2), test, ways))
    file.write('\n')
2101
def printFailingTestInfosSummary(file, testInfos):
    """Per-directory listing of failing tests with reason and ways.

    'testInfos' maps directory -> test name -> reason -> list of ways;
    one output line per (test, reason) pair, directory column padded.
    """
    directories = sorted(testInfos.keys())
    width = max(len(d) for d in directories)
    for directory in directories:
        for test in sorted(testInfos[directory].keys()):
            for reason in testInfos[directory][test].keys():
                ways = ','.join(testInfos[directory][test][reason])
                file.write('   %s%s [%s] (%s)\n'
                           % (directory.ljust(width + 2), test, reason, ways))
    file.write('\n')
2116
def printFrameworkFailureSummary(file, testInfos):
    """List tests whose framework itself failed, with the affected ways.

    'testInfos' maps test name -> list of ways; names are padded so the
    way lists line up.
    """
    names = sorted(testInfos.keys())
    width = max(len(n) for n in names)
    for name in names:
        ways = ','.join(testInfos[name])
        file.write('   %s (%s)\n' % (name.ljust(width + 2), ways))
    file.write('\n')
2126
def modify_lines(s, f):
    """Apply 'f' to each line of 's' and rejoin; a non-empty result is
    always newline-terminated (avoids 'No newline at end of file'
    warnings when diffing)."""
    result = '\n'.join(map(f, s.splitlines()))
    if result and not result.endswith('\n'):
        result += '\n'
    return result