1 # coding=utf8
2 #
3 # (c) Simon Marlow 2002
4 #
5
6 from __future__ import print_function
7
8 import io
9 import shutil
10 import os
11 import errno
12 import string
13 import re
14 import traceback
15 import time
16 import datetime
17 import copy
18 import glob
19 import sys
20 from math import ceil, trunc
21 import collections
22 import subprocess
23
24 from testglobals import *
25 from testutil import *
26 extra_src_files = {'T4198': ['exitminus1.c']} # TODO: See #12223
27
28 if config.use_threads:
29 import threading
30 try:
31 import thread
32 except ImportError: # Python 3
33 import _thread as thread
34
35 global wantToStop
36 wantToStop = False
37
38 global pool_sema
39 if config.use_threads:
40 pool_sema = threading.BoundedSemaphore(value=config.threads)
41
42 def stopNow():
43 global wantToStop
44 wantToStop = True
45 def stopping():
46 return wantToStop
47
48 # Options valid for the current test only (these get reset to
49 # testdir_testopts after each test).
50
51 global testopts_local
52 if config.use_threads:
53 testopts_local = threading.local()
54 else:
55 class TestOpts_Local:
56 pass
57 testopts_local = TestOpts_Local()
58
59 def getTestOpts():
60 return testopts_local.x
61
62 def setLocalTestOpts(opts):
63 global testopts_local
64 testopts_local.x=opts
65
66 def isStatsTest():
67 opts = getTestOpts()
68 return bool(opts.compiler_stats_range_fields or opts.stats_range_fields)
69
70
71 # This can be called at the top of a file of tests, to set default test options
72 # for the following tests.
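#
# For example, a .T file might begin with (a sketch; the option values
# here are only illustrative):
#
#   setTestOpts(extra_hc_opts('-fno-warn-tabs'))
#   setTestOpts(when(fast(), skip))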
73 def setTestOpts( f ):
74 global thisdir_settings
75 thisdir_settings = [thisdir_settings, f]
76
77 # -----------------------------------------------------------------------------
78 # Canned setup functions for common cases. eg. for a test you might say
79 #
80 # test('test001', normal, compile, [''])
81 #
82 # to run it without any options, but change it to
83 #
84 # test('test001', expect_fail, compile, [''])
85 #
86 # to expect failure for this test.
87
88 def normal( name, opts ):
89 return;
90
91 def skip( name, opts ):
92 opts.skip = 1
93
94 def expect_fail( name, opts ):
95 # The compiler, testdriver, OS or platform is missing a certain
96 # feature, and we don't plan to or can't fix it now or in the
97 # future.
98 opts.expect = 'fail';
99
100 def reqlib( lib ):
101 return lambda name, opts, l=lib: _reqlib (name, opts, l )
102
103 def stage1(name, opts):
104 # See Note [Why is there no stage1 setup function?]
105 framework_fail(name, 'stage1 setup function does not exist',
106 'add your test to testsuite/tests/stage1 instead')
107
108 # Note [Why is there no stage1 setup function?]
109 #
110 # Presumably a stage1 setup function would signal that the stage1
111 # compiler should be used to compile a test.
112 #
113 # Trouble is, the path to the compiler + the `ghc --info` settings for
114 # that compiler are currently passed in from the `make` part of the
115 # testsuite driver.
116 #
117 # Switching compilers in the Python part would be entirely too late, as
118 # all ghc_with_* settings would be wrong. See config/ghc for possible
119 # consequences (for example, config.run_ways would still be
120 # based on the default compiler, quite likely causing ./validate --slow
121 # to fail).
122 #
123 # It would be possible to let the Python part of the testsuite driver
124 # make the call to `ghc --info`, but doing so would require quite some
125 # work. Care has to be taken to not affect the run_command tests for
126 # example, as they also use the `ghc --info` settings:
127 # quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
128 #
129 # If you want a test to run using the stage1 compiler, add it to the
130 # testsuite/tests/stage1 directory. Validate runs the tests in that
131 # directory with `make stage=1`.
132
133 # Cache the results of looking to see if we have a library or not.
134 # This makes quite a difference, especially on Windows.
135 have_lib = {}
136
137 def _reqlib( name, opts, lib ):
138 if lib in have_lib:
139 got_it = have_lib[lib]
140 else:
141 cmd = strip_quotes(config.ghc_pkg)
142 p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
143 stdout=subprocess.PIPE,
144 stderr=subprocess.PIPE)
145 # read from stdout and stderr to avoid blocking due to
146 # buffers filling
147 p.communicate()
148 r = p.wait()
149 got_it = r == 0
150 have_lib[lib] = got_it
151
152 if not got_it:
153 opts.expect = 'missing-lib'
154
155 def req_haddock( name, opts ):
156 if not config.haddock:
157 opts.expect = 'missing-lib'
158
159 def req_profiling( name, opts ):
160 '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
161 if not config.have_profiling:
162 opts.expect = 'fail'
163
164 def req_shared_libs( name, opts ):
165 if not config.have_shared_libs:
166 opts.expect = 'fail'
167
168 def req_interp( name, opts ):
169 if not config.have_interp:
170 opts.expect = 'fail'
171
172 def req_smp( name, opts ):
173 if not config.have_smp:
174 opts.expect = 'fail'
175
176 def ignore_stdout(name, opts):
177 opts.ignore_stdout = True
178
179 def ignore_stderr(name, opts):
180 opts.ignore_stderr = True
181
182 def combined_output( name, opts ):
183 opts.combined_output = True
184
185 # -----
186
187 def expect_fail_for( ways ):
188 return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )
189
190 def _expect_fail_for( name, opts, ways ):
191 opts.expect_fail_for = ways
192
193 def expect_broken( bug ):
194 # This test is expected not to work due to the indicated trac bug
195 # number.
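# A typical .T entry looks like (a sketch; the ticket number is made up):
#   test('T9999', expect_broken(9999), compile, [''])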
196 return lambda name, opts, b=bug: _expect_broken (name, opts, b )
197
198 def _expect_broken( name, opts, bug ):
199 record_broken(name, opts, bug)
200 opts.expect = 'fail';
201
202 def expect_broken_for( bug, ways ):
203 return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )
204
205 def _expect_broken_for( name, opts, bug, ways ):
206 record_broken(name, opts, bug)
207 opts.expect_fail_for = ways
208
209 def record_broken(name, opts, bug):
210 global brokens
211 me = (bug, opts.testdir, name)
212 if not me in brokens:
213 brokens.append(me)
214
215 def _expect_pass(way):
216 # Helper function. Not intended for use in .T files.
217 opts = getTestOpts()
218 return opts.expect == 'pass' and way not in opts.expect_fail_for
219
220 # -----
221
222 def omit_ways( ways ):
223 return lambda name, opts, w=ways: _omit_ways( name, opts, w )
224
225 def _omit_ways( name, opts, ways ):
226 opts.omit_ways = ways
227
228 # -----
229
230 def only_ways( ways ):
231 return lambda name, opts, w=ways: _only_ways( name, opts, w )
232
233 def _only_ways( name, opts, ways ):
234 opts.only_ways = ways
235
236 # -----
237
238 def extra_ways( ways ):
239 return lambda name, opts, w=ways: _extra_ways( name, opts, w )
240
241 def _extra_ways( name, opts, ways ):
242 opts.extra_ways = ways
243
244 # -----
245
246 def set_stdin( file ):
247 return lambda name, opts, f=file: _set_stdin(name, opts, f);
248
249 def _set_stdin( name, opts, f ):
250 opts.stdin = f
251
252 # -----
253
254 def exit_code( val ):
255 return lambda name, opts, v=val: _exit_code(name, opts, v);
256
257 def _exit_code( name, opts, v ):
258 opts.exit_code = v
259
260 def signal_exit_code( val ):
261 if opsys('solaris2'):
262 return exit_code( val );
263 else:
264 # When an application running on Linux receives a fatal error
265 # signal, its exit code is encoded as 128 + the signal
266 # value. See http://www.tldp.org/LDP/abs/html/exitcodes.html
267 # I assume that Mac OS X behaves in the same way; at least the
268 # Mac OS X builder's behavior suggests this.
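# For example (a sketch): a program killed by SIGSEGV (signal 11)
# would use signal_exit_code(11), which on Linux means expecting
# exit code 139 (128 + 11).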
269 return exit_code( val+128 );
270
271 # -----
272
273 def compile_timeout_multiplier( val ):
274 return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)
275
276 def _compile_timeout_multiplier( name, opts, v ):
277 opts.compile_timeout_multiplier = v
278
279 def run_timeout_multiplier( val ):
280 return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)
281
282 def _run_timeout_multiplier( name, opts, v ):
283 opts.run_timeout_multiplier = v
284
285 # -----
286
287 def extra_run_opts( val ):
288 return lambda name, opts, v=val: _extra_run_opts(name, opts, v);
289
290 def _extra_run_opts( name, opts, v ):
291 opts.extra_run_opts = v
292
293 # -----
294
295 def extra_hc_opts( val ):
296 return lambda name, opts, v=val: _extra_hc_opts(name, opts, v);
297
298 def _extra_hc_opts( name, opts, v ):
299 opts.extra_hc_opts = v
300
301 # -----
302
303 def extra_clean( files ):
304 # TODO. Remove all calls to extra_clean.
305 return lambda _name, _opts: None
306
307 def extra_files(files):
308 return lambda name, opts: _extra_files(name, opts, files)
309
310 def _extra_files(name, opts, files):
311 opts.extra_files.extend(files)
312
313 # -----
314
315 def stats_num_field( field, expecteds ):
316 return lambda name, opts, f=field, e=expecteds: _stats_num_field(name, opts, f, e);
317
318 def _stats_num_field( name, opts, field, expecteds ):
319 if field in opts.stats_range_fields:
320 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
321
322 if type(expecteds) is list:
323 for (b, expected, dev) in expecteds:
324 if b:
325 opts.stats_range_fields[field] = (expected, dev)
326 return
327 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
328
329 else:
330 (expected, dev) = expecteds
331 opts.stats_range_fields[field] = (expected, dev)
332
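# A typical use in a .T file looks like (a sketch; the numbers are made up):
#
#   test('T1234', [stats_num_field('bytes allocated',
#                                  [(wordsize(64), 880000000, 10),
#                                   (wordsize(32), 440000000, 10)])],
#        compile_and_run, [''])
#
# i.e. each entry of the list is a (guard, expected value, allowed
# deviation in %) triple, and the first entry whose guard holds is used.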
333 def compiler_stats_num_field( field, expecteds ):
334 return lambda name, opts, f=field, e=expecteds: _compiler_stats_num_field(name, opts, f, e);
335
336 def _compiler_stats_num_field( name, opts, field, expecteds ):
337 if field in opts.compiler_stats_range_fields:
338 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
339
340 # Compiler performance numbers change when debugging is on, making the results
341 # useless and confusing. Therefore, skip if debugging is on.
342 if compiler_debugged():
343 skip(name, opts)
344
345 for (b, expected, dev) in expecteds:
346 if b:
347 opts.compiler_stats_range_fields[field] = (expected, dev)
348 return
349
350 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
351
352 # -----
353
354 def when(b, f):
355 # When list_broken is on, we want to see all expect_broken calls,
356 # so we always use f
357 if b or config.list_broken:
358 return f
359 else:
360 return normal
361
362 def unless(b, f):
363 return when(not b, f)
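# For example (a sketch): when(opsys('mingw32'), skip) skips a test on
# Windows, and unless(have_dynamic(), expect_broken(9999)) marks it as
# broken (hypothetical ticket number) when dynamic libraries are
# unavailable.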
364
365 def doing_ghci():
366 return 'ghci' in config.run_ways
367
368 def ghc_dynamic():
369 return config.ghc_dynamic
370
371 def fast():
372 return config.speed == 2
373
374 def platform( plat ):
375 return config.platform == plat
376
377 def opsys( os ):
378 return config.os == os
379
380 def arch( arch ):
381 return config.arch == arch
382
383 def wordsize( ws ):
384 return config.wordsize == str(ws)
385
386 def msys( ):
387 return config.msys
388
389 def cygwin( ):
390 return config.cygwin
391
392 def have_vanilla( ):
393 return config.have_vanilla
394
395 def have_dynamic( ):
396 return config.have_dynamic
397
398 def have_profiling( ):
399 return config.have_profiling
400
401 def in_tree_compiler( ):
402 return config.in_tree_compiler
403
404 def unregisterised( ):
405 return config.unregisterised
406
407 def compiler_profiled( ):
408 return config.compiler_profiled
409
410 def compiler_debugged( ):
411 return config.compiler_debugged
412
413 # ---
414
415 def high_memory_usage(name, opts):
416 opts.alone = True
417
418 # If a test is for a multi-CPU race, then running the test alone
419 # increases the chance that we'll actually see it.
420 def multi_cpu_race(name, opts):
421 opts.alone = True
422
423 # ---
424 def literate( name, opts ):
425 opts.literate = 1;
426
427 def c_src( name, opts ):
428 opts.c_src = 1;
429
430 def objc_src( name, opts ):
431 opts.objc_src = 1;
432
433 def objcpp_src( name, opts ):
434 opts.objcpp_src = 1;
435
436 def cmm_src( name, opts ):
437 opts.cmm_src = 1;
438
439 def outputdir( odir ):
440 return lambda name, opts, d=odir: _outputdir(name, opts, d)
441
442 def _outputdir( name, opts, odir ):
443 opts.outputdir = odir;
444
445 # ----
446
447 def pre_cmd( cmd ):
448 return lambda name, opts, c=cmd: _pre_cmd(name, opts, cmd)
449
450 def _pre_cmd( name, opts, cmd ):
451 opts.pre_cmd = cmd
452
453 # ----
454
455 def clean_cmd( cmd ):
456 # TODO. Remove all calls to clean_cmd.
457 return lambda _name, _opts: None
458
459 # ----
460
461 def cmd_prefix( prefix ):
462 return lambda name, opts, p=prefix: _cmd_prefix(name, opts, prefix)
463
464 def _cmd_prefix( name, opts, prefix ):
465 opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd;
466
467 # ----
468
469 def cmd_wrapper( fun ):
470 return lambda name, opts, f=fun: _cmd_wrapper(name, opts, fun)
471
472 def _cmd_wrapper( name, opts, fun ):
473 opts.cmd_wrapper = fun
474
475 # ----
476
477 def compile_cmd_prefix( prefix ):
478 return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, prefix)
479
480 def _compile_cmd_prefix( name, opts, prefix ):
481 opts.compile_cmd_prefix = prefix
482
483 # ----
484
485 def check_stdout( f ):
486 return lambda name, opts, f=f: _check_stdout(name, opts, f)
487
488 def _check_stdout( name, opts, f ):
489 opts.check_stdout = f
490
491 def no_check_hp(name, opts):
492 opts.check_hp = False
493
494 # ----
495
496 def normalise_slashes( name, opts ):
497 _normalise_fun(name, opts, normalise_slashes_)
498
499 def normalise_exe( name, opts ):
500 _normalise_fun(name, opts, normalise_exe_)
501
502 def normalise_fun( *fs ):
503 return lambda name, opts: _normalise_fun(name, opts, fs)
504
505 def _normalise_fun( name, opts, *fs ):
506 opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)
507
508 def normalise_errmsg_fun( *fs ):
509 return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)
510
511 def _normalise_errmsg_fun( name, opts, *fs ):
512 opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)
513
514 def normalise_version_( *pkgs ):
515 def normalise_version__( str ):
516 return re.sub('(' + '|'.join(map(re.escape,pkgs)) + ')-[0-9.]+',
517 '\\1-<VERSION>', str)
518 return normalise_version__
519
520 def normalise_version( *pkgs ):
521 def normalise_version__( name, opts ):
522 _normalise_fun(name, opts, normalise_version_(*pkgs))
523 _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
524 return normalise_version__
525
526 def normalise_drive_letter(name, opts):
527 # Windows only. Change D:\\ to C:\\.
528 _normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))
529
530 def keep_prof_callstacks(name, opts):
531 """Keep profiling callstacks.
532
533 Use together with `only_ways(prof_ways)`.
534 """
535 opts.keep_prof_callstacks = True
536
537 def join_normalisers(*a):
538 """
539 Compose functions, flattening sequences.
540
541 join_normalisers(f1,[f2,f3],f4)
542
543 is the same as
544
545 lambda x: f1(f2(f3(f4(x))))
546 """
547
548 def flatten(l):
549 """
550 Taken from http://stackoverflow.com/a/2158532/946226
551 """
552 for el in l:
553 if (isinstance(el, collections.Iterable)
554 and not isinstance(el, (bytes, str))):
555 for sub in flatten(el):
556 yield sub
557 else:
558 yield el
559
560 a = flatten(a)
561
562 fn = lambda x:x # identity function
563 for f in a:
564 assert callable(f)
565 fn = lambda x,f=f,fn=fn: fn(f(x))
566 return fn
567
568 # ----
569 # Function for composing two opt-fns together
570
571 def executeSetups(fs, name, opts):
572 if type(fs) is list:
573 # If we have a list of setups, then execute each one
574 for f in fs:
575 executeSetups(f, name, opts)
576 else:
577 # fs is a single function, so just apply it
578 fs(name, opts)
579
580 # -----------------------------------------------------------------------------
581 # The current directory of tests
582
583 def newTestDir(tempdir, dir):
584
585 global thisdir_settings
586 # reset the options for this test directory
587 def settings(name, opts, tempdir=tempdir, dir=dir):
588 return _newTestDir(name, opts, tempdir, dir)
589 thisdir_settings = settings
590
591 # Should be equal to entry in toplevel .gitignore.
592 testdir_suffix = '.run'
593
594 def _newTestDir(name, opts, tempdir, dir):
595 opts.srcdir = os.path.join(os.getcwd(), dir)
596 opts.testdir = os.path.join(tempdir, dir, name + testdir_suffix)
597 opts.compiler_always_flags = config.compiler_always_flags
598
599 # -----------------------------------------------------------------------------
600 # Actually doing tests
601
602 parallelTests = []
603 aloneTests = []
604 allTestNames = set([])
605
606 def runTest(watcher, opts, name, func, args):
607 if config.use_threads:
608 pool_sema.acquire()
609 t = threading.Thread(target=test_common_thread,
610 name=name,
611 args=(watcher, name, opts, func, args))
612 t.daemon = False
613 t.start()
614 else:
615 test_common_work(watcher, name, opts, func, args)
616
617 # name :: String
618 # setup :: TestOpts -> IO ()
619 def test(name, setup, func, args):
620 global aloneTests
621 global parallelTests
622 global allTestNames
623 global thisdir_settings
624 if name in allTestNames:
625 framework_fail(name, 'duplicate', 'There are multiple tests with this name')
626 if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
627 framework_fail(name, 'bad_name', 'This test has an invalid name')
628
629 if config.run_only_some_tests:
630 if name not in config.only:
631 return
632 else:
633 # Note [Mutating config.only]
634 # config.only is initially the set of tests requested by
635 # the user (via 'make TEST='). We then remove all tests that
636 # we've already seen (in .T files), so that we can later
637 # report on any tests we couldn't find and error out.
638 config.only.remove(name)
639
640 # Make a deep copy of the default_testopts, as we need our own copy
641 # of any dictionaries etc inside it. Otherwise, if one test modifies
642 # them, all tests will see the modified version!
643 myTestOpts = copy.deepcopy(default_testopts)
644
645 executeSetups([thisdir_settings, setup], name, myTestOpts)
646
647 thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
648 if myTestOpts.alone:
649 aloneTests.append(thisTest)
650 else:
651 parallelTests.append(thisTest)
652 allTestNames.add(name)
653
654 if config.use_threads:
655 def test_common_thread(watcher, name, opts, func, args):
656 try:
657 test_common_work(watcher, name, opts, func, args)
658 finally:
659 pool_sema.release()
660
661 def get_package_cache_timestamp():
662 if config.package_conf_cache_file == '':
663 return 0.0
664 else:
665 try:
666 return os.stat(config.package_conf_cache_file).st_mtime
667 except:
668 return 0.0
669
670 do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o', '.out') # See #12112
671
672 def test_common_work(watcher, name, opts, func, args):
673 try:
674 t.total_tests += 1
675 setLocalTestOpts(opts)
676
677 package_conf_cache_file_start_timestamp = get_package_cache_timestamp()
678
679 # All the ways we might run this test
680 if func == compile or func == multimod_compile:
681 all_ways = config.compile_ways
682 elif func == compile_and_run or func == multimod_compile_and_run:
683 all_ways = config.run_ways
684 elif func == ghci_script:
685 if 'ghci' in config.run_ways:
686 all_ways = ['ghci']
687 else:
688 all_ways = []
689 else:
690 all_ways = ['normal']
691
692 # A test itself can request extra ways by setting opts.extra_ways
693 all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]
694
695 t.total_test_cases += len(all_ways)
696
697 ok_way = lambda way: \
698 not getTestOpts().skip \
699 and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
700 and (config.cmdline_ways == [] or way in config.cmdline_ways) \
701 and (not (config.skip_perf_tests and isStatsTest())) \
702 and way not in getTestOpts().omit_ways
703
704 # The ways we are not asked to skip (i.e. the ways we will run)
705 do_ways = list(filter (ok_way,all_ways))
706
707 # Only run all ways in slow mode.
708 # See Note [validate and testsuite speed] in toplevel Makefile.
709 if config.accept:
710 # Only ever run one way
711 do_ways = do_ways[:1]
712 elif config.speed > 0:
713 # However, if we EXPLICITLY asked for a way (with extra_ways)
714 # please test it!
715 explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
716 other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
717 do_ways = other_ways[:1] + explicit_ways
718
719 # Find all files in the source directory that this test
720 # depends on. Do this only once for all ways.
721 # Generously add all filenames that start with the name of
722 # the test to this set, as a convenience to test authors.
723 # They will have to use the `extra_files` setup function to
724 # specify all other files that their test depends on (but
725 # this seems to be necessary for only about 10% of all
726 # tests).
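# For example, the one remaining entry in extra_src_files above
# corresponds to a .T entry along the lines of (a sketch; the test
# function shown is only illustrative):
#   test('T4198', [extra_files(['exitminus1.c'])], compile_and_run, [''])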
727 files = set(f for f in os.listdir(opts.srcdir)
728 if f.startswith(name) and not f == name and
729 not f.endswith(testdir_suffix) and
730 not os.path.splitext(f)[1] in do_not_copy)
731 for filename in (opts.extra_files + extra_src_files.get(name, [])):
732 if filename.startswith('/'):
733 framework_fail(name, 'whole-test',
734 'no absolute paths in extra_files please: ' + filename)
735
736 elif '*' in filename:
737 # Don't use wildcards in extra_files too much, as
738 # globbing is slow.
739 files.update((os.path.relpath(f, opts.srcdir)
740 for f in glob.iglob(in_srcdir(filename))))
741
742 elif filename:
743 files.add(filename)
744
745 else:
746 framework_fail(name, 'whole-test', 'extra_file is empty string')
747
748 # Run the required tests...
749 for way in do_ways:
750 if stopping():
751 break
752 try:
753 do_test(name, way, func, args, files)
754 except KeyboardInterrupt:
755 stopNow()
756 except Exception as e:
757 framework_fail(name, way, str(e))
758 traceback.print_exc()
759
760 t.n_tests_skipped += len(set(all_ways) - set(do_ways))
761
762 if config.cleanup and do_ways:
763 cleanup()
764
765 package_conf_cache_file_end_timestamp = get_package_cache_timestamp();
766
767 if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
768 framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))
769
770 except Exception as e:
771 framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
772 finally:
773 watcher.notify()
774
775 def do_test(name, way, func, args, files):
776 opts = getTestOpts()
777
778 full_name = name + '(' + way + ')'
779
780 if_verbose(2, "=====> {0} {1} of {2} {3}".format(
781 full_name, t.total_tests, len(allTestNames),
782 [len(t.unexpected_passes),
783 len(t.unexpected_failures),
784 len(t.framework_failures)]))
785
786 # Clean up prior to the test, so that we can't spuriously conclude
787 # that it passed on the basis of old run outputs.
788 cleanup()
789 os.makedirs(opts.testdir)
790
791 # Link all source files for this test into a new directory in
792 # /tmp, and run the test in that directory. This makes it
793 # possible to run tests in parallel, without modification, that
794 # would otherwise (accidentally) write to the same output file.
795 # It also makes it easier to keep the testsuite clean.
796
797 for extra_file in files:
798 src = in_srcdir(extra_file)
799 dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
800 if os.path.isfile(src):
801 link_or_copy_file(src, dst)
802 elif os.path.isdir(src):
803 os.mkdir(dst)
804 lndir(src, dst)
805 else:
806 if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
807 # When using a ghc built without haddock support, .t
808 # files are rightfully missing. Don't
809 # framework_fail. Test will be skipped later.
810 pass
811 else:
812 framework_fail(name, way,
813 'extra_file does not exist: ' + extra_file)
814
815 if func.__name__ == 'run_command' or opts.pre_cmd:
816 # When running 'MAKE' make sure 'TOP' still points to the
817 # root of the testsuite.
818 src_makefile = in_srcdir('Makefile')
819 dst_makefile = in_testdir('Makefile')
820 if os.path.exists(src_makefile):
821 with io.open(src_makefile, 'r', encoding='utf8') as src:
822 makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
823 with io.open(dst_makefile, 'w', encoding='utf8') as dst:
824 dst.write(makefile)
825
826 if opts.pre_cmd:
827 exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, override_options(opts.pre_cmd)),
828 stderr = subprocess.STDOUT,
829 print_output = config.verbose >= 3)
830
831 if exit_code != 0:
832 framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
833
834 result = func(*[name,way] + args)
835
836 if opts.expect not in ['pass', 'fail', 'missing-lib']:
837 framework_fail(name, way, 'bad expected ' + opts.expect)
838
839 try:
840 passFail = result['passFail']
841 except (KeyError, TypeError):
842 passFail = 'No passFail found'
843
844 directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
845
846 if passFail == 'pass':
847 if _expect_pass(way):
848 t.n_expected_passes += 1
849 else:
850 if_verbose(1, '*** unexpected pass for %s' % full_name)
851 t.unexpected_passes.append((directory, name, 'unexpected', way))
852 elif passFail == 'fail':
853 if _expect_pass(way):
854 reason = result['reason']
855 tag = result.get('tag')
856 if tag == 'stat':
857 if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
858 t.unexpected_stat_failures.append((directory, name, reason, way))
859 else:
860 if_verbose(1, '*** unexpected failure for %s' % full_name)
861 t.unexpected_failures.append((directory, name, reason, way))
862 else:
863 if opts.expect == 'missing-lib':
864 t.missing_libs.append((directory, name, 'missing-lib', way))
865 else:
866 t.n_expected_failures += 1
867 else:
868 framework_fail(name, way, 'bad result ' + passFail)
869
870 # Make is often invoked with -s, which means if it fails, we get
871 # no feedback at all. This is annoying. So let's remove the option
872 # if found and instead have the testsuite decide on what to do
873 # with the output.
874 def override_options(pre_cmd):
875 if config.verbose >= 4 and bool(re.match('\$make', pre_cmd, re.I)):
876 return pre_cmd.replace('-s' , '') \
877 .replace('--silent', '') \
878 .replace('--quiet' , '')
879
880 return pre_cmd
881
882 def framework_fail(name, way, reason):
883 opts = getTestOpts()
884 directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
885 full_name = name + '(' + way + ')'
886 if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
887 t.framework_failures.append((directory, name, way, reason))
888
889 def badResult(result):
890 try:
891 if result['passFail'] == 'pass':
892 return False
893 return True
894 except (KeyError, TypeError):
895 return True
896
897 def passed():
898 return {'passFail': 'pass'}
899
900 def failBecause(reason, tag=None):
901 return {'passFail': 'fail', 'reason': reason, 'tag': tag}
902
903 # -----------------------------------------------------------------------------
904 # Generic command tests
905
906 # A generic command test is expected to run and exit successfully.
907 #
908 # The expected exit code can be changed via exit_code() as normal, and
909 # the expected stdout/stderr are stored in <testname>.stdout and
910 # <testname>.stderr. The output of the command can be ignored
911 # altogether by using the setup functions ignore_stdout and
912 # ignore_stderr.
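#
# A typical .T entry looks like (a sketch; the Makefile target name is
# made up):
#
#   test('T1234', normal, run_command, ['$MAKE -s --no-print-directory T1234'])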
913
914 def run_command( name, way, cmd ):
915 return simple_run( name, '', override_options(cmd), '' )
916
917 # -----------------------------------------------------------------------------
918 # GHCi tests
919
920 def ghci_script( name, way, script):
921 flags = ' '.join(get_compiler_flags())
922 way_flags = ' '.join(config.way_flags[way])
923
924 # We pass HC and HC_OPTS as environment variables, so that the
925 # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
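# (so a .script file can contain, for example, a line such as
#  ":! $HC $HC_OPTS -c T1234_c.c" -- the file name is only illustrative)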
926 cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {flags} {way_flags}'
927 ).format(flags=flags, way_flags=way_flags)
928
929 getTestOpts().stdin = script
930 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
931
932 # -----------------------------------------------------------------------------
933 # Compile-only tests
934
935 def compile( name, way, extra_hc_opts ):
936 return do_compile( name, way, 0, '', [], extra_hc_opts )
937
938 def compile_fail( name, way, extra_hc_opts ):
939 return do_compile( name, way, 1, '', [], extra_hc_opts )
940
941 def backpack_typecheck( name, way, extra_hc_opts ):
942 return do_compile( name, way, 0, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=1 )
943
944 def backpack_typecheck_fail( name, way, extra_hc_opts ):
945 return do_compile( name, way, 1, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=1 )
946
947 def backpack_compile( name, way, extra_hc_opts ):
948 return do_compile( name, way, 0, '', [], extra_hc_opts, backpack=1 )
949
950 def backpack_compile_fail( name, way, extra_hc_opts ):
951 return do_compile( name, way, 1, '', [], extra_hc_opts, backpack=1 )
952
953 def backpack_run( name, way, extra_hc_opts ):
954 return compile_and_run__( name, way, '', [], extra_hc_opts, backpack=1 )
955
956 def multimod_compile( name, way, top_mod, extra_hc_opts ):
957 return do_compile( name, way, 0, top_mod, [], extra_hc_opts )
958
959 def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
960 return do_compile( name, way, 1, top_mod, [], extra_hc_opts )
961
962 def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
963 return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)
964
965 def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
966 return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
967
968 def do_compile(name, way, should_fail, top_mod, extra_mods, extra_hc_opts, **kwargs):
969 # print 'Compile only, extra args = ', extra_hc_opts
970
971 result = extras_build( way, extra_mods, extra_hc_opts )
972 if badResult(result):
973 return result
974 extra_hc_opts = result['hc_opts']
975
976 result = simple_build(name, way, extra_hc_opts, should_fail, top_mod, 0, 1, **kwargs)
977
978 if badResult(result):
979 return result
980
981 # the actual stderr should always match the expected, regardless
982 # of whether we expected the compilation to fail or not (successful
983 # compilations may generate warnings).
984
985 expected_stderr_file = find_expected_file(name, 'stderr')
986 actual_stderr_file = add_suffix(name, 'comp.stderr')
987
988 if not compare_outputs(way, 'stderr',
989 join_normalisers(getTestOpts().extra_errmsg_normaliser,
990 normalise_errmsg),
991 expected_stderr_file, actual_stderr_file,
992 whitespace_normaliser=normalise_whitespace):
993 return failBecause('stderr mismatch')
994
995 # no problems found, this test passed
996 return passed()
997
998 def compile_cmp_asm( name, way, extra_hc_opts ):
999 print('Compile only, extra args = ', extra_hc_opts)
1000 result = simple_build(name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)
1001
1002 if badResult(result):
1003 return result
1004
1005 # The actual assembly output should always match the expected
1006 # assembly output; that comparison is the whole point of a
1007 # compile_cmp_asm test.
1008
1009 expected_asm_file = find_expected_file(name, 'asm')
1010 actual_asm_file = add_suffix(name, 's')
1011
1012 if not compare_outputs(way, 'asm',
1013 join_normalisers(normalise_errmsg, normalise_asm),
1014 expected_asm_file, actual_asm_file):
1015 return failBecause('asm mismatch')
1016
1017 # no problems found, this test passed
1018 return passed()
1019
1020 # -----------------------------------------------------------------------------
1021 # Compile-and-run tests
1022
1023 def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts, backpack=0 ):
1024 # print 'Compile and run, extra args = ', extra_hc_opts
1025
1026 result = extras_build( way, extra_mods, extra_hc_opts )
1027 if badResult(result):
1028 return result
1029 extra_hc_opts = result['hc_opts']
1030
1031 if way.startswith('ghci'): # interpreted...
1032 return interpreter_run(name, way, extra_hc_opts, top_mod)
1033 else: # compiled...
1034 result = simple_build(name, way, extra_hc_opts, 0, top_mod, 1, 1, backpack = backpack)
1035 if badResult(result):
1036 return result
1037
1038 cmd = './' + name;
1039
1040 # we don't check the compiler's stderr for a compile-and-run test
1041 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1042
1043 def compile_and_run( name, way, extra_hc_opts ):
1044 return compile_and_run__( name, way, '', [], extra_hc_opts)
1045
1046 def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
1047 return compile_and_run__( name, way, top_mod, [], extra_hc_opts)
1048
1049 def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
1050 return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
1051
1052 def stats( name, way, stats_file ):
1053 opts = getTestOpts()
1054 return checkStats(name, way, stats_file, opts.stats_range_fields)
1055
1056 # -----------------------------------------------------------------------------
1057 # Check -t stats info
1058
1059 def checkStats(name, way, stats_file, range_fields):
1060 full_name = name + '(' + way + ')'
1061
1062 result = passed()
1063 if range_fields:
1064 try:
1065 f = open(in_testdir(stats_file))
1066 except IOError as e:
1067 return failBecause(str(e))
1068 contents = f.read()
1069 f.close()
1070
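# The stats file written by '+RTS -t<file> --machine-readable -RTS'
# contains (roughly) one ("field-name", "value") pair per line, which
# is what the regex below picks apart.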
1071 for (field, (expected, dev)) in range_fields.items():
1072 m = re.search('\("' + field + '", "([0-9]+)"\)', contents)
1073 if m == None:
1074 print('Failed to find field: ', field)
1075 result = failBecause('no such stats field'); continue
1076 val = int(m.group(1))
1077
1078 lowerBound = trunc( expected * ((100 - float(dev))/100))
1079 upperBound = trunc(0.5 + ceil(expected * ((100 + float(dev))/100)))
1080
1081 deviation = round(((float(val) * 100)/ expected) - 100, 1)
1082
1083 if val < lowerBound:
1084 print(field, 'value is too low:')
1085 print('(If this is because you have improved GHC, please')
1086 print('update the test so that GHC doesn\'t regress again)')
1087 result = failBecause('stat too good', tag='stat')
1088 if val > upperBound:
1089 print(field, 'value is too high:')
1090 result = failBecause('stat not good enough', tag='stat')
1091
1092 if val < lowerBound or val > upperBound or config.verbose >= 4:
1093 length = max(len(str(x)) for x in [expected, lowerBound, upperBound, val])
1094
1095 def display(descr, val, extra):
1096 print(descr, str(val).rjust(length), extra)
1097
1098 display(' Expected ' + full_name + ' ' + field + ':', expected, '+/-' + str(dev) + '%')
1099 display(' Lower bound ' + full_name + ' ' + field + ':', lowerBound, '')
1100 display(' Upper bound ' + full_name + ' ' + field + ':', upperBound, '')
1101 display(' Actual ' + full_name + ' ' + field + ':', val, '')
1102 if val != expected:
1103 display(' Deviation ' + full_name + ' ' + field + ':', deviation, '%')
1104
1105 return result
1106
1107 # -----------------------------------------------------------------------------
1108 # Build a single-module program
1109
1110 def extras_build( way, extra_mods, extra_hc_opts ):
1111 for mod, opts in extra_mods:
1112 result = simple_build(mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0)
1113 if not (mod.endswith('.hs') or mod.endswith('.lhs')):
1114 extra_hc_opts += ' ' + replace_suffix(mod, 'o')
1115 if badResult(result):
1116 return result
1117
1118 return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1119
1120 def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, backpack = False):
1121 opts = getTestOpts()
1122
1123 # Redirect stdout and stderr to the same file
1124 stdout = in_testdir(name, 'comp.stderr')
1125 stderr = subprocess.STDOUT
1126
1127 if top_mod != '':
1128 srcname = top_mod
1129 elif addsuf:
1130 if backpack:
1131 srcname = add_suffix(name, 'bkp')
1132 else:
1133 srcname = add_hs_lhs_suffix(name)
1134 else:
1135 srcname = name
1136
1137 if top_mod != '':
1138 to_do = '--make '
1139 if link:
1140 to_do = to_do + '-o ' + name
1141 elif backpack:
1142 if link:
1143 to_do = '-o ' + name + ' '
1144 else:
1145 to_do = ''
1146 to_do = to_do + '--backpack '
1147 elif link:
1148 to_do = '-o ' + name
1149 else:
1150 to_do = '-c' # just compile
1151
1152 stats_file = name + '.comp.stats'
1153 if opts.compiler_stats_range_fields:
1154 extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1155 if backpack:
1156 extra_hc_opts += ' -outputdir ' + name + '.out'
1157
1158 # Required by GHC 7.3+, harmless for earlier versions:
1159 if (getTestOpts().c_src or
1160 getTestOpts().objc_src or
1161 getTestOpts().objcpp_src or
1162 getTestOpts().cmm_src):
1163 extra_hc_opts += ' -no-hs-main '
1164
1165 if getTestOpts().compile_cmd_prefix == '':
1166 cmd_prefix = ''
1167 else:
1168 cmd_prefix = getTestOpts().compile_cmd_prefix + ' '
1169
1170 flags = ' '.join(get_compiler_flags() + config.way_flags[way])
1171
1172 cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
1173 '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts}'
1174 ).format(**locals())
1175
1176 exit_code = runCmd(cmd, None, stdout, stderr, opts.compile_timeout_multiplier)
1177
1178 if exit_code != 0 and not should_fail:
1179 if config.verbose >= 1 and _expect_pass(way):
1180 print('Compile failed (exit code {0}) errors were:'.format(exit_code))
1181 actual_stderr_path = in_testdir(name, 'comp.stderr')
1182 if_verbose_dump(1, actual_stderr_path)
1183
1184 # ToDo: if the sub-shell was killed by ^C, then exit
1185
1186 statsResult = checkStats(name, way, stats_file, opts.compiler_stats_range_fields)
1187
1188 if badResult(statsResult):
1189 return statsResult
1190
1191 if should_fail:
1192 if exit_code == 0:
1193 return failBecause('exit code 0')
1194 else:
1195 if exit_code != 0:
1196 return failBecause('exit code non-0')
1197
1198 return passed()
1199
1200 # -----------------------------------------------------------------------------
1201 # Run a program and check its output
1202 #
1203 # If testname.stdin exists, route input from that, else
1204 # from /dev/null. Route output to testname.run.stdout and
1205 # testname.run.stderr. Returns the exit code of the run.
1206
1207 def simple_run(name, way, prog, extra_run_opts):
1208 opts = getTestOpts()
1209
1210 # figure out what to use for stdin
1211 if opts.stdin:
1212 stdin = in_testdir(opts.stdin)
1213 elif os.path.exists(in_testdir(name, 'stdin')):
1214 stdin = in_testdir(name, 'stdin')
1215 else:
1216 stdin = None
1217
1218 stdout = in_testdir(name, 'run.stdout')
1219 if opts.combined_output:
1220 stderr = subprocess.STDOUT
1221 else:
1222 stderr = in_testdir(name, 'run.stderr')
1223
1224 my_rts_flags = rts_flags(way)
1225
1226 stats_file = name + '.stats'
1227 if opts.stats_range_fields:
1228 stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1229 else:
1230 stats_args = ''
1231
1232 # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
1233 cmd = prog + stats_args + ' ' + my_rts_flags + ' ' + extra_run_opts
1234
1235 if opts.cmd_wrapper != None:
1236 cmd = opts.cmd_wrapper(cmd)
1237
1238 cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())
1239
1240 # run the command
1241 exit_code = runCmd(cmd, stdin, stdout, stderr, opts.run_timeout_multiplier)
1242
1243 # check the exit code
1244 if exit_code != opts.exit_code:
1245 if config.verbose >= 1 and _expect_pass(way):
1246 print('Wrong exit code for ' + name + '(' + way + ')' + '(expected', opts.exit_code, ', actual', exit_code, ')')
1247 dump_stdout(name)
1248 dump_stderr(name)
1249 return failBecause('bad exit code')
1250
1251 if not (opts.ignore_stderr or stderr_ok(name, way) or opts.combined_output):
1252 return failBecause('bad stderr')
1253 if not (opts.ignore_stdout or stdout_ok(name, way)):
1254 return failBecause('bad stdout')
1255
1256 check_hp = '-h' in my_rts_flags and opts.check_hp
1257 check_prof = '-p' in my_rts_flags
1258
1259 # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
1260 if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
1261 return failBecause('bad heap profile')
1262 if check_prof and not check_prof_ok(name, way):
1263 return failBecause('bad profile')
1264
1265 return checkStats(name, way, stats_file, opts.stats_range_fields)
1266
1267 def rts_flags(way):
1268 args = config.way_rts_flags.get(way, [])
1269 return '+RTS {0} -RTS'.format(' '.join(args)) if args else ''
1270
1271 # -----------------------------------------------------------------------------
1272 # Run a program in the interpreter and check its output
1273
1274 def interpreter_run(name, way, extra_hc_opts, top_mod):
1275 opts = getTestOpts()
1276
1277 stdout = in_testdir(name, 'interp.stdout')
1278 stderr = in_testdir(name, 'interp.stderr')
1279 script = in_testdir(name, 'genscript')
1280
1281 if opts.combined_output:
1282 framework_fail(name, 'unsupported',
1283 'WAY=ghci and combined_output together is not supported')
1284
1285 if (top_mod == ''):
1286 srcname = add_hs_lhs_suffix(name)
1287 else:
1288 srcname = top_mod
1289
1290 delimiter = '===== program output begins here\n'
1291
1292 with io.open(script, 'w', encoding='utf8') as f:
1293 # set the prog name and command-line args to match the compiled
1294 # environment.
1295 f.write(':set prog ' + name + '\n')
1296 f.write(':set args ' + opts.extra_run_opts + '\n')
1297 # Add marker lines to the stdout and stderr output files, so we
1298 # can separate GHCi's output from the program's.
1299 f.write(':! echo ' + delimiter)
1300 f.write(':! echo 1>&2 ' + delimiter)
1301 # Set stdout to be line-buffered to match the compiled environment.
1302 f.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
1303 # wrapping in GHC.TopHandler.runIO ensures we get the same output
1304 # in the event of an exception as for the compiled program.
1305 f.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
1306
1307 stdin = in_testdir(opts.stdin if opts.stdin else add_suffix(name, 'stdin'))
1308 if os.path.exists(stdin):
1309 os.system('cat "{0}" >> "{1}"'.format(stdin, script))
1310
1311 flags = ' '.join(get_compiler_flags() + config.way_flags[way])
1312
1313 cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts}'
1314 ).format(**locals())
1315
1316 if getTestOpts().cmd_wrapper != None:
1317 cmd = opts.cmd_wrapper(cmd);
1318
1319 cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())
1320
1321 exit_code = runCmd(cmd, script, stdout, stderr, opts.run_timeout_multiplier)
1322
1323 # split the stdout into compilation/program output
1324 split_file(stdout, delimiter,
1325 in_testdir(name, 'comp.stdout'),
1326 in_testdir(name, 'run.stdout'))
1327 split_file(stderr, delimiter,
1328 in_testdir(name, 'comp.stderr'),
1329 in_testdir(name, 'run.stderr'))
1330
1331 # check the exit code
1332 if exit_code != getTestOpts().exit_code:
1333 print('Wrong exit code for ' + name + '(' + way + ') (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
1334 dump_stdout(name)
1335 dump_stderr(name)
1336 return failBecause('bad exit code')
1337
1338 # ToDo: if the sub-shell was killed by ^C, then exit
1339
1340 if not (opts.ignore_stderr or stderr_ok(name, way)):
1341 return failBecause('bad stderr')
1342 elif not (opts.ignore_stdout or stdout_ok(name, way)):
1343 return failBecause('bad stdout')
1344 else:
1345 return passed()
1346
1347 def split_file(in_fn, delimiter, out1_fn, out2_fn):
1348 # See Note [Universal newlines].
1349 with io.open(in_fn, 'r', encoding='utf8', errors='replace', newline=None) as infile:
1350 with io.open(out1_fn, 'w', encoding='utf8', newline='') as out1:
1351 with io.open(out2_fn, 'w', encoding='utf8', newline='') as out2:
1352 line = infile.readline()
1353 while re.sub('^\s*','',line) != delimiter and line != '':
1354 out1.write(line)
1355 line = infile.readline()
1356
1357 line = infile.readline()
1358 while line != '':
1359 out2.write(line)
1360 line = infile.readline()
1361
1362 # -----------------------------------------------------------------------------
1363 # Utils
1364 def get_compiler_flags():
1365 opts = getTestOpts()
1366
1367 flags = copy.copy(opts.compiler_always_flags)
1368
1369 flags.append(opts.extra_hc_opts)
1370
1371 if opts.outputdir != None:
1372 flags.extend(["-outputdir", opts.outputdir])
1373
1374 return flags
1375
1376 def stdout_ok(name, way):
1377 actual_stdout_file = add_suffix(name, 'run.stdout')
1378 expected_stdout_file = find_expected_file(name, 'stdout')
1379
1380 extra_norm = join_normalisers(normalise_output, getTestOpts().extra_normaliser)
1381
1382 check_stdout = getTestOpts().check_stdout
1383 if check_stdout:
1384 actual_stdout_path = in_testdir(actual_stdout_file)
1385 return check_stdout(actual_stdout_path, extra_norm)
1386
1387 return compare_outputs(way, 'stdout', extra_norm,
1388 expected_stdout_file, actual_stdout_file)
1389
1390 def dump_stdout( name ):
1391 with open(in_testdir(name, 'run.stdout')) as f:
1392 str = f.read().strip()
1393 if str:
1394 print("Stdout (", name, "):")
1395 print(str)
1396
1397 def stderr_ok(name, way):
1398 actual_stderr_file = add_suffix(name, 'run.stderr')
1399 expected_stderr_file = find_expected_file(name, 'stderr')
1400
1401 return compare_outputs(way, 'stderr',
1402 join_normalisers(normalise_errmsg, getTestOpts().extra_errmsg_normaliser), \
1403 expected_stderr_file, actual_stderr_file,
1404 whitespace_normaliser=normalise_whitespace)
1405
1406 def dump_stderr( name ):
1407 with open(in_testdir(name, 'run.stderr')) as f:
1408 str = f.read().strip()
1409 if str:
1410 print("Stderr (", name, "):")
1411 print(str)
1412
1413 def read_no_crs(file):
1414 str = ''
1415 try:
1416 # See Note [Universal newlines].
1417 with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
1418 str = h.read()
1419 except Exception:
1420 # On Windows, if the program fails very early, it seems the
1421 # files stdout/stderr are redirected to may not get created
1422 pass
1423 return str
1424
1425 def write_file(file, str):
1426 # See Note [Universal newlines].
1427 with io.open(file, 'w', encoding='utf8', newline='') as h:
1428 h.write(str)
1429
1430 # Note [Universal newlines]
1431 #
1432 # We don't want to write any Windows style line endings ever, because
1433 # it would mean that `make accept` would touch every line of the file
1434 # when switching between Linux and Windows.
1435 #
1436 # Furthermore, when reading a file, it is convenient to translate all
1437 # Windows style endings to '\n', as it simplifies searching or massaging
1438 # the content.
1439 #
1440 # Solution: use `io.open` instead of `open`
1441 # * when reading: use newline=None to translate '\r\n' to '\n'
1442 # * when writing: use newline='' to not translate '\n' to '\r\n'
1443 #
1444 # See https://docs.python.org/2/library/io.html#io.open.
1445 #
1446 # This should work with both python2 and python3, and with both mingw*
1447 # and msys2 style Python.
1448 #
1449 # Do note that io.open returns unicode strings. So we have to specify
1450 # the expected encoding. But there is at least one file which is not
1451 # valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
1452 # Another solution would be to open files in binary mode always, and
1453 # operate on bytes.
1454
1455 def check_hp_ok(name):
1456 opts = getTestOpts()
1457
1458 # do not qualify the path to hp2ps, because we should be in the right directory
1459 hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())
1460
1461 hp2psResult = runCmd(hp2psCmd)
1462
1463 actual_ps_path = in_testdir(name, 'ps')
1464
1465 if hp2psResult == 0:
1466 if os.path.exists(actual_ps_path):
1467 if gs_working:
1468 gsResult = runCmd(genGSCmd(actual_ps_path))
1469 if (gsResult == 0):
1470 return (True)
1471 else:
1472 print("hp2ps output for " + name + "is not valid PostScript")
1473 else: return (True) # assume postscript is valid without ghostscript
1474 else:
1475 print("hp2ps did not generate PostScript for " + name)
1476 return (False)
1477 else:
1478 print("hp2ps error when processing heap profile for " + name)
1479 return(False)
1480
1481 def check_prof_ok(name, way):
1482 expected_prof_file = find_expected_file(name, 'prof.sample')
1483 expected_prof_path = in_testdir(expected_prof_file)
1484
1485 # Check actual prof file only if we have an expected prof file to
1486 # compare it with.
1487 if not os.path.exists(expected_prof_path):
1488 return True
1489
1490 actual_prof_file = add_suffix(name, 'prof')
1491 actual_prof_path = in_testdir(actual_prof_file)
1492
1493 if not os.path.exists(actual_prof_path):
1494 print(actual_prof_path + " does not exist")
1495 return(False)
1496
1497 if os.path.getsize(actual_prof_path) == 0:
1498 print(actual_prof_path + " is empty")
1499 return(False)
1500
1501 return compare_outputs(way, 'prof', normalise_prof,
1502 expected_prof_file, actual_prof_file,
1503 whitespace_normaliser=normalise_whitespace)
1504
1505 # Compare expected output to actual output, and optionally accept the
1506 # new output. Returns true if output matched or was accepted, false
1507 # otherwise. See Note [Output comparison] for the meaning of the
1508 # normaliser and whitespace_normaliser parameters.
1509 def compare_outputs(way, kind, normaliser, expected_file, actual_file,
1510 whitespace_normaliser=lambda x:x):
1511
1512 expected_path = in_srcdir(expected_file)
1513 actual_path = in_testdir(actual_file)
1514
1515 if os.path.exists(expected_path):
1516 expected_str = normaliser(read_no_crs(expected_path))
1517 # Create the .normalised file in the testdir, not in the srcdir.
1518 expected_normalised_file = add_suffix(expected_file, 'normalised')
1519 expected_normalised_path = in_testdir(expected_normalised_file)
1520 else:
1521 expected_str = ''
1522 expected_normalised_path = '/dev/null'
1523
1524 actual_raw = read_no_crs(actual_path)
1525 actual_str = normaliser(actual_raw)
1526
1527 # See Note [Output comparison].
1528 if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
1529 return 1
1530 else:
1531 if config.verbose >= 1 and _expect_pass(way):
1532 print('Actual ' + kind + ' output differs from expected:')
1533
1534 if expected_normalised_path != '/dev/null':
1535 write_file(expected_normalised_path, expected_str)
1536
1537 actual_normalised_path = add_suffix(actual_path, 'normalised')
1538 write_file(actual_normalised_path, actual_str)
1539
1540 if config.verbose >= 1 and _expect_pass(way):
1541 # See Note [Output comparison].
1542 r = runCmd('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
1543 actual_normalised_path),
1544 print_output = 1)
1545
1546 # If for some reason there were no non-whitespace differences,
1547 # then do a full diff
1548 if r == 0:
1549 r = runCmd('diff -u "{0}" "{1}"'.format(expected_normalised_path,
1550 actual_normalised_path),
1551 print_output = 1)
1552
1553 if config.accept and (getTestOpts().expect == 'fail' or
1554 way in getTestOpts().expect_fail_for):
1555 if_verbose(1, 'Test is expected to fail. Not accepting new output.')
1556 return 0
1557 elif config.accept and actual_raw:
1558 if_verbose(1, 'Accepting new output.')
1559 write_file(expected_path, actual_raw)
1560 return 1
1561 elif config.accept:
1562 if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
1563 os.remove(expected_path)
1564 return 1
1565 else:
1566 return 0
1567
1568 # Note [Output comparison]
1569 #
1570 # We do two types of output comparison:
1571 #
1572 # 1. To decide whether a test has failed. We apply a `normaliser` and an
1573 # optional `whitespace_normaliser` to the expected and the actual
1574 # output, before comparing the two.
1575 #
1576 # 2. To show as a diff to the user when the test indeed failed. We apply
1577 # the same `normaliser` function to the outputs, to make the diff as
1578 # small as possible (only showing the actual problem). But we don't
1579 # apply the `whitespace_normaliser` here, because it might completely
1580 # squash all whitespace, making the diff unreadable. Instead we rely
1581 # on the `diff` program to ignore whitespace changes as much as
1582 # possible (#10152).
1583
1584 def normalise_whitespace( str ):
1585 # Merge contiguous whitespace characters into a single space.
1586 return ' '.join(w for w in str.split())
1587
1588 callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')
1589
1590 def normalise_callstacks(s):
1591 opts = getTestOpts()
1592 def repl(matches):
1593 location = matches.group(1)
1594 location = normalise_slashes_(location)
1595 return ', called at {0}:<line>:<column> in <package-id>:'.format(location)
1596 # Ignore line number differences in call stacks (#10834).
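# e.g. (roughly) ", called at Foo.hs:3:5 in main:Main" becomes
# ", called at Foo.hs:<line>:<column> in <package-id>:Main"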
1597 s = re.sub(callSite_re, repl, s)
1598 # Ignore the change in how we identify implicit call-stacks
1599 s = s.replace('from ImplicitParams', 'from HasCallStack')
1600 if not opts.keep_prof_callstacks:
1601 # Don't output prof callstacks. Test output should be
1602 # independent from the WAY we run the test.
1603 s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
1604 return s
1605
1606 tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)
1607
1608 def normalise_type_reps(str):
1609 """ Normalise out fingerprints from Typeable TyCon representations """
1610 return re.sub(tyCon_re, 'TyCon FINGERPRINT FINGERPRINT ', str)
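# e.g. (roughly) "TyCon 1234## 5678## base GHC.Base ..." becomes
# "TyCon FINGERPRINT FINGERPRINT base GHC.Base ..."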
1611
1612 def normalise_errmsg( str ):
1613 """Normalise error-messages emitted via stderr"""
1614 # IBM AIX's `ld` is a bit chatty
1615 if opsys('aix'):
1616 str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
1617 # remove " error:" and lower-case " Warning:" to make patch for
1618 # trac issue #10021 smaller
1619 str = modify_lines(str, lambda l: re.sub(' error:', '', l))
1620 str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
1621 str = normalise_callstacks(str)
1622 str = normalise_type_reps(str)
1623
1624 # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
1625 # the colon is there because it appears in error messages; this
1626 # hacky solution is used in place of more sophisticated filename
1627 # mangling
1628 str = re.sub('([^\\s])\\.exe', '\\1', str)
1629
1630 # normalise slashes, minimise Windows/Unix filename differences
1631 str = re.sub('\\\\', '/', str)
1632
1633 # The inplace ghc's are called ghc-stage[123] to avoid filename
1634 # collisions, so we need to normalise that to just "ghc"
1635 str = re.sub('ghc-stage[123]', 'ghc', str)
1636
1637 # Error messages sometimes contain the integer implementation package
1638 str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
1639
1640 # Also filter out bullet characters. This is because bullets are used to
1641 # separate error sections, and tests shouldn't be sensitive to how
1642 # the division happens.
1643 bullet = '•'.encode('utf8') if isinstance(str, bytes) else '•'
1644 str = str.replace(bullet, '')
1645
1646 # Windows only, this is a bug in hsc2hs but it is preventing
1647 # stable output for the testsuite. See Trac #9775. For now we filter out this
1648 # warning message to get clean output.
1649 if config.msys:
1650 str = re.sub('Failed to remove file (.*); error= (.*)$', '', str)
1651 str = re.sub('DeleteFile "(.+)": permission denied \(Access is denied\.\)(.*)$', '', str)
1652
1653 return str
1654
1655 # normalise a .prof file, so that we can reasonably compare it against
1656 # a sample. This doesn't compare any of the actual profiling data,
1657 # only the shape of the profile and the number of entries.
1658 def normalise_prof (str):
1659 # strip everything up to the line beginning "COST CENTRE"
1660 str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)
1661
1662 # strip results for CAFs, these tend to change unpredictably
1663 str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)
1664
1665 # XXX Ignore Main.main. Sometimes this appears under CAF, and
1666 # sometimes under MAIN.
1667 str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)
1668
1669 # We have something like this:
1670 #
1671 # MAIN MAIN <built-in> 53 0 0.0 0.2 0.0 100.0
1672 # CAF Main <entire-module> 105 0 0.0 0.3 0.0 62.5
1673 # readPrec Main Main_1.hs:7:13-16 109 1 0.0 0.6 0.0 0.6
1674 # readPrec Main Main_1.hs:4:13-16 107 1 0.0 0.6 0.0 0.6
1675 # main Main Main_1.hs:(10,1)-(20,20) 106 1 0.0 20.2 0.0 61.0
1676 # == Main Main_1.hs:7:25-26 114 1 0.0 0.0 0.0 0.0
1677 # == Main Main_1.hs:4:25-26 113 1 0.0 0.0 0.0 0.0
1678 # showsPrec Main Main_1.hs:7:19-22 112 2 0.0 1.2 0.0 1.2
1679 # showsPrec Main Main_1.hs:4:19-22 111 2 0.0 0.9 0.0 0.9
1680 # readPrec Main Main_1.hs:7:13-16 110 0 0.0 18.8 0.0 18.8
1681 # readPrec Main Main_1.hs:4:13-16 108 0 0.0 19.9 0.0 19.9
1682 #
1683 # then we remove all the specific profiling data, leaving only the cost
1684 # centre name, module, src, and entries, to end up with this: (modulo
1685 # whitespace between columns)
1686 #
1687 # MAIN MAIN <built-in> 0
1688 # readPrec Main Main_1.hs:7:13-16 1
1689 # readPrec Main Main_1.hs:4:13-16 1
1690 # == Main Main_1.hs:7:25-26 1
1691 # == Main Main_1.hs:4:25-26 1
1692 # showsPrec Main Main_1.hs:7:19-22 2
1693 # showsPrec Main Main_1.hs:4:19-22 2
1694 # readPrec Main Main_1.hs:7:13-16 0
1695 # readPrec Main Main_1.hs:4:13-16 0
1696
1697 # Split into 9 whitespace-separated groups; keep columns 1 (cost-centre),
1698 # 2 (module), 3 (src), and 5 (entries). SCC names can't contain whitespace, so
1699 # this works fine.
1700 str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
1701 '\\1 \\2 \\3 \\5\n', str)
1702 return str
1703
1704 def normalise_slashes_( str ):
1705 str = re.sub('\\\\', '/', str)
1706 return str
1707
1708 def normalise_exe_( str ):
1709 str = re.sub('\.exe', '', str)
1710 return str
1711
1712 def normalise_output( str ):
1713 # remove " error:" and lower-case " Warning:" to make the patch for
1714 # Trac issue #10021 smaller
1715 str = modify_lines(str, lambda l: re.sub(' error:', '', l))
1716 str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
1717 # Remove a .exe extension (for Windows)
1718 # This can occur in error messages generated by the program.
1719 str = re.sub('([^\\s])\\.exe', '\\1', str)
1720 str = normalise_callstacks(str)
1721 str = normalise_type_reps(str)
1722 return str
1723
1724 def normalise_asm( str ):
1725 lines = str.split('\n')
1726 # Only keep instructions and labels not starting with a dot.
1727 metadata = re.compile('^[ \t]*\\..*$')
1728 out = []
1729 for line in lines:
1730 # Drop metadata directives (e.g. ".type")
1731 if not metadata.match(line):
1732 line = re.sub('@plt', '', line)
1733 instr = line.lstrip().split()
1734 # Drop empty lines.
1735 if not instr:
1736 continue
1737 # Drop operands, except for call instructions.
1738 elif instr[0] == 'call':
1739 out.append(instr[0] + ' ' + instr[1])
1740 else:
1741 out.append(instr[0])
1742 out = '\n'.join(out)
1743 return out
1744
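# Illustrative sketch (hypothetical listing, not part of the original driver):
# metadata directives are dropped, operands are discarded except for call
# targets, and '@plt' suffixes are removed, e.g.
#
#     .text                  (dropped)
#     main:              ->  main:
#     movq %rdi, %rax    ->  movq
#     call printf@plt    ->  call printf
def _normalise_asm_example():
    sample = '\t.text\nmain:\n\tmovq %rdi, %rax\n\tcall printf@plt\n'
    return normalise_asm(sample)
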
1745 def if_verbose( n, s ):
1746 if config.verbose >= n:
1747 print(s)
1748
1749 def if_verbose_dump( n, f ):
1750 if config.verbose >= n:
1751 try:
1752 with io.open(f) as file:
1753 print(file.read())
1754 except Exception:
1755 print('')
1756
1757 def runCmd(cmd, stdin=None, stdout=None, stderr=None, timeout_multiplier=1.0, print_output=0):
1758 timeout_prog = strip_quotes(config.timeout_prog)
1759 timeout = str(int(ceil(config.timeout * timeout_multiplier)))
1760
1761 # Format cmd using config. Example: cmd='{hpc} report A.tix'
1762 cmd = cmd.format(**config.__dict__)
1763 if_verbose(3, cmd + ('< ' + os.path.basename(stdin) if stdin else ''))
1764
1765 # Give the buffers a default value.
1766 stdin_buffer = None
1767
1768 # ***** IMPORTANT *****
1769 # We have to treat input and output as
1770 # just binary data here. Don't try to decode
1771 # it to a string, since we have tests that actually
1772 # feed malformed utf-8 to see how GHC handles it.
1773 if stdin:
1774 with io.open(stdin, 'rb') as f:
1775 stdin_buffer = f.read()
1776
1777 stdout_buffer = b''
1778 stderr_buffer = b''
1779
1780 hStdErr = subprocess.PIPE
1781 if stderr is subprocess.STDOUT:
1782 hStdErr = subprocess.STDOUT
1783
1784 try:
1785 # cmd is a complex command in Bourne-shell syntax,
1786 # e.g. (cd . && 'C:/users/simonpj/HEAD/inplace/bin/ghc-stage2' ...etc),
1787 # so it must ultimately be run by a Bourne shell. It's the timeout
1788 # program's job to invoke the Bourne shell.
1789
1790 r = subprocess.Popen([timeout_prog, timeout, cmd],
1791 stdin=subprocess.PIPE,
1792 stdout=subprocess.PIPE,
1793 stderr=hStdErr)
1794
1795 stdout_buffer, stderr_buffer = r.communicate(stdin_buffer)
1796 finally:
1797 if config.verbose >= 1 and print_output >= 1:
1798 if stdout_buffer:
1799 sys.stdout.buffer.write(stdout_buffer)
1800 if stderr_buffer:
1801 sys.stderr.buffer.write(stderr_buffer)
1802
1803 if stdout:
1804 with io.open(stdout, 'wb') as f:
1805 f.write(stdout_buffer)
1806 if stderr:
1807 if stderr is not subprocess.STDOUT:
1808 with io.open(stderr, 'wb') as f:
1809 f.write(stderr_buffer)
1810
1811 if r.returncode == 98:
1812 # The python timeout program uses 98 to signal that ^C was pressed
1813 stopNow()
1814 if r.returncode == 99 and getTestOpts().exit_code != 99:
1815 # Only print a message when timeout killed the process unexpectedly.
1816 if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
1817 return r.returncode
1818
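# Minimal usage sketch (not called by the driver; the file name is made up and
# it is only meaningful while a test's options are set, for in_testdir). As the
# comment in runCmd notes, placeholders such as '{hpc}' are filled in from the
# config dictionary, and the whole command line is run under the timeout
# wrapper with its output captured as raw bytes.
def _runCmd_example():
    return runCmd('{hpc} report A.tix',
                  stdout=in_testdir('hpc_report', 'stdout'),
                  print_output=1)
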
1819 # -----------------------------------------------------------------------------
1820 # Check whether GhostScript is available for checking the output of hp2ps
1821
1822 def genGSCmd(psfile):
1823 return '{{gs}} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "{0}"'.format(psfile)
1824
1825 def gsNotWorking():
1826 global gs_working
1827 print("GhostScript not available for hp2ps tests")
1828
1829 global gs_working
1830 gs_working = 0
1831 if config.have_profiling:
1832 if config.gs != '':
1833 resultGood = runCmd(genGSCmd(config.confdir + '/good.ps'))
1834 if resultGood == 0:
1835 resultBad = runCmd(genGSCmd(config.confdir + '/bad.ps') +
1836 ' >/dev/null 2>&1')
1837 if resultBad != 0:
1838 print("GhostScript available for hp2ps tests")
1839 gs_working = 1
1840 else:
1841 gsNotWorking()
1842 else:
1843 gsNotWorking()
1844 else:
1845 gsNotWorking()
1846
1847 def add_suffix( name, suffix ):
1848 if suffix == '':
1849 return name
1850 else:
1851 return name + '.' + suffix
1852
1853 def add_hs_lhs_suffix(name):
1854 if getTestOpts().c_src:
1855 return add_suffix(name, 'c')
1856 elif getTestOpts().cmm_src:
1857 return add_suffix(name, 'cmm')
1858 elif getTestOpts().objc_src:
1859 return add_suffix(name, 'm')
1860 elif getTestOpts().objcpp_src:
1861 return add_suffix(name, 'mm')
1862 elif getTestOpts().literate:
1863 return add_suffix(name, 'lhs')
1864 else:
1865 return add_suffix(name, 'hs')
1866
1867 def replace_suffix( name, suffix ):
1868 base, suf = os.path.splitext(name)
1869 return base + '.' + suffix
1870
1871 def in_testdir(name, suffix=''):
1872 return os.path.join(getTestOpts().testdir, add_suffix(name, suffix))
1873
1874 def in_srcdir(name, suffix=''):
1875 return os.path.join(getTestOpts().srcdir, add_suffix(name, suffix))
1876
1877 # Finding the sample output. The filename is of the form
1878 #
1879 # <test>.stdout[-ws-<wordsize>][-<platform>]
1880 #
1881 def find_expected_file(name, suff):
1882 basename = add_suffix(name, suff)
1883
1884 files = [basename + ws + plat
1885 for plat in ['-' + config.platform, '-' + config.os, '']
1886 for ws in ['-ws-' + config.wordsize, '']]
1887
1888 for f in files:
1889 if os.path.exists(in_srcdir(f)):
1890 return f
1891
1892 return basename
1893
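# Illustrative sketch (hypothetical config values): with config.platform set to
# 'x86_64-unknown-linux', config.os to 'linux' and config.wordsize to '64',
# find_expected_file('T123', 'stdout') tries, in order:
#
#   T123.stdout-ws-64-x86_64-unknown-linux
#   T123.stdout-x86_64-unknown-linux
#   T123.stdout-ws-64-linux
#   T123.stdout-linux
#   T123.stdout-ws-64
#   T123.stdout
#
# returning the first one that exists in the source directory, or the plain
# name otherwise.
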
1894 if config.msys:
1895 import stat
1896 import time
1897 def cleanup():
1898 testdir = getTestOpts().testdir
1899 max_attempts = 5
1900 retries = max_attempts
1901 def on_error(function, path, excinfo):
1902 # At least one test (T11489) removes the write bit from a file it
1903 # produces. Windows refuses to delete read-only files with a
1904 # permission error. Try setting the write bit and try again.
1905 os.chmod(path, stat.S_IWRITE)
1906 function(path)
1907
1908 # On Windows we have to retry the delete a couple of times.
1909 # The reason for this is that a FileDelete command just marks a
1910 # file for deletion. The file is really only removed when the last
1911 # handle to the file is closed. Unfortunately there are a lot of
1912 # system services that can have a file temporarily opened using a shared
1913 # read-only lock, such as the built-in AV and search indexer.
1914 #
1915 # We can't really guarantee that these are all off, so if the folder still
1916 # exists after an rmtree we wait a bit and try again.
1917 #
1918 # Based on what I've seen from the tests on the CI server, this is relatively
1919 # rare, so overall we won't be retrying a lot. If the folder is still locked
1920 # after a reasonable amount of time, abort the current test by throwing an
1921 # exception, so that it doesn't fail with an even more cryptic error.
1922 #
1923 # See Trac #13162
1924 while retries > 0 and os.path.exists(testdir):
1925 time.sleep((max_attempts-retries)*6)
1926 shutil.rmtree(testdir, onerror=on_error, ignore_errors=False)
1927 retries -= 1
1928
1929 if retries == 0 and os.path.exists(testdir):
1930 raise Exception("Unable to remove folder '" + testdir + "'. Unable to start current test.")
1931 else:
1932 def cleanup():
1933 testdir = getTestOpts().testdir
1934 if os.path.exists(testdir):
1935 shutil.rmtree(testdir, ignore_errors=False)
1936
1937
1938 # -----------------------------------------------------------------------------
1939 # Return a list of all the files ending in '.T' below directories roots.
1940
1941 def findTFiles(roots):
1942 for root in roots:
1943 for path, dirs, files in os.walk(root, topdown=True):
1944 # Never pick up .T files in uncleaned .run directories.
1945 dirs[:] = [dir for dir in sorted(dirs)
1946 if not dir.endswith(testdir_suffix)]
1947 for filename in files:
1948 if filename.endswith('.T'):
1949 yield os.path.join(path, filename)
1950
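# Usage sketch (hypothetical root directory; not part of the original driver):
# findTFiles is a generator, so callers typically materialise or iterate it.
def _findTFiles_example():
    return list(findTFiles(['tests/programs']))
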
1951 # -----------------------------------------------------------------------------
1952 # Output a test summary to the specified file object
1953
1954 def summary(t, file, short=False):
1955
1956 file.write('\n')
1957 printUnexpectedTests(file,
1958 [t.unexpected_passes, t.unexpected_failures,
1959 t.unexpected_stat_failures, t.framework_failures])
1960
1961 if short:
1962 # Only print the list of unexpected tests above.
1963 return
1964
1965 file.write('SUMMARY for test run started at '
1966 + time.strftime("%c %Z", t.start_time) + '\n'
1967 + str(datetime.timedelta(seconds=
1968 round(time.time() - time.mktime(t.start_time)))).rjust(8)
1969 + ' spent to go through\n'
1970 + repr(t.total_tests).rjust(8)
1971 + ' total tests, which gave rise to\n'
1972 + repr(t.total_test_cases).rjust(8)
1973 + ' test cases, of which\n'
1974 + repr(t.n_tests_skipped).rjust(8)
1975 + ' were skipped\n'
1976 + '\n'
1977 + repr(len(t.missing_libs)).rjust(8)
1978 + ' had missing libraries\n'
1979 + repr(t.n_expected_passes).rjust(8)
1980 + ' expected passes\n'
1981 + repr(t.n_expected_failures).rjust(8)
1982 + ' expected failures\n'
1983 + '\n'
1984 + repr(len(t.framework_failures)).rjust(8)
1985 + ' caused framework failures\n'
1986 + repr(len(t.unexpected_passes)).rjust(8)
1987 + ' unexpected passes\n'
1988 + repr(len(t.unexpected_failures)).rjust(8)
1989 + ' unexpected failures\n'
1990 + repr(len(t.unexpected_stat_failures)).rjust(8)
1991 + ' unexpected stat failures\n'
1992 + '\n')
1993
1994 if t.unexpected_passes:
1995 file.write('Unexpected passes:\n')
1996 printTestInfosSummary(file, t.unexpected_passes)
1997
1998 if t.unexpected_failures:
1999 file.write('Unexpected failures:\n')
2000 printTestInfosSummary(file, t.unexpected_failures)
2001
2002 if t.unexpected_stat_failures:
2003 file.write('Unexpected stat failures:\n')
2004 printTestInfosSummary(file, t.unexpected_stat_failures)
2005
2006 if t.framework_failures:
2007 file.write('Framework failures:\n')
2008 printTestInfosSummary(file, t.framework_failures)
2009
2010 if stopping():
2011 file.write('WARNING: Testsuite run was terminated early\n')
2012
2013 def printUnexpectedTests(file, testInfoss):
2014 unexpected = set(name for testInfos in testInfoss
2015 for (_, name, _, _) in testInfos
2016 if not name.endswith('.T'))
2017 if unexpected:
2018 file.write('Unexpected results from:\n')
2019 file.write('TEST="' + ' '.join(unexpected) + '"\n')
2020 file.write('\n')
2021
2022 def printTestInfosSummary(file, testInfos):
2023 maxDirLen = max(len(directory) for (directory, _, _, _) in testInfos)
2024 for (directory, name, reason, way) in testInfos:
2025 directory = directory.ljust(maxDirLen)
2026 file.write(' {directory} {name} [{reason}] ({way})\n'.format(**locals()))
2027 file.write('\n')
2028
2029 def modify_lines(s, f):
2030 s = '\n'.join([f(l) for l in s.splitlines()])
2031 if s and s[-1] != '\n':
2032 # Prevent '\ No newline at end of file' warnings when diffing.
2033 s += '\n'
2034 return s
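
# Minimal sketch (not used by the driver; the sample text is made up):
# modify_lines applies a function to every line and guarantees a trailing
# newline, e.g. stripping ' error:' tags the way normalise_output does above.
def _modify_lines_example():
    sample = 'Foo.hs:1:1: error: parse error\nsecond line'
    # Result: 'Foo.hs:1:1: parse error\nsecond line\n'
    return modify_lines(sample, lambda l: re.sub(' error:', '', l))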