Testsuite: remove one level of indentation [skip ci]
[ghc.git] / testsuite / driver / testlib.py
1 # coding=utf8
2 #
3 # (c) Simon Marlow 2002
4 #
5
6 from __future__ import print_function
7
8 import io
9 import shutil
10 import os
11 import errno
12 import string
13 import re
14 import traceback
15 import time
16 import datetime
17 import copy
18 import glob
19 from math import ceil, trunc
20 import collections
21 import subprocess
22
23 from testglobals import *
24 from testutil import *
25 from extra_files import extra_src_files
26
# Python 2/3 compatibility shim: 'basestring' only exists on Python 2.
# Catch NameError specifically; a bare 'except' would also swallow
# unrelated errors (KeyboardInterrupt, SystemExit, ...).
try:
    basestring
except NameError: # Python 3
    basestring = (str,bytes)
31
# Threading support is only needed when the config asks for concurrent
# test execution.
if config.use_threads:
    import threading
    try:
        import thread
    except ImportError: # Python 3
        import _thread as thread

# Cooperative-shutdown flag: set via stopNow() and polled via stopping()
# by the test loop (see test_common_work).
global wantToStop
wantToStop = False

def stopNow():
    """Request that the driver stop running further tests."""
    global wantToStop
    wantToStop = True

def stopping():
    """Return True when a stop has been requested via stopNow()."""
    return wantToStop
46
# Options valid for the current test only (these get reset to
# testdir_testopts after each test).

global testopts_local
if config.use_threads:
    # Thread-local storage so concurrent tests each see their own options.
    testopts_local = threading.local()
else:
    # Single-threaded run: a plain object is enough to hold the options.
    class TestOpts_Local:
        pass
    testopts_local = TestOpts_Local()
57
def getTestOpts():
    """Return the options of the test currently being set up or run."""
    return testopts_local.x

def setLocalTestOpts(opts):
    """Install *opts* as the current (possibly thread-local) test options."""
    global testopts_local
    testopts_local.x=opts

def isStatsTest():
    # A "stats" test checks compiler and/or runtime performance counters;
    # such tests are filtered out when config.skip_perf_tests is set.
    opts = getTestOpts()
    return len(opts.compiler_stats_range_fields) > 0 or len(opts.stats_range_fields) > 0
68
69
# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
    # Chain f onto the directory-wide settings; executeSetups runs such
    # nested lists left-to-right.
    global thisdir_settings
    thisdir_settings = [thisdir_settings, f]
75
76 # -----------------------------------------------------------------------------
77 # Canned setup functions for common cases. eg. for a test you might say
78 #
79 # test('test001', normal, compile, [''])
80 #
81 # to run it without any options, but change it to
82 #
83 # test('test001', expect_fail, compile, [''])
84 #
85 # to expect failure for this test.
86
def normal( name, opts ):
    """Default setup function: leave the test's options untouched."""
    return

def skip( name, opts ):
    """Setup function: mark this test as skipped."""
    opts.skip = 1

def expect_fail( name, opts ):
    """Setup function: mark this test as expected to fail.

    Use when the compiler, testdriver, OS or platform is missing a
    certain feature, and we don't plan to or can't fix it now or in
    the future.
    """
    opts.expect = 'fail'
98
def reqlib( lib ):
    """Setup function factory: require GHC package *lib* to be installed.

    The returned setup function marks the test 'missing-lib' when
    ghc-pkg cannot describe the package (see _reqlib).
    """
    return lambda name, opts, l=lib: _reqlib (name, opts, l )

def stage1(name, opts):
    # See Note [Why is there no stage1 setup function?]
    framework_fail(name, 'stage1 setup function does not exist',
                   'add your test to testsuite/tests/stage1 instead')
106
107 # Note [Why is there no stage1 setup function?]
108 #
109 # Presumably a stage1 setup function would signal that the stage1
110 # compiler should be used to compile a test.
111 #
112 # Trouble is, the path to the compiler + the `ghc --info` settings for
113 # that compiler are currently passed in from the `make` part of the
114 # testsuite driver.
115 #
116 # Switching compilers in the Python part would be entirely too late, as
117 # all ghc_with_* settings would be wrong. See config/ghc for possible
118 # consequences (for example, config.run_ways would still be
119 # based on the default compiler, quite likely causing ./validate --slow
120 # to fail).
121 #
122 # It would be possible to let the Python part of the testsuite driver
123 # make the call to `ghc --info`, but doing so would require quite some
124 # work. Care has to be taken to not affect the run_command tests for
125 # example, as they also use the `ghc --info` settings:
126 # quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
127 #
128 # If you want a test to run using the stage1 compiler, add it to the
129 # testsuite/tests/stage1 directory. Validate runs the tests in that
130 # directory with `make stage=1`.
131
# Cache the results of looking to see if we have a library or not.
# This makes quite a difference, especially on Windows.
have_lib = {}

def _reqlib( name, opts, lib ):
    """Mark the test 'missing-lib' unless ghc-pkg can describe *lib*.

    Results are memoised in have_lib, keyed on the package name.
    """
    if lib in have_lib:
        got_it = have_lib[lib]
    else:
        cmd = strip_quotes(config.ghc_pkg)
        p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # read from stdout and stderr to avoid blocking due to
        # buffers filling
        p.communicate()
        r = p.wait()
        # ghc-pkg exits with 0 exactly when the package is present.
        got_it = r == 0
        have_lib[lib] = got_it

    if not got_it:
        opts.expect = 'missing-lib'
153
def req_haddock( name, opts ):
    """Setup function: expect 'missing-lib' unless haddock is configured."""
    if config.haddock:
        return
    opts.expect = 'missing-lib'

def req_profiling( name, opts ):
    '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
    if config.have_profiling:
        return
    opts.expect = 'fail'

def req_shared_libs( name, opts ):
    """Setup function: expect failure unless shared libraries are available."""
    if config.have_shared_libs:
        return
    opts.expect = 'fail'

def req_interp( name, opts ):
    """Setup function: expect failure unless the interpreter is available."""
    if config.have_interp:
        return
    opts.expect = 'fail'

def req_smp( name, opts ):
    """Setup function: expect failure unless SMP support is available."""
    if config.have_smp:
        return
    opts.expect = 'fail'

def ignore_output( name, opts ):
    """Setup function: set the ignore_output flag for this test."""
    opts.ignore_output = 1

def no_stdin( name, opts ):
    """Setup function: set the no_stdin flag for this test."""
    opts.no_stdin = 1

def combined_output( name, opts ):
    """Setup function: treat stdout and stderr as one combined stream."""
    opts.combined_output = True

# -----
183
184 # -----
185
186 def expect_fail_for( ways ):
187 return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )
188
189 def _expect_fail_for( name, opts, ways ):
190 opts.expect_fail_for = ways
191
192 def expect_broken( bug ):
193 # This test is a expected not to work due to the indicated trac bug
194 # number.
195 return lambda name, opts, b=bug: _expect_broken (name, opts, b )
196
197 def _expect_broken( name, opts, bug ):
198 record_broken(name, opts, bug)
199 opts.expect = 'fail';
200
201 def expect_broken_for( bug, ways ):
202 return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )
203
204 def _expect_broken_for( name, opts, bug, ways ):
205 record_broken(name, opts, bug)
206 opts.expect_fail_for = ways
207
def record_broken(name, opts, bug):
    """Add this test to the global list of known-broken tests, avoiding
    duplicate entries (used for reporting, e.g. with list_broken)."""
    global brokens
    me = (bug, opts.testdir, name)
    # PEP 8 idiom: 'x not in xs' rather than 'not x in xs'.
    if me not in brokens:
        brokens.append(me)
213
def _expect_pass(way):
    # Helper function. Not intended for use in .T files.
    # A test is expected to pass in *way* unless its options say
    # otherwise (opts.expect != 'pass', or the way is in expect_fail_for).
    opts = getTestOpts()
    return opts.expect == 'pass' and way not in opts.expect_fail_for
218
219 # -----
220
221 def omit_ways( ways ):
222 return lambda name, opts, w=ways: _omit_ways( name, opts, w )
223
224 def _omit_ways( name, opts, ways ):
225 opts.omit_ways = ways
226
227 # -----
228
229 def only_ways( ways ):
230 return lambda name, opts, w=ways: _only_ways( name, opts, w )
231
232 def _only_ways( name, opts, ways ):
233 opts.only_ways = ways
234
235 # -----
236
237 def extra_ways( ways ):
238 return lambda name, opts, w=ways: _extra_ways( name, opts, w )
239
240 def _extra_ways( name, opts, ways ):
241 opts.extra_ways = ways
242
243 # -----
244
245 def set_stdin( file ):
246 return lambda name, opts, f=file: _set_stdin(name, opts, f);
247
248 def _set_stdin( name, opts, f ):
249 opts.stdin = f
250
251 # -----
252
253 def exit_code( val ):
254 return lambda name, opts, v=val: _exit_code(name, opts, v);
255
256 def _exit_code( name, opts, v ):
257 opts.exit_code = v
258
def signal_exit_code( val ):
    """Setup function factory: expect the program to die from signal *val*.

    On Solaris the raw signal number is the exit code; elsewhere the
    reported exit code is 128 + signal number.
    """
    if opsys('solaris2'):
        return exit_code( val );
    else:
        # When application running on Linux receives fatal error
        # signal, then its exit code is encoded as 128 + signal
        # value. See http://www.tldp.org/LDP/abs/html/exitcodes.html
        # I assume that Mac OS X behaves in the same way at least Mac
        # OS X builder behavior suggests this.
        return exit_code( val+128 );
269
# -----

def compile_timeout_multiplier( val ):
    """Setup function factory: scale this test's compile timeout by *val*."""
    def setup(name, opts, v=val):
        return _compile_timeout_multiplier(name, opts, v)
    return setup

def _compile_timeout_multiplier( name, opts, v ):
    opts.compile_timeout_multiplier = v

def run_timeout_multiplier( val ):
    """Setup function factory: scale this test's run timeout by *val*."""
    def setup(name, opts, v=val):
        return _run_timeout_multiplier(name, opts, v)
    return setup

def _run_timeout_multiplier( name, opts, v ):
    opts.run_timeout_multiplier = v
283
# -----

def extra_run_opts( val ):
    """Setup function factory: pass *val* as extra options when running
    the test program."""
    def setup(name, opts, v=val):
        return _extra_run_opts(name, opts, v)
    return setup

def _extra_run_opts( name, opts, v ):
    opts.extra_run_opts = v

# -----

def extra_hc_opts( val ):
    """Setup function factory: pass *val* as extra compiler options."""
    def setup(name, opts, v=val):
        return _extra_hc_opts(name, opts, v)
    return setup

def _extra_hc_opts( name, opts, v ):
    opts.extra_hc_opts = v
299
# -----

def extra_clean( files ):
    # TODO. Remove all calls to extra_clean.
    # Obsolete: returns a setup function that does nothing.
    def setup(_name, _opts):
        return None
    return setup

def extra_files(files):
    """Setup function factory: declare extra files the test depends on
    (appended to opts.extra_files)."""
    def setup(name, opts):
        return _extra_files(name, opts, files)
    return setup

def _extra_files(name, opts, files):
    opts.extra_files.extend(files)
311
# -----

def stats_num_field( field, expecteds ):
    """Setup function factory: check runtime stat *field* against an
    expected value with an allowed deviation.

    *expecteds* is either a single (expected, deviation) pair, or a
    list of (guard, expected, deviation) triples of which the first
    one with a True guard is used.
    """
    return lambda name, opts, f=field, e=expecteds: _stats_num_field(name, opts, f, e);

def _stats_num_field( name, opts, field, expecteds ):
    # NOTE: framework_fail only records the failure; it does not raise,
    # so control deliberately continues after a duplicate field.
    if field in opts.stats_range_fields:
        framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')

    if type(expecteds) is list:
        for (b, expected, dev) in expecteds:
            if b:
                opts.stats_range_fields[field] = (expected, dev)
                return
        framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')

    else:
        (expected, dev) = expecteds
        opts.stats_range_fields[field] = (expected, dev)
331
def compiler_stats_num_field( field, expecteds ):
    """Setup function factory: check compiler stat *field* against the
    first applicable (guard, expected, deviation) triple in *expecteds*."""
    return lambda name, opts, f=field, e=expecteds: _compiler_stats_num_field(name, opts, f, e);

def _compiler_stats_num_field( name, opts, field, expecteds ):
    # See _stats_num_field; framework_fail records but does not raise.
    if field in opts.compiler_stats_range_fields:
        framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')

    # Compiler performance numbers change when debugging is on, making the results
    # useless and confusing. Therefore, skip if debugging is on.
    if compiler_debugged():
        skip(name, opts)

    for (b, expected, dev) in expecteds:
        if b:
            opts.compiler_stats_range_fields[field] = (expected, dev)
            return

    framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
350
# -----

def when(b, f):
    """Apply setup function *f* only when *b* is true; otherwise behave
    like the 'normal' setup function.

    When config.list_broken is set we want to see every expect_broken
    call, so *f* is applied unconditionally in that mode.
    """
    if b or config.list_broken:
        return f
    return normal

def unless(b, f):
    """Apply setup function *f* only when *b* is false."""
    return when(not b, f)
363
# Predicates over the global test configuration, for use in setup
# functions via when()/unless().

def doing_ghci():
    # True when the 'ghci' way is among the ways being run.
    return 'ghci' in config.run_ways

def ghc_dynamic():
    return config.ghc_dynamic

def fast():
    # True when the testsuite runs at the fastest speed setting.
    return config.speed == 2

def platform( plat ):
    return config.platform == plat

def opsys( os ):
    return config.os == os

def arch( arch ):
    return config.arch == arch

def wordsize( ws ):
    # config.wordsize is stored as a string, hence the str() conversion.
    return config.wordsize == str(ws)

def msys( ):
    return config.msys

def cygwin( ):
    return config.cygwin

def have_vanilla( ):
    return config.have_vanilla

def have_dynamic( ):
    return config.have_dynamic

def have_profiling( ):
    return config.have_profiling

def in_tree_compiler( ):
    return config.in_tree_compiler

def unregisterised( ):
    return config.unregisterised

def compiler_profiled( ):
    return config.compiler_profiled

def compiler_debugged( ):
    return config.compiler_debugged
411
# ---

def high_memory_usage(name, opts):
    """Setup function: run this memory-hungry test with nothing else
    in parallel."""
    opts.alone = True

def multi_cpu_race(name, opts):
    """Setup function: run alone.  For a multi-CPU race test, running
    it by itself increases the chance we'll actually see the race."""
    opts.alone = True
421
422 # ---
423 def literate( name, opts ):
424 opts.literate = 1;
425
426 def c_src( name, opts ):
427 opts.c_src = 1;
428
429 def objc_src( name, opts ):
430 opts.objc_src = 1;
431
432 def objcpp_src( name, opts ):
433 opts.objcpp_src = 1;
434
435 def cmm_src( name, opts ):
436 opts.cmm_src = 1;
437
438 def outputdir( odir ):
439 return lambda name, opts, d=odir: _outputdir(name, opts, d)
440
441 def _outputdir( name, opts, odir ):
442 opts.outputdir = odir;
443
# ----

def pre_cmd( cmd ):
    """Setup function factory: run shell command *cmd* in the test
    directory before the test itself (see do_test)."""
    def setup(name, opts, c=cmd):
        return _pre_cmd(name, opts, c)
    return setup

def _pre_cmd( name, opts, cmd ):
    opts.pre_cmd = cmd

# ----

def clean_cmd( cmd ):
    # TODO. Remove all calls to clean_cmd.
    # Obsolete: returns a setup function that does nothing.
    def setup(_name, _opts):
        return None
    return setup
457
458 # ----
459
460 def cmd_prefix( prefix ):
461 return lambda name, opts, p=prefix: _cmd_prefix(name, opts, prefix)
462
463 def _cmd_prefix( name, opts, prefix ):
464 opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd;
465
466 # ----
467
468 def cmd_wrapper( fun ):
469 return lambda name, opts, f=fun: _cmd_wrapper(name, opts, fun)
470
471 def _cmd_wrapper( name, opts, fun ):
472 opts.cmd_wrapper = fun
473
474 # ----
475
476 def compile_cmd_prefix( prefix ):
477 return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, prefix)
478
479 def _compile_cmd_prefix( name, opts, prefix ):
480 opts.compile_cmd_prefix = prefix
481
482 # ----
483
484 def check_stdout( f ):
485 return lambda name, opts, f=f: _check_stdout(name, opts, f)
486
487 def _check_stdout( name, opts, f ):
488 opts.check_stdout = f
489
# ----

def normalise_slashes( name, opts ):
    """Setup function: normalise path separators in the test output."""
    _normalise_fun(name, opts, normalise_slashes_)

def normalise_exe( name, opts ):
    """Setup function: normalise executable names in the test output."""
    _normalise_fun(name, opts, normalise_exe_)

def normalise_fun( *fs ):
    """Setup function factory: add *fs* to the output normalisers."""
    return lambda name, opts: _normalise_fun(name, opts, fs)

def _normalise_fun( name, opts, *fs ):
    # join_normalisers flattens nested sequences, so passing the fs
    # tuple straight through is fine.
    opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)

def normalise_errmsg_fun( *fs ):
    """Setup function factory: add *fs* to the error-message normalisers."""
    return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)

def _normalise_errmsg_fun( name, opts, *fs ):
    opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)
509
def normalise_version_( *pkgs ):
    """Return a normaliser that replaces version numbers of the given
    packages (e.g. 'base-4.9.0.0') with '<VERSION>'."""
    pattern = '(' + '|'.join(map(re.escape, pkgs)) + ')-[0-9.]+'
    def normalise_version__( str ):
        return re.sub(pattern, '\\1-<VERSION>', str)
    return normalise_version__
515
def normalise_version( *pkgs ):
    """Setup function factory: strip the given packages' version numbers
    from both the normal output and the error messages."""
    def normalise_version__( name, opts ):
        _normalise_fun(name, opts, normalise_version_(*pkgs))
        _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
    return normalise_version__

def normalise_drive_letter(name, opts):
    # Windows only. Change D:\\ to C:\\.
    _normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))
525
def keep_prof_callstacks(name, opts):
    """Setup function: keep profiling callstacks.

    Use together with `only_ways(prof_ways)`.
    """
    opts.keep_prof_callstacks = True
532
def join_normalisers(*a):
    """
    Compose functions, flattening sequences.

       join_normalisers(f1,[f2,f3],f4)

    is the same as

       lambda x: f1(f2(f3(f4(x))))
    """
    # collections.Iterable was removed in Python 3.10; prefer the
    # collections.abc location, falling back for older interpreters.
    try:
        from collections.abc import Iterable
    except ImportError:  # Python < 3.3
        from collections import Iterable

    def flatten(l):
        """
        Taken from http://stackoverflow.com/a/2158532/946226
        """
        for el in l:
            if isinstance(el, Iterable) and not isinstance(el, basestring):
                for sub in flatten(el):
                    yield sub
            else:
                yield el

    a = flatten(a)

    fn = lambda x: x # identity function
    for f in a:
        assert callable(f)
        fn = lambda x, f=f, fn=fn: fn(f(x))
    return fn
562
# ----
# Function for composing two opt-fns together

def executeSetups(fs, name, opts):
    """Apply a setup function, or (recursively) a list of setup
    functions, to the given test name and options, in order."""
    if type(fs) is not list:
        # A single setup function: just apply it.
        fs(name, opts)
    else:
        for setup in fs:
            executeSetups(setup, name, opts)
574
# -----------------------------------------------------------------------------
# The current directory of tests

def newTestDir(tempdir, dir):
    """Start a new test directory: reset the directory-wide settings so
    that subsequent tests pick up *tempdir* and *dir*."""

    global thisdir_settings
    # reset the options for this test directory
    def settings(name, opts, tempdir=tempdir, dir=dir):
        return _newTestDir(name, opts, tempdir, dir)
    thisdir_settings = settings

# Should be equal to entry in toplevel .gitignore.
testdir_suffix = '.run'

def _newTestDir(name, opts, tempdir, dir):
    # Sources live under the current directory; the test itself runs in
    # a scratch directory <tempdir>/<dir>/<name>.run.
    opts.srcdir = os.path.join(os.getcwd(), dir)
    opts.testdir = os.path.join(tempdir, dir, name + testdir_suffix)
    opts.compiler_always_flags = config.compiler_always_flags
592 opts.compiler_always_flags = config.compiler_always_flags
593
# -----------------------------------------------------------------------------
# Actually doing tests

# Tests that may run concurrently, tests that must run on their own
# (opts.alone), and all test names seen so far (for duplicate detection).
parallelTests = []
aloneTests = []
allTestNames = set([])
600
def runTest (opts, name, func, args):
    """Run a single registered test, either on a fresh worker thread
    (when config.use_threads) or directly in the current thread."""
    ok = 0

    if config.use_threads:
        # Wait for a free worker slot, then hand off to a new thread.
        t.thread_pool.acquire()
        try:
            while config.threads<(t.running_threads+1):
                t.thread_pool.wait()
            t.running_threads = t.running_threads+1
            ok=1
            t.thread_pool.release()
            thread.start_new_thread(test_common_thread, (name, opts, func, args))
        except:
            # If anything went wrong before the slot was released above,
            # release it here so other workers are not blocked forever.
            if not ok:
                t.thread_pool.release()
    else:
        test_common_work (name, opts, func, args)
618
# name  :: String
# setup :: TestOpts -> IO ()
def test (name, setup, func, args):
    """Register a test with the driver.

    *setup* is a setup function (or nested list of them), *func* runs
    the test, and *args* are extra arguments for *func*.  Duplicate or
    invalid names are reported as framework failures.
    """
    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    if config.run_only_some_tests:
        if name not in config.only:
            return
        else:
            # Note [Mutating config.only]
            # config.only is initiallly the set of tests requested by
            # the user (via 'make TEST='). We then remove all tests that
            # we've already seen (in .T files), so that we can later
            # report on any tests we couldn't find and error out.
            config.only.remove(name)

    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
    myTestOpts = copy.deepcopy(default_testopts)

    executeSetups([thisdir_settings, setup], name, myTestOpts)

    thisTest = lambda : runTest(myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
    else:
        parallelTests.append(thisTest)
    allTestNames.add(name)
655
if config.use_threads:
    def test_common_thread(name, opts, func, args):
        # Worker-thread entry point: run the test while holding the
        # global lock, then give back the worker slot and wake anyone
        # waiting for one.
        t.lock.acquire()
        try:
            test_common_work(name,opts,func,args)
        finally:
            t.lock.release()
            t.thread_pool.acquire()
            t.running_threads = t.running_threads - 1
            t.thread_pool.notify()
            t.thread_pool.release()
667
def get_package_cache_timestamp():
    """Return the mtime of the package database cache file, or 0.0 when
    no cache file is configured or it cannot be stat'ed.

    Used (in test_common_work) to detect tests that modify the package
    database.
    """
    if config.package_conf_cache_file == '':
        return 0.0
    else:
        try:
            return os.stat(config.package_conf_cache_file).st_mtime
        except OSError:
            # Narrowed from a bare 'except': os.stat raises OSError for
            # a missing/unreadable file; treat that as "no cache".
            return 0.0
676
# Build products are never test inputs, so don't copy them into the
# test directory (see ticket referenced below).
do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o') # 12112

def test_common_work (name, opts, func, args):
    """Drive one test: work out which ways to run, gather the files the
    test depends on, run each way, and record skips/failures."""
    try:
        t.total_tests = t.total_tests+1
        setLocalTestOpts(opts)

        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()

        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']

        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]

        t.total_test_cases = t.total_test_cases + len(all_ways)

        ok_way = lambda way: \
            not getTestOpts().skip \
            and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and way not in getTestOpts().omit_ways

        # Which ways we are asked to skip
        do_ways = list(filter (ok_way,all_ways))

        # Only run all ways in slow mode.
        # See Note [validate and testsuite speed] in toplevel Makefile.
        if config.accept:
            # Only ever run one way
            do_ways = do_ways[:1]
        elif config.speed > 0:
            # However, if we EXPLICITLY asked for a way (with extra_ways)
            # please test it!
            explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
            other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
            do_ways = other_ways[:1] + explicit_ways

        # Find all files in the source directory that this test
        # depends on. Do this only once for all ways.
        # Generously add all filenames that start with the name of
        # the test to this set, as a convenience to test authors.
        # They will have to use the `extra_files` setup function to
        # specify all other files that their test depends on (but
        # this seems to be necessary for only about 10% of all
        # tests).
        files = set(f for f in os.listdir(opts.srcdir)
                       if f.startswith(name) and not f == name and
                          not f.endswith(testdir_suffix) and
                          not os.path.splitext(f)[1] in do_not_copy)
        for filename in (opts.extra_files + extra_src_files.get(name, [])):
            if filename.startswith('/'):
                framework_fail(name, 'whole-test',
                    'no absolute paths in extra_files please: ' + filename)

            elif '*' in filename:
                # Don't use wildcards in extra_files too much, as
                # globbing is slow.
                files.update((os.path.relpath(f, opts.srcdir)
                            for f in glob.iglob(in_srcdir(filename))))

            elif filename:
                files.add(filename)

            else:
                framework_fail(name, 'whole-test', 'extra_file is empty string')

        # Run the required tests...
        for way in do_ways:
            if stopping():
                break
            try:
                do_test(name, way, func, args, files)
            except KeyboardInterrupt:
                stopNow()
            except Exception as e:
                framework_fail(name, way, str(e))
                traceback.print_exc()

        # Record the ways that were filtered out as skipped.
        for way in all_ways:
            if way not in do_ways:
                skiptest (name,way)

        if config.cleanup and do_ways:
            cleanup()

        package_conf_cache_file_end_timestamp = get_package_cache_timestamp();

        # A test must not leave the package database cache modified.
        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))

    except Exception as e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
781
def do_test(name, way, func, args, files):
    """Run one test in one way: populate the scratch directory with
    *files*, run any pre_cmd, invoke *func*, and record the outcome
    against the test's expectations."""
    opts = getTestOpts()

    full_name = name + '(' + way + ')'

    if_verbose(2, "=====> {0} {1} of {2} {3}".format(
        full_name, t.total_tests, len(allTestNames),
        [t.n_unexpected_passes, t.n_unexpected_failures, t.n_framework_failures]))

    # Clean up prior to the test, so that we can't spuriously conclude
    # that it passed on the basis of old run outputs.
    cleanup()

    # Link all source files for this test into a new directory in
    # /tmp, and run the test in that directory. This makes it
    # possible to run tests in parallel, without modification, that
    # would otherwise (accidentally) write to the same output file.
    # It also makes it easier to keep the testsuite clean.

    for extra_file in files:
        src = in_srcdir(extra_file)
        if extra_file.startswith('..'):
            # In case the extra_file is a file in an ancestor
            # directory (e.g. extra_files(['../shell.hs'])), make
            # sure it is copied to the test directory
            # (testdir/shell.hs), instead of ending up somewhere
            # else in the tree (testdir/../shell.hs)
            filename = os.path.basename(extra_file)
        else:
            filename = extra_file
        assert not '..' in filename # no funny stuff (foo/../../bar)
        dst = in_testdir(filename)

        if os.path.isfile(src):
            dirname = os.path.dirname(dst)
            if dirname:
                mkdirp(dirname)
            try:
                link_or_copy_file(src, dst)
            except OSError as e:
                if e.errno == errno.EEXIST and os.path.isfile(dst):
                    # Some tests depend on files from ancestor
                    # directories (e.g. '../shell.hs'). It is
                    # possible such a file was already copied over
                    # for another test, since cleanup() doesn't
                    # delete them.
                    pass
                else:
                    raise
        elif os.path.isdir(src):
            os.makedirs(dst)
            lndir(src, dst)
        else:
            if not config.haddock and os.path.splitext(filename)[1] == '.t':
                # When using a ghc built without haddock support, .t
                # files are rightfully missing. Don't
                # framework_fail. Test will be skipped later.
                pass
            else:
                framework_fail(name, way,
                    'extra_file does not exist: ' + extra_file)

    if not files:
        # Always create the testdir, even when no files were copied
        # (because user forgot to specify extra_files setup function), to
        # prevent the confusing error: can't cd to <testdir>.
        os.makedirs(opts.testdir)

    if func.__name__ == 'run_command' or opts.pre_cmd:
        # When running 'MAKE' make sure 'TOP' still points to the
        # root of the testsuite.
        src_makefile = in_srcdir('Makefile')
        dst_makefile = in_testdir('Makefile')
        if os.path.exists(src_makefile):
            with open(src_makefile, 'r') as src:
                makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
            with open(dst_makefile, 'w') as dst:
                dst.write(makefile)

    # Drop the global lock while the (potentially slow) test runs.
    if config.use_threads:
        t.lock.release()

    if opts.pre_cmd:
        exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, opts.pre_cmd))
        if exit_code != 0:
            framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))

    try:
        result = func(*[name,way] + args)
    finally:
        if config.use_threads:
            t.lock.acquire()

    if opts.expect not in ['pass', 'fail', 'missing-lib']:
        framework_fail(name, way, 'bad expected ' + opts.expect)

    try:
        passFail = result['passFail']
    except:
        passFail = 'No passFail found'

    # Record the result against the expectation for this way.
    if passFail == 'pass':
        if _expect_pass(way):
            t.n_expected_passes = t.n_expected_passes + 1
            if name in t.expected_passes:
                t.expected_passes[name].append(way)
            else:
                t.expected_passes[name] = [way]
        else:
            if_verbose(1, '*** unexpected pass for %s' % full_name)
            t.n_unexpected_passes = t.n_unexpected_passes + 1
            addPassingTestInfo(t.unexpected_passes, opts.testdir, name, way)
    elif passFail == 'fail':
        if _expect_pass(way):
            reason = result['reason']
            tag = result.get('tag')
            if tag == 'stat':
                if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
                t.n_unexpected_stat_failures = t.n_unexpected_stat_failures + 1
                addFailingTestInfo(t.unexpected_stat_failures, opts.testdir, name, reason, way)
            else:
                if_verbose(1, '*** unexpected failure for %s' % full_name)
                t.n_unexpected_failures = t.n_unexpected_failures + 1
                addFailingTestInfo(t.unexpected_failures, opts.testdir, name, reason, way)
        else:
            if opts.expect == 'missing-lib':
                t.n_missing_libs = t.n_missing_libs + 1
                if name in t.missing_libs:
                    t.missing_libs[name].append(way)
                else:
                    t.missing_libs[name] = [way]
            else:
                t.n_expected_failures = t.n_expected_failures + 1
                if name in t.expected_failures:
                    t.expected_failures[name].append(way)
                else:
                    t.expected_failures[name] = [way]
    else:
        framework_fail(name, way, 'bad result ' + passFail)
921
def addPassingTestInfo (testInfos, directory, name, way):
    """Record an unexpected pass: testInfos[directory][name] is the list
    of ways in which the test passed."""
    # Strip a leading './' or '.\' so directory keys compare consistently.
    directory = re.sub('^\\.[/\\\\]', '', directory)

    # setdefault replaces the manual not-in-dict checks (and the
    # non-idiomatic 'not x in y' spelling).
    testInfos.setdefault(directory, {}).setdefault(name, []).append(way)
932
def addFailingTestInfo (testInfos, directory, name, reason, way):
    """Record an unexpected failure: testInfos[directory][name][reason]
    is the list of ways that failed for that reason."""
    # Strip a leading './' or '.\' so directory keys compare consistently.
    directory = re.sub('^\\.[/\\\\]', '', directory)

    # setdefault replaces the manual not-in-dict checks (and the
    # non-idiomatic 'not x in y' spelling).
    ways = testInfos.setdefault(directory, {}).setdefault(name, {}).setdefault(reason, [])
    ways.append(way)
946
def skiptest (name, way):
    """Record that *name* was skipped in *way* (counters and per-test map)."""
    t.n_tests_skipped = t.n_tests_skipped + 1
    if name in t.tests_skipped:
        t.tests_skipped[name].append(way)
    else:
        t.tests_skipped[name] = [way]
954
def framework_fail( name, way, reason ):
    """Record a failure of the test framework itself (as opposed to a
    test failing).

    Note: this only records and reports the failure; it does not raise,
    so callers continue executing afterwards.
    """
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
    t.n_framework_failures = t.n_framework_failures + 1
    if name in t.framework_failures:
        t.framework_failures[name].append(way)
    else:
        t.framework_failures[name] = [way]
963
def badResult(result):
    """Return True unless *result* is a result dict whose passFail is
    'pass'.  Malformed results count as bad."""
    try:
        return result['passFail'] != 'pass'
    except (KeyError, TypeError):
        # Narrowed from a bare 'except': a missing key or a non-dict
        # result is a bad result, but other exceptions should propagate.
        return True
971
def passed():
    """Result dict for a test that passed."""
    return {'passFail': 'pass'}

def failBecause(reason, tag=None):
    """Result dict for a failed test: a human-readable *reason* plus an
    optional machine-readable *tag* (e.g. 'stat')."""
    return {'passFail': 'fail', 'reason': reason, 'tag': tag}
977
# -----------------------------------------------------------------------------
# Generic command tests

# A generic command test is expected to run and exit successfully.
#
# The expected exit code can be changed via exit_code() as normal, and
# the expected stdout/stderr are stored in <testname>.stdout and
# <testname>.stderr. The output of the command can be ignored
# altogether by using run_command_ignore_output instead of
# run_command.

def run_command( name, way, cmd ):
    # Note: an empty way and no extra run options are passed to simple_run.
    return simple_run( name, '', cmd, '' )
991
# -----------------------------------------------------------------------------
# GHCi tests

def ghci_script( name, way, script):
    """Run *script* through GHCi with this way's compiler flags."""
    flags = ' '.join(get_compiler_flags())

    way_flags = ' '.join(config.way_flags[way])

    # We pass HC and HC_OPTS as environment variables, so that the
    # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
    cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {flags} {way_flags}'
          ).format(flags=flags, way_flags=way_flags)

    getTestOpts().stdin = script
    return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1007
# -----------------------------------------------------------------------------
# Compile-only tests

# Thin wrappers around do_compile; the third argument says whether the
# compilation is expected to fail.

def compile( name, way, extra_hc_opts ):
    return do_compile( name, way, 0, '', [], extra_hc_opts )

def compile_fail( name, way, extra_hc_opts ):
    return do_compile( name, way, 1, '', [], extra_hc_opts )

def multimod_compile( name, way, top_mod, extra_hc_opts ):
    return do_compile( name, way, 0, top_mod, [], extra_hc_opts )

def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
    return do_compile( name, way, 1, top_mod, [], extra_hc_opts )

def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
    return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)

def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
    return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
1028
def do_compile(name, way, should_fail, top_mod, extra_mods, extra_hc_opts):
    """Compile the test (building any extra modules first) and compare
    the compiler's stderr against the expected output."""
    # print 'Compile only, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    result = simple_build(name, way, extra_hc_opts, should_fail, top_mod, 0, 1)

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    expected_stderr_file = find_expected_file(name, 'stderr')
    actual_stderr_file = add_suffix(name, 'comp.stderr')

    if not compare_outputs(way, 'stderr',
                           join_normalisers(getTestOpts().extra_errmsg_normaliser,
                                            normalise_errmsg),
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=normalise_whitespace):
        return failBecause('stderr mismatch')

    # no problems found, this test passed
    return passed()
1058
def compile_cmp_asm( name, way, extra_hc_opts ):
    """Compile a .cmm file with -keep-s-files and compare the generated
    assembly against the expected '<name>.asm' sample."""
    # print('Compile only, extra args = ', extra_hc_opts)
    # (debug chatter silenced, for consistency with do_compile)
    result = simple_build(name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)

    if badResult(result):
        return result

    # the actual assembly should always match the expected sample,
    # regardless of whether we expected the compilation to fail or not

    expected_asm_file = find_expected_file(name, 'asm')
    actual_asm_file = add_suffix(name, 's')

    if not compare_outputs(way, 'asm',
                           join_normalisers(normalise_errmsg, normalise_asm),
                           expected_asm_file, actual_asm_file):
        return failBecause('asm mismatch')

    # no problems found, this test passed
    return passed()
1080
1081 # -----------------------------------------------------------------------------
1082 # Compile-and-run tests
1083
def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts ):
    """Compile a test and run the resulting program, or hand it to GHCi
    for interpreter ways."""
    prep = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(prep):
        return prep
    extra_hc_opts = prep['hc_opts']

    # Interpreter ways never build a binary.
    if way.startswith('ghci'):
        return interpreter_run( name, way, extra_hc_opts, 0, top_mod )

    # compiled...
    build = simple_build(name, way, extra_hc_opts, 0, top_mod, 1, 1)
    if badResult(build):
        return build

    # we don't check the compiler's stderr for a compile-and-run test
    return simple_run( name, way, './' + name, getTestOpts().extra_run_opts )
1103
def compile_and_run( name, way, extra_hc_opts ):
    """Single-module compile-and-run test."""
    return compile_and_run__( name, way, '', [], extra_hc_opts)
1106
def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
    """Multi-module (--make) compile-and-run test."""
    return compile_and_run__( name, way, top_mod, [], extra_hc_opts)
1109
def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
    """Compile-and-run test with extra pre-built modules."""
    return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
1112
def stats( name, way, stats_file ):
    """Stats-only test: check runtime stats in stats_file against the
    ranges declared by the test's options."""
    opts = getTestOpts()
    return checkStats(name, way, stats_file, opts.stats_range_fields)
1116
1117 # -----------------------------------------------------------------------------
1118 # Check -t stats info
1119
def checkStats(name, way, stats_file, range_fields):
    """Check RTS '-t --machine-readable' output in stats_file against
    range_fields, a dict of field name -> (expected value, deviation %).

    Returns passed(), or a failure result when the stats file is
    unreadable, a field is missing, or a value is out of range.
    Diagnostics are printed for out-of-range fields (and unconditionally
    at verbosity >= 4).
    """
    full_name = name + '(' + way + ')'

    result = passed()
    if len(range_fields) > 0:
        try:
            f = open(in_testdir(stats_file))
        except IOError as e:
            return failBecause(str(e))
        contents = f.read()
        f.close()

        for (field, (expected, dev)) in range_fields.items():
            # Stats are emitted as ("field", "value") pairs.
            m = re.search('\("' + field + '", "([0-9]+)"\)', contents)
            if m is None:
                print('Failed to find field: ', field)
                result = failBecause('no such stats field')
                # BUGFIX: without this 'continue', m.group(1) below raised
                # AttributeError instead of reporting the failure.
                continue
            val = int(m.group(1))

            lowerBound = trunc( expected * ((100 - float(dev))/100))
            upperBound = trunc(0.5 + ceil(expected * ((100 + float(dev))/100)))

            deviation = round(((float(val) * 100)/ expected) - 100, 1)

            if val < lowerBound:
                print(field, 'value is too low:')
                print('(If this is because you have improved GHC, please')
                print('update the test so that GHC doesn\'t regress again)')
                result = failBecause('stat too good', tag='stat')
            if val > upperBound:
                print(field, 'value is too high:')
                result = failBecause('stat not good enough', tag='stat')

            if val < lowerBound or val > upperBound or config.verbose >= 4:
                # Width of the widest number, for right-aligned display.
                length = max(len(str(x)) for x in [expected, lowerBound, upperBound, val])

                def display(descr, val, extra):
                    print(descr, str(val).rjust(length), extra)

                display(' Expected ' + full_name + ' ' + field + ':', expected, '+/-' + str(dev) + '%')
                display(' Lower bound ' + full_name + ' ' + field + ':', lowerBound, '')
                display(' Upper bound ' + full_name + ' ' + field + ':', upperBound, '')
                display(' Actual ' + full_name + ' ' + field + ':', val, '')
                if val != expected:
                    display(' Deviation ' + full_name + ' ' + field + ':', deviation, '%')

    return result
1171
1172 # -----------------------------------------------------------------------------
1173 # Build a single-module program
1174
def extras_build( way, extra_mods, extra_hc_opts ):
    """Compile each (module, options) pair in extra_mods; non-Haskell
    sources contribute their object file to the returned hc_opts."""
    for mod, mod_opts in extra_mods:
        result = simple_build(mod, way, mod_opts + ' ' + extra_hc_opts, 0, '', 0, 0)
        is_haskell = mod.endswith('.hs') or mod.endswith('.lhs')
        if not is_haskell:
            extra_hc_opts += ' ' + replace_suffix(mod, 'o')
        if badResult(result):
            return result

    return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1184
def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf):
    """Run the compiler once over a test's source.

    name:          test name, used to derive file names
    way:           selects the way-specific compiler flags
    extra_hc_opts: extra flags appended to the compile command
    should_fail:   non-zero when compilation is expected to fail
    top_mod:       root module for a --make build, or '' for one module
    link:          non-zero to link an executable called `name`
    addsuf:        non-zero to append the source suffix to `name`

    Compiler stderr is captured in <name>.comp.stderr; compiler stats,
    when the test requests them, go to <name>.comp.stats and are checked
    here via checkStats.
    """
    opts = getTestOpts()
    errname = add_suffix(name, 'comp.stderr')

    # Pick the source file: an explicit top module wins, else the test
    # name with or without the language suffix.
    if top_mod != '':
        srcname = top_mod
    elif addsuf:
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = name

    # Choose the compiler mode: --make for multi-module, -o for a linked
    # single module, -c for compile-only.
    if top_mod != '':
        to_do = '--make '
        if link:
            to_do = to_do + '-o ' + name
    elif link:
        to_do = '-o ' + name
    else:
        to_do = '-c' # just compile

    stats_file = name + '.comp.stats'
    if len(opts.compiler_stats_range_fields) > 0:
        extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'

    # Required by GHC 7.3+, harmless for earlier versions:
    if (getTestOpts().c_src or
        getTestOpts().objc_src or
        getTestOpts().objcpp_src or
        getTestOpts().cmm_src):
        extra_hc_opts += ' -no-hs-main '

    if getTestOpts().compile_cmd_prefix == '':
        cmd_prefix = ''
    else:
        cmd_prefix = getTestOpts().compile_cmd_prefix + ' '

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    # NOTE: the format(**locals()) call relies on the local variable
    # names above; '{compiler}' is left for runCmd to substitute.
    cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
           '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts} '
           '> {errname} 2>&1'
          ).format(**locals())

    exit_code = runCmd(cmd, opts.compile_timeout_multiplier)

    if exit_code != 0 and not should_fail:
        if config.verbose >= 1 and _expect_pass(way):
            print('Compile failed (exit code {0}) errors were:'.format(exit_code))
            actual_stderr_path = in_testdir(name, 'comp.stderr')
            if_verbose_dump(1, actual_stderr_path)

    # ToDo: if the sub-shell was killed by ^C, then exit

    # Stats are checked even before the exit code is judged.
    statsResult = checkStats(name, way, stats_file, opts.compiler_stats_range_fields)

    if badResult(statsResult):
        return statsResult

    if should_fail:
        if exit_code == 0:
            return failBecause('exit code 0')
    else:
        if exit_code != 0:
            return failBecause('exit code non-0')

    return passed()
1251
1252 # -----------------------------------------------------------------------------
1253 # Run a program and check its output
1254 #
1255 # If testname.stdin exists, route input from that, else
1256 # from /dev/null. Route output to testname.run.stdout and
1257 # testname.run.stderr. Returns the exit code of the run.
1258
def simple_run(name, way, prog, extra_run_opts):
    """Run a compiled test program and check exit code and output.

    Stdin comes from opts.stdin, else <name>.stdin if it exists, else
    /dev/null. Stdout/stderr are routed to <name>.run.stdout and
    <name>.run.stderr. Returns passed() or a failure result.
    """
    opts = getTestOpts()

    # figure out what to use for stdin
    if opts.stdin != '':
        use_stdin = opts.stdin
    else:
        stdin_file = add_suffix(name, 'stdin')
        if os.path.exists(in_testdir(stdin_file)):
            use_stdin = stdin_file
        else:
            use_stdin = '/dev/null'

    run_stdout = add_suffix(name,'run.stdout')
    run_stderr = add_suffix(name,'run.stderr')

    my_rts_flags = rts_flags(way)

    # When the test declares stat ranges, ask the RTS for
    # machine-readable stats.
    stats_file = name + '.stats'
    if len(opts.stats_range_fields) > 0:
        stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    else:
        stats_args = ''

    if opts.no_stdin:
        stdin_comes_from = ''
    else:
        stdin_comes_from = ' <' + use_stdin

    if opts.combined_output:
        redirection = ' > {0} 2>&1'.format(run_stdout)
        redirection_append = ' >> {0} 2>&1'.format(run_stdout)
    else:
        redirection = ' > {0} 2> {1}'.format(run_stdout, run_stderr)
        redirection_append = ' >> {0} 2>> {1}'.format(run_stdout, run_stderr)

    # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
    cmd = prog + stats_args + ' ' \
        + my_rts_flags + ' ' \
        + extra_run_opts + ' ' \
        + stdin_comes_from \
        + redirection

    if opts.cmd_wrapper != None:
        cmd = opts.cmd_wrapper(cmd) + redirection_append

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    # run the command
    exit_code = runCmd(cmd, opts.run_timeout_multiplier)

    # check the exit code
    if exit_code != opts.exit_code:
        if config.verbose >= 1 and _expect_pass(way):
            print('Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')')
            dump_stdout(name)
            dump_stderr(name)
        return failBecause('bad exit code')

    # Heap/time profiling checks are triggered by the way's RTS flags.
    check_hp = my_rts_flags.find("-h") != -1
    check_prof = my_rts_flags.find("-p") != -1

    if not opts.ignore_output:
        bad_stderr = not opts.combined_output and not check_stderr_ok(name, way)
        bad_stdout = not check_stdout_ok(name, way)
        if bad_stderr:
            return failBecause('bad stderr')
        if bad_stdout:
            return failBecause('bad stdout')
        # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
        if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
            return failBecause('bad heap profile')
        if check_prof and not check_prof_ok(name, way):
            return failBecause('bad profile')

    return checkStats(name, way, stats_file, opts.stats_range_fields)
1335
def rts_flags(way):
    """Return the '+RTS ... -RTS' argument string for `way`, or '' when
    the way has no RTS flags."""
    if way == '':
        return ''

    args = config.way_rts_flags[way]
    if not args:
        return ''
    return '+RTS ' + ' '.join(args) + ' -RTS'
1346
1347 # -----------------------------------------------------------------------------
1348 # Run a program in the interpreter and check its output
1349
def interpreter_run( name, way, extra_hc_opts, compile_only, top_mod ):
    """Run a test inside GHCi and check exit code and output.

    A generated GHCi script (<name>.genscript) sets up the program
    environment, emits delimiter lines so GHCi's own output can be
    separated from the program's, and runs Main.main. The combined
    output is then split into comp.* and run.* files and compared.
    """
    opts = getTestOpts()

    outname = add_suffix(name, 'interp.stdout')
    errname = add_suffix(name, 'interp.stderr')

    if (top_mod == ''):
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = top_mod

    scriptname = add_suffix(name, 'genscript')
    qscriptname = in_testdir(scriptname)

    delimiter = '===== program output begins here\n'

    script = open(qscriptname, 'w')
    if not compile_only:
        # set the prog name and command-line args to match the compiled
        # environment.
        script.write(':set prog ' + name + '\n')
        script.write(':set args ' + getTestOpts().extra_run_opts + '\n')
        # Add marker lines to the stdout and stderr output files, so we
        # can separate GHCi's output from the program's.
        script.write(':! echo ' + delimiter)
        script.write(':! echo 1>&2 ' + delimiter)
        # Set stdout to be line-buffered to match the compiled environment.
        script.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
        # wrapping in GHC.TopHandler.runIO ensures we get the same output
        # in the event of an exception as for the compiled program.
        script.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
    script.close()

    # figure out what to use for stdin
    # NOTE(review): this mixes getTestOpts() and the `opts` alias bound
    # above; they refer to the same options object.
    if getTestOpts().stdin != '':
        stdin_file = in_testdir(opts.stdin)
    else:
        stdin_file = in_testdir(name, 'stdin')

    # Any test stdin is appended to the script, since GHCi reads both
    # from its own stdin.
    if os.path.exists(stdin_file):
        os.system('cat "{0}" >> "{1}"'.format(stdin_file, qscriptname))

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    if getTestOpts().combined_output:
        redirection = ' > {0} 2>&1'.format(outname)
        redirection_append = ' >> {0} 2>&1'.format(outname)
    else:
        redirection = ' > {0} 2> {1}'.format(outname, errname)
        redirection_append = ' >> {0} 2>> {1}'.format(outname, errname)

    cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts} '
           '< {scriptname} {redirection}'
          ).format(**locals())

    if getTestOpts().cmd_wrapper != None:
        cmd = getTestOpts().cmd_wrapper(cmd) + redirection_append;

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    exit_code = runCmd(cmd, opts.run_timeout_multiplier)

    # split the stdout into compilation/program output
    split_file(in_testdir(outname), delimiter,
               in_testdir(name, 'comp.stdout'),
               in_testdir(name, 'run.stdout'))
    split_file(in_testdir(errname), delimiter,
               in_testdir(name, 'comp.stderr'),
               in_testdir(name, 'run.stderr'))

    # check the exit code
    if exit_code != getTestOpts().exit_code:
        print('Wrong exit code (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
        dump_stdout(name)
        dump_stderr(name)
        return failBecause('bad exit code')

    # ToDo: if the sub-shell was killed by ^C, then exit

    if getTestOpts().ignore_output or (check_stderr_ok(name, way) and
                                       check_stdout_ok(name, way)):
        return passed()
    else:
        return failBecause('bad stdout or stderr')
1434
def split_file(in_fn, delimiter, out1_fn, out2_fn):
    """Split in_fn at the first line equal to `delimiter` (after leading
    whitespace is stripped): earlier lines go to out1_fn, later lines to
    out2_fn, and the delimiter line itself is dropped.

    BUGFIX: the input file was previously never closed; all three files
    are now managed with context managers.
    """
    # See Note [Universal newlines].
    with io.open(in_fn, 'r', encoding='utf8', errors='replace', newline=None) as infile:
        with io.open(out1_fn, 'w', encoding='utf8', newline='') as out1:
            line = infile.readline()
            while re.sub(r'^\s*', '', line) != delimiter and line != '':
                out1.write(line)
                line = infile.readline()

        with io.open(out2_fn, 'w', encoding='utf8', newline='') as out2:
            line = infile.readline()
            while line != '':
                out2.write(line)
                line = infile.readline()
1452
1453 # -----------------------------------------------------------------------------
1454 # Utils
def get_compiler_flags():
    """Assemble the baseline compiler flags for the current test."""
    opts = getTestOpts()

    flags = list(opts.compiler_always_flags)
    flags.append(opts.extra_hc_opts)

    if opts.outputdir is not None:
        flags.extend(["-outputdir", opts.outputdir])

    return flags
1466
def check_stdout_ok(name, way):
    """Compare the program's stdout with its expected sample, or hand the
    check to a custom `check_stdout` hook when the test defines one."""
    actual_file = add_suffix(name, 'run.stdout')
    expected_file = find_expected_file(name, 'stdout')

    normaliser = join_normalisers(normalise_output, getTestOpts().extra_normaliser)

    custom_check = getTestOpts().check_stdout
    if custom_check:
        return custom_check(in_testdir(actual_file), normaliser)

    return compare_outputs(way, 'stdout', normaliser,
                           expected_file, actual_file)
1480
def dump_stdout( name ):
    """Print the program's captured stdout (<name>.run.stdout)."""
    print('Stdout:')
    print(read_no_crs(in_testdir(name, 'run.stdout')))
1484
def check_stderr_ok(name, way):
    """Compare the program's stderr with its expected sample."""
    actual_stderr_file = add_suffix(name, 'run.stderr')
    expected_stderr_file = find_expected_file(name, 'stderr')

    return compare_outputs(way, 'stderr',
                           join_normalisers(normalise_errmsg, getTestOpts().extra_errmsg_normaliser), \
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=normalise_whitespace)
1493
def dump_stderr( name ):
    """Print the program's captured stderr (<name>.run.stderr)."""
    print("Stderr:")
    print(read_no_crs(in_testdir(name, 'run.stderr')))
1497
def read_no_crs(file):
    """Read `file` as utf8 with universal newlines ('\\r\\n' -> '\\n'),
    returning '' if the file cannot be read.

    BUGFIX: the original wrote `h.close` without parentheses, so the
    handle was never closed; a context manager handles that now. The
    bare `except:` is narrowed to `except Exception:`.
    """
    try:
        # See Note [Universal newlines].
        with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
            return h.read()
    except Exception:
        # On Windows, if the program fails very early, it seems the
        # files stdout/stderr are redirected to may not get created
        return ''
1510
def write_file(file, str):
    """Write `str` to `file` as utf8 with no newline translation.

    BUGFIX: the original wrote `h.close` without parentheses, so the
    handle was only closed when garbage-collected; use a context manager.
    """
    # See Note [Universal newlines].
    with io.open(file, 'w', encoding='utf8', newline='') as h:
        h.write(str)
1516
1517 # Note [Universal newlines]
1518 #
1519 # We don't want to write any Windows style line endings ever, because
1520 # it would mean that `make accept` would touch every line of the file
1521 # when switching between Linux and Windows.
1522 #
1523 # Furthermore, when reading a file, it is convenient to translate all
1524 # Windows style endings to '\n', as it simplifies searching or massaging
1525 # the content.
1526 #
1527 # Solution: use `io.open` instead of `open`
1528 # * when reading: use newline=None to translate '\r\n' to '\n'
1529 # * when writing: use newline='' to not translate '\n' to '\r\n'
1530 #
1531 # See https://docs.python.org/2/library/io.html#io.open.
1532 #
# This should work with both python2 and python3, and with both mingw*
# and msys2 style Python.
1535 #
1536 # Do note that io.open returns unicode strings. So we have to specify
1537 # the expected encoding. But there is at least one file which is not
1538 # valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
1539 # Another solution would be to open files in binary mode always, and
1540 # operate on bytes.
1541
def check_hp_ok(name):
    """Run hp2ps over the test's heap profile; if ghostscript is
    available, also validate the generated PostScript. Returns a truthy
    value on success and a falsy one on failure."""
    opts = getTestOpts()

    # do not qualify for hp2ps because we should be in the right directory
    hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())

    if runCmd(hp2psCmd) != 0:
        print("hp2ps error when processing heap profile for " + name)
        return(False)

    actual_ps_path = in_testdir(name, 'ps')
    if not os.path.exists(actual_ps_path):
        print("hp2ps did not generate PostScript for " + name)
        return (False)

    if not gs_working:
        # assume postscript is valid without ghostscript
        return (True)

    if runCmd(genGSCmd(actual_ps_path)) == 0:
        return (True)

    print("hp2ps output for " + name + "is not valid PostScript")
    # Falls through (returning None, which is falsy), as before.
1567
def check_prof_ok(name, way):
    """Compare the test's .prof output against <name>.prof.sample.
    Returns True when no sample exists (nothing to compare)."""
    expected_prof_file = find_expected_file(name, 'prof.sample')
    expected_prof_path = in_testdir(expected_prof_file)

    # Check actual prof file only if we have an expected prof file to
    # compare it with.
    if not os.path.exists(expected_prof_path):
        return True

    actual_prof_file = add_suffix(name, 'prof')
    actual_prof_path = in_testdir(actual_prof_file)

    if not os.path.exists(actual_prof_path):
        print(actual_prof_path + " does not exist")
        return(False)

    if os.path.getsize(actual_prof_path) == 0:
        print(actual_prof_path + " is empty")
        return(False)

    return compare_outputs(way, 'prof', normalise_prof,
                           expected_prof_file, actual_prof_file,
                           whitespace_normaliser=normalise_whitespace)
1591
1592 # Compare expected output to actual output, and optionally accept the
1593 # new output. Returns true if output matched or was accepted, false
1594 # otherwise. See Note [Output comparison] for the meaning of the
1595 # normaliser and whitespace_normaliser parameters.
def compare_outputs(way, kind, normaliser, expected_file, actual_file,
                    whitespace_normaliser=lambda x:x):
    """Compare expected output to actual output, optionally accepting the
    new output (config.accept). Returns 1 if the output matched or was
    accepted, 0 otherwise. See Note [Output comparison] for the roles of
    `normaliser` and `whitespace_normaliser`.
    """
    expected_path = in_srcdir(expected_file)
    actual_path = in_testdir(actual_file)

    if os.path.exists(expected_path):
        expected_str = normaliser(read_no_crs(expected_path))
        # Create the .normalised file in the testdir, not in the srcdir.
        expected_normalised_file = add_suffix(expected_file, 'normalised')
        expected_normalised_path = in_testdir(expected_normalised_file)
    else:
        # No sample: treat expected output as empty.
        expected_str = ''
        expected_normalised_path = '/dev/null'

    actual_raw = read_no_crs(actual_path)
    actual_str = normaliser(actual_raw)

    # See Note [Output comparison].
    if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
        return 1
    else:
        if config.verbose >= 1 and _expect_pass(way):
            print('Actual ' + kind + ' output differs from expected:')

        if expected_normalised_path != '/dev/null':
            write_file(expected_normalised_path, expected_str)

        actual_normalised_path = add_suffix(actual_path, 'normalised')
        write_file(actual_normalised_path, actual_str)

        if config.verbose >= 1 and _expect_pass(way):
            # See Note [Output comparison].
            r = os.system('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
                                                        actual_normalised_path))

            # If for some reason there were no non-whitespace differences,
            # then do a full diff
            if r == 0:
                r = os.system('diff -u "{0}" "{1}"'.format(expected_normalised_path,
                                                           actual_normalised_path))

        # In accept mode, overwrite (or delete) the sample — unless this
        # test is expected to fail anyway.
        if config.accept and (getTestOpts().expect == 'fail' or
                              way in getTestOpts().expect_fail_for):
            if_verbose(1, 'Test is expected to fail. Not accepting new output.')
            return 0
        elif config.accept and actual_raw:
            if_verbose(1, 'Accepting new output.')
            write_file(expected_path, actual_raw)
            return 1
        elif config.accept:
            if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
            os.remove(expected_path)
            return 1
        else:
            return 0
1652
1653 # Note [Output comparison]
1654 #
1655 # We do two types of output comparison:
1656 #
1657 # 1. To decide whether a test has failed. We apply a `normaliser` and an
1658 # optional `whitespace_normaliser` to the expected and the actual
1659 # output, before comparing the two.
1660 #
1661 # 2. To show as a diff to the user when the test indeed failed. We apply
1662 # the same `normaliser` function to the outputs, to make the diff as
1663 # small as possible (only showing the actual problem). But we don't
1664 # apply the `whitespace_normaliser` here, because it might completely
1665 # squash all whitespace, making the diff unreadable. Instead we rely
1666 # on the `diff` program to ignore whitespace changes as much as
1667 # possible (#10152).
1668
def normalise_whitespace( str ):
    """Collapse each run of whitespace into a single space and strip the
    ends of the string."""
    return u' '.join(str.split())
1672
# Matches GHC call-site annotations such as
# ", called at Foo.hs:12:34 in pkg-1.0:" (group 1 is the file name).
callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')

def normalise_callstacks(s):
    """Normalise call-stack output: erase line/column numbers and package
    ids, and (unless the test keeps them) drop -prof call stacks."""
    opts = getTestOpts()
    def repl(matches):
        location = matches.group(1)
        location = normalise_slashes_(location)
        return ', called at {0}:<line>:<column> in <package-id>:'.format(location)
    # Ignore line number differences in call stacks (#10834).
    s = re.sub(callSite_re, repl, s)
    # Ignore the change in how we identify implicit call-stacks
    s = s.replace('from ImplicitParams', 'from HasCallStack')
    if not opts.keep_prof_callstacks:
        # Don't output prof callstacks. Test output should be
        # independent from the WAY we run the test.
        s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
    return s
1690
# Matches the two fingerprint words in a Typeable TyCon representation,
# e.g. "TyCon 18## 42## ".
tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)

def normalise_type_reps(str):
    """ Normalise out fingerprints from Typeable TyCon representations """
    return tyCon_re.sub('TyCon FINGERPRINT FINGERPRINT ', str)
1696
def normalise_errmsg( str ):
    """Normalise error-messages emitted via stderr"""
    # IBM AIX's `ld` is a bit chatty
    if opsys('aix'):
        str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    str = normalise_callstacks(str)
    str = normalise_type_reps(str)

    # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
    # the colon is there because it appears in error messages; this
    # hacky solution is used in place of more sophisticated filename
    # mangling
    str = re.sub('([^\\s])\\.exe', '\\1', str)
    # normalise slashes, minimise Windows/Unix filename differences
    str = re.sub('\\\\', '/', str)
    # The inplace ghc's are called ghc-stage[123] to avoid filename
    # collisions, so we need to normalise that to just "ghc"
    str = re.sub('ghc-stage[123]', 'ghc', str)
    # Error messages sometimes contain the integer implementation package
    str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
    # Also filter out bullet characters. This is because bullets are used to
    # separate error sections, and tests shouldn't be sensitive to how the
    # division happens.
    bullet = u'•'.encode('utf8') if isinstance(str, bytes) else u'•'
    str = str.replace(bullet, '')
    return str
1727
1728 # normalise a .prof file, so that we can reasonably compare it against
1729 # a sample. This doesn't compare any of the actual profiling data,
1730 # only the shape of the profile and the number of entries.
def normalise_prof (str):
    """Normalise a .prof file so that it can reasonably be compared
    against a sample: none of the actual profiling data survives, only
    the shape of the profile and the number of entries.

    A row such as

      readPrec Main Main_1.hs:7:13-16  109  1  0.0  0.6  0.0  0.6

    becomes (modulo whitespace between columns)

      readPrec Main Main_1.hs:7:13-16 1
    """
    rules = [
        # strip everything up to (and including) the "COST CENTRE" header
        ('^(.*\n)*COST CENTRE[^\n]*\n', ''),
        # CAF/IDLE rows change unpredictably; drop them
        ('[ \t]*(CAF|IDLE).*\n', ''),
        # Main.main may appear under either CAF or MAIN; drop it too
        ('[ \t]*main[ \t]+Main.*\n', ''),
        # Each remaining row has nine whitespace-separated columns; keep
        # columns 1 (cost centre), 2 (module), 3 (src) and 5 (entries).
        # SCC names contain no whitespace, so splitting like this is safe.
        (r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
         '\\1 \\2 \\3 \\5\n'),
    ]
    for pattern, repl in rules:
        str = re.sub(pattern, repl, str)
    return str
1776
def normalise_slashes_( str ):
    """Turn backslashes into forward slashes (Windows path normalisation)."""
    return str.replace('\\', '/')
1780
def normalise_exe_( str ):
    """Strip every '.exe' occurrence (Windows executable suffix)."""
    return str.replace('.exe', '')
1784
def normalise_output( str ):
    """Normalise the program's output for comparison with samples."""
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    # Remove a .exe extension (for Windows)
    # This can occur in error messages generated by the program.
    str = re.sub('([^\\s])\\.exe', '\\1', str)
    str = normalise_callstacks(str)
    str = normalise_type_reps(str)
    return str
1796
def normalise_asm( str ):
    """Reduce assembly output to labels and instruction mnemonics (plus
    the target of call instructions), dropping assembler directives."""
    directive = re.compile('^[ \t]*\\..*$')
    kept = []
    for raw_line in str.split('\n'):
        # Drop metadata directives (e.g. ".type")
        if directive.match(raw_line):
            continue
        tokens = re.sub('@plt', '', raw_line).lstrip().split()
        # Drop empty lines.
        if not tokens:
            continue
        # Keep operands only for call instructions.
        if tokens[0] == 'call':
            kept.append(tokens[0] + ' ' + tokens[1])
        else:
            kept.append(tokens[0])
    return u'\n'.join(kept)
1817
def if_verbose( n, s ):
    """Print s when the configured verbosity is at least n."""
    if config.verbose >= n:
        print(s)
1821
def if_verbose_dump( n, f ):
    """Print the contents of file f when verbosity is at least n; print
    an empty line if the file cannot be read.

    BUGFIX: the file handle is now closed (context manager) and the bare
    `except:` is narrowed to `except Exception:`.
    """
    if config.verbose >= n:
        try:
            with open(f) as h:
                print(h.read())
        except Exception:
            # e.g. the file was never created because the program died early
            print('')
1828
def runCmd(cmd, timeout_multiplier=1.0):
    """Run `cmd` under the testsuite timeout program and return its exit
    code. '{name}' placeholders in cmd are filled in from config."""
    timeout_prog = strip_quotes(config.timeout_prog)
    timeout = str(int(ceil(config.timeout * timeout_multiplier)))

    # Format cmd using config. Example: cmd='{hpc} report A.tix'
    cmd = cmd.format(**config.__dict__)
    if_verbose( 3, cmd )

    # cmd is a complex command in Bourne-shell syntax
    # e.g (cd . && 'C:/users/simonpj/HEAD/inplace/bin/ghc-stage2' ...etc)
    # Hence it must ultimately be run by a Bourne shell. It's timeout's job
    # to invoke the Bourne shell
    r = subprocess.call([timeout_prog, timeout, cmd])
    if r == 98:
        # The python timeout program uses 98 to signal that ^C was pressed
        stopNow()
    if r == 99 and getTestOpts().exit_code != 99:
        # Only print a message when timeout killed the process unexpectedly.
        if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
    return r
1849
1850 # -----------------------------------------------------------------------------
1851 # checking if ghostscript is available for checking the output of hp2ps
1852
def genGSCmd(psfile):
    """Build the ghostscript command line that validates `psfile`; the
    '{gs}' placeholder is substituted later by runCmd."""
    return '{gs} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "' + psfile + '"'
1855
def gsNotWorking():
    """Report that ghostscript cannot be used for hp2ps tests."""
    # NOTE(review): gs_working is never assigned here, so this `global`
    # declaration looks vestigial — confirm before removing.
    global gs_working
    print("GhostScript not available for hp2ps tests")
1859
# Module-load probe: ghostscript is only trusted for hp2ps checks when it
# accepts a known-good .ps file AND rejects a known-bad one.
global gs_working
gs_working = 0
if config.have_profiling:
    if config.gs != '':
        resultGood = runCmd(genGSCmd(config.confdir + '/good.ps'))
        if resultGood == 0:
            resultBad = runCmd(genGSCmd(config.confdir + '/bad.ps') +
                               ' >/dev/null 2>&1')
            if resultBad != 0:
                print("GhostScript available for hp2ps tests")
                gs_working = 1
            else:
                gsNotWorking()
        else:
            gsNotWorking()
    else:
        gsNotWorking()
else:
    gsNotWorking()
1877
def add_suffix( name, suffix ):
    """Return 'name.suffix', or just `name` when suffix is empty."""
    return name if suffix == '' else name + '.' + suffix
1883
def add_hs_lhs_suffix(name):
    """Attach the source-file extension implied by the test's options
    (defaulting to .hs for an ordinary Haskell test)."""
    opts = getTestOpts()
    ext_by_flag = [(opts.c_src,      'c'),
                   (opts.cmm_src,    'cmm'),
                   (opts.objc_src,   'm'),
                   (opts.objcpp_src, 'mm'),
                   (opts.literate,   'lhs')]
    for flag, ext in ext_by_flag:
        if flag:
            return add_suffix(name, ext)
    return add_suffix(name, 'hs')
1897
def replace_suffix( name, suffix ):
    """Swap name's extension (if any) for `suffix`."""
    return os.path.splitext(name)[0] + '.' + suffix
1901
def in_testdir(name, suffix=''):
    """Path of name(.suffix) inside the test's working directory."""
    return os.path.join(getTestOpts().testdir, add_suffix(name, suffix))
1904
def in_srcdir(name, suffix=''):
    """Path of name(.suffix) inside the test's source directory."""
    return os.path.join(getTestOpts().srcdir, add_suffix(name, suffix))
1907
1908 # Finding the sample output. The filename is of the form
1909 #
1910 # <test>.stdout[-ws-<wordsize>][-<platform>]
1911 #
def find_expected_file(name, suff):
    # Locate the expected-output file for this test, preferring the most
    # platform/word-size-specific variant that exists in the source dir.
    basename = add_suffix(name, suff)
    for plat in ('-' + config.platform, '-' + config.os, ''):
        for ws in ('-ws-' + config.wordsize, ''):
            candidate = basename + ws + plat
            if os.path.exists(in_srcdir(candidate)):
                return candidate
    # No specialised file found: fall back to the plain name.
    return basename
1924
def cleanup():
    # Remove the test's working directory; errors (e.g. it never existed)
    # are deliberately ignored.
    testdir = getTestOpts().testdir
    shutil.rmtree(testdir, ignore_errors=True)
1927
1928 # -----------------------------------------------------------------------------
1929 # Return a list of all the files ending in '.T' below directories roots.
1930
def findTFiles(roots):
    # Yield every file ending in '.T' found beneath the given roots.
    for root in roots:
        for path, dirs, filenames in os.walk(root, topdown=True):
            # Prune uncleaned .run directories so stale .T copies inside
            # them are never picked up; assigning through dirs[:] makes
            # os.walk honour the pruning (topdown walk only).
            dirs[:] = sorted(d for d in dirs
                             if not d.endswith(testdir_suffix))
            for fname in filenames:
                if fname.endswith('.T'):
                    yield os.path.join(path, fname)
1940
1941 # -----------------------------------------------------------------------------
1942 # Output a test summary to the specified file object
1943
def summary(t, file, short=False):
    # Write a human-readable summary of test-run results 't' to 'file'.
    # With short=True only the list of unexpectedly-behaving tests is
    # emitted (no counts, no per-test detail sections).
    file.write('\n')
    printUnexpectedTests(file,
        [t.unexpected_passes, t.unexpected_failures,
         t.unexpected_stat_failures])

    if short:
        # Only print the list of unexpected tests above.
        return

    elapsed = str(datetime.timedelta(seconds=
        round(time.time() - time.mktime(t.start_time))))
    file.write(''.join(
        ['SUMMARY for test run started at ',
         time.strftime("%c %Z", t.start_time), '\n',
         elapsed.rjust(8),
         ' spent to go through\n',
         repr(t.total_tests).rjust(8),
         ' total tests, which gave rise to\n',
         repr(t.total_test_cases).rjust(8),
         ' test cases, of which\n',
         repr(t.n_tests_skipped).rjust(8),
         ' were skipped\n',
         '\n',
         repr(t.n_missing_libs).rjust(8),
         ' had missing libraries\n',
         repr(t.n_expected_passes).rjust(8),
         ' expected passes\n',
         repr(t.n_expected_failures).rjust(8),
         ' expected failures\n',
         '\n',
         repr(t.n_framework_failures).rjust(8),
         ' caused framework failures\n',
         repr(t.n_unexpected_passes).rjust(8),
         ' unexpected passes\n',
         repr(t.n_unexpected_failures).rjust(8),
         ' unexpected failures\n',
         repr(t.n_unexpected_stat_failures).rjust(8),
         ' unexpected stat failures\n',
         '\n']))

    # Per-category detail sections; each is shown only when non-empty.
    sections = [
        (t.n_unexpected_passes, 'Unexpected passes:\n',
         t.unexpected_passes, printPassingTestInfosSummary),
        (t.n_unexpected_failures, 'Unexpected failures:\n',
         t.unexpected_failures, printFailingTestInfosSummary),
        (t.n_unexpected_stat_failures, 'Unexpected stat failures:\n',
         t.unexpected_stat_failures, printFailingTestInfosSummary),
        (t.n_framework_failures, 'Test framework failures:\n',
         t.framework_failures, printFrameworkFailureSummary),
    ]
    for count, heading, infos, render in sections:
        if count > 0:
            file.write(heading)
            render(file, infos)

    if stopping():
        file.write('WARNING: Testsuite run was terminated early\n')
2000
def printUnexpectedTests(file, testInfoss):
    # Emit a shell-pasteable TEST="..." line naming every test that
    # behaved unexpectedly, gathered across all the given result maps
    # (each maps directory -> test name -> details).
    unexpected = [test
                  for testInfos in testInfoss
                  for directory in testInfos
                  for test in testInfos[directory]]
    if unexpected:
        file.write('Unexpected results from:\n')
        file.write('TEST="' + ' '.join(unexpected) + '"\n')
        file.write('\n')
2012
def printPassingTestInfosSummary(file, testInfos):
    # One line per unexpectedly-passing test: directory (padded to align
    # columns), test name, and the comma-separated list of ways.
    # NOTE: assumes testInfos is non-empty (callers guard on the count).
    maxDirLen = max(len(d) for d in testInfos)
    for directory in sorted(testInfos):
        for test in sorted(testInfos[directory]):
            ways = ','.join(testInfos[directory][test])
            file.write('   ' + directory.ljust(maxDirLen + 2) + test +
                       ' (' + ways + ')\n')
    file.write('\n')
2024
def printFailingTestInfosSummary(file, testInfos):
    # One line per (test, failure reason) pair: padded directory, test
    # name, the reason in brackets, and the ways it failed for.
    # NOTE: assumes testInfos is non-empty (callers guard on the count).
    maxDirLen = max(len(d) for d in testInfos)
    for directory in sorted(testInfos):
        for test in sorted(testInfos[directory]):
            for reason in testInfos[directory][test]:
                ways = ','.join(testInfos[directory][test][reason])
                file.write('   ' + directory.ljust(maxDirLen + 2) + test +
                           ' [' + reason + ']' +
                           ' (' + ways + ')\n')
    file.write('\n')
2039
def printFrameworkFailureSummary(file, testInfos):
    # One line per test that hit a framework failure: padded test name
    # plus the ways in which the framework failed.
    # NOTE: assumes testInfos is non-empty (callers guard on the count).
    maxNameLen = max(len(n) for n in testInfos)
    for name in sorted(testInfos):
        file.write('   ' + name.ljust(maxNameLen + 2) +
                   ' (' + ','.join(testInfos[name]) + ')\n')
    file.write('\n')
2049
def modify_lines(s, f):
    # Apply 'f' to each line of 's' and re-join the result.
    out = u'\n'.join(map(f, s.splitlines()))
    if out and not out.endswith('\n'):
        # Prevent '\ No newline at end of file' warnings when diffing.
        out += '\n'
    return out