Merge branch 'master' of http://git.haskell.org/ghc
[ghc.git] / testsuite / driver / testlib.py
1 #
2 # (c) Simon Marlow 2002
3 #
4
5 # This allows us to use the "with X:" syntax with python 2.5:
6 from __future__ import with_statement
7
8 import shutil
9 import sys
10 import os
11 import errno
12 import string
13 import re
14 import traceback
15 import time
16 import datetime
17 import copy
18 import glob
19 import types
20 from math import ceil, trunc
21
# Probe for the subprocess module (absent on some old/odd Python builds);
# remember the result so callers can fall back to os.spawnv/os.system.
have_subprocess = False
try:
    import subprocess
    have_subprocess = True
except:
    print "Warning: subprocess not found, will fall back to spawnv"
28
29 from string import join
30 from testglobals import *
31 from testutil import *
32
# Threading support is optional; 'thread' is the Python 2 low-level module.
if config.use_threads:
    import threading
    import thread

# Global "please stop" flag, set on KeyboardInterrupt (see do_test) and
# polled between tests so a Ctrl-C ends the run promptly.
global wantToStop
wantToStop = False
def stopNow():
    # Request that the test run stop at the next opportunity.
    global wantToStop
    wantToStop = True
def stopping():
    # True once stopNow() has been called.
    return wantToStop
44
45 # Options valid for the current test only (these get reset to
46 # testdir_testopts after each test).
47
# Holder for the current test's options.  With threads each worker gets
# its own thread-local slot; otherwise a plain object suffices.
global testopts_local
if config.use_threads:
    testopts_local = threading.local()
else:
    class TestOpts_Local:
        pass
    testopts_local = TestOpts_Local()

def getTestOpts():
    # The TestOptions of the test currently being set up/run on this thread.
    return testopts_local.x

def setLocalTestOpts(opts):
    # Install `opts` as the current test's options for this thread.
    global testopts_local
    testopts_local.x=opts
62
def isStatsTest():
    """True when the current test checks any compiler or runtime stats field."""
    opts = getTestOpts()
    return bool(opts.compiler_stats_range_fields) or bool(opts.stats_range_fields)
66
67
# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
    # Chain `f` onto the per-directory settings; executeSetups flattens
    # the nested [settings, f] lists when a test is declared.
    global thisdir_settings
    thisdir_settings = [thisdir_settings, f]
73
74 # -----------------------------------------------------------------------------
75 # Canned setup functions for common cases. eg. for a test you might say
76 #
77 # test('test001', normal, compile, [''])
78 #
79 # to run it without any options, but change it to
80 #
81 # test('test001', expect_fail, compile, [''])
82 #
83 # to expect failure for this test.
84
def normal( name, opts ):
    """Setup function that changes nothing: run the test as-is."""
    pass

def skip( name, opts ):
    """Setup function: do not run this test at all."""
    opts.skip = 1

def expect_fail( name, opts ):
    """Setup function: this test is expected to fail in every way."""
    opts.expect = 'fail'
93
94 def reqlib( lib ):
95 return lambda name, opts, l=lib: _reqlib (name, opts, l )
96
97 # Cache the results of looking to see if we have a library or not.
98 # This makes quite a difference, especially on Windows.
99 have_lib = {}
100
101 def _reqlib( name, opts, lib ):
102 if have_lib.has_key(lib):
103 got_it = have_lib[lib]
104 else:
105 if have_subprocess:
106 # By preference we use subprocess, as the alternative uses
107 # /dev/null which mingw doesn't have.
108 p = subprocess.Popen([config.ghc_pkg, '--no-user-package-db', 'describe', lib],
109 stdout=subprocess.PIPE,
110 stderr=subprocess.PIPE)
111 # read from stdout and stderr to avoid blocking due to
112 # buffers filling
113 p.communicate()
114 r = p.wait()
115 else:
116 r = os.system(config.ghc_pkg + ' describe ' + lib
117 + ' > /dev/null 2> /dev/null')
118 got_it = r == 0
119 have_lib[lib] = got_it
120
121 if not got_it:
122 opts.expect = 'missing-lib'
123
def req_profiling( name, opts ):
    # Expect failure unless profiling libraries are available.
    if not config.have_profiling:
        opts.expect = 'fail'

def req_shared_libs( name, opts ):
    # Expect failure unless shared libraries are available.
    if not config.have_shared_libs:
        opts.expect = 'fail'

def req_interp( name, opts ):
    # Expect failure unless the interpreter (GHCi) is available.
    if not config.have_interp:
        opts.expect = 'fail'

def req_smp( name, opts ):
    # Expect failure unless the SMP runtime is available.
    if not config.have_smp:
        opts.expect = 'fail'

def ignore_output( name, opts ):
    # Do not compare the test's output against the expected files.
    opts.ignore_output = 1

def no_stdin( name, opts ):
    # Do not attach a stdin file to the test program.
    opts.no_stdin = 1

def combined_output( name, opts ):
    # Expect stdout and stderr combined into a single stream.
    opts.combined_output = True
148
149 # -----
150
def expect_fail_for( ways ):
    # Expect failure, but only in the listed ways.
    return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )

def _expect_fail_for( name, opts, ways ):
    opts.expect_fail_for = ways

def expect_broken( bug ):
    # This test fails because of open ticket `bug`; recorded so that
    # --list-broken can report it.
    return lambda name, opts, b=bug: _expect_broken (name, opts, b )

def _expect_broken( name, opts, bug ):
    record_broken(name, opts, bug)
    opts.expect = 'fail';

def expect_broken_for( bug, ways ):
    # Like expect_broken, but only for the listed ways.
    return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )

def _expect_broken_for( name, opts, bug, ways ):
    record_broken(name, opts, bug)
    opts.expect_fail_for = ways

def record_broken(name, opts, bug):
    # Remember (ticket, testdir, test) once, for the --list-broken report.
    global brokens
    me = (bug, opts.testdir, name)
    if not me in brokens:
        brokens.append(me)
176
177 # -----
178
def omit_ways( ways ):
    # Never run this test in the listed ways.
    return lambda name, opts, w=ways: _omit_ways( name, opts, w )

def _omit_ways( name, opts, ways ):
    opts.omit_ways = ways

# -----

def only_ways( ways ):
    # Run this test only in the listed ways.
    return lambda name, opts, w=ways: _only_ways( name, opts, w )

def _only_ways( name, opts, ways ):
    opts.only_ways = ways

# -----

def extra_ways( ways ):
    # Run this test in the listed ways in addition to the default ones.
    return lambda name, opts, w=ways: _extra_ways( name, opts, w )

def _extra_ways( name, opts, ways ):
    opts.extra_ways = ways
200
201 # -----
202
def omit_compiler_types( compiler_types ):
    # Skip this test when the compiler under test is one of the given types.
    return lambda name, opts, c=compiler_types: _omit_compiler_types(name, opts, c)

def _omit_compiler_types( name, opts, compiler_types ):
    if config.compiler_type in compiler_types:
        opts.skip = 1

# -----

def only_compiler_types( compiler_types ):
    # Run this test only when the compiler under test is one of the given types.
    return lambda name, opts, c=compiler_types: _only_compiler_types(name, opts, c)

def _only_compiler_types( name, opts, compiler_types ):
    if config.compiler_type not in compiler_types:
        opts.skip = 1

# -----

def set_stdin( file ):
    # Feed the named file to the test program's stdin.
    return lambda name, opts, f=file: _set_stdin(name, opts, f);

def _set_stdin( name, opts, f ):
    opts.stdin = f
226
227 # -----
228
229 def exit_code( val ):
230 return lambda name, opts, v=val: _exit_code(name, opts, v);
231
232 def _exit_code( name, opts, v ):
233 opts.exit_code = v
234
def signal_exit_code( val ):
    # Expected exit code for a program killed by signal number `val`.
    if opsys('solaris2'):
        return exit_code( val );
    else:
        # When an application running on Linux receives a fatal error
        # signal, its exit code is encoded as 128 + the signal value.
        # See http://www.tldp.org/LDP/abs/html/exitcodes.html
        # Mac OS X appears to behave the same way, judging by the
        # OS X builder's behaviour (not definitively confirmed).
        return exit_code( val+128 );
245
246 # -----
247
def timeout_multiplier( val ):
    # Scale this test's timeout by `val`.
    return lambda name, opts, v=val: _timeout_multiplier(name, opts, v)

def _timeout_multiplier( name, opts, v ):
    opts.timeout_multiplier = v

# -----

def extra_run_opts( val ):
    # Extra command-line arguments when running the test program.
    return lambda name, opts, v=val: _extra_run_opts(name, opts, v);

def _extra_run_opts( name, opts, v ):
    opts.extra_run_opts = v

# -----

def extra_hc_opts( val ):
    # Extra flags to pass to the compiler for this test.
    return lambda name, opts, v=val: _extra_hc_opts(name, opts, v);

def _extra_hc_opts( name, opts, v ):
    opts.extra_hc_opts = v

# -----

def extra_clean( files ):
    # Additional files to delete when cleaning up after this test.
    return lambda name, opts, v=files: _extra_clean(name, opts, v);

def _extra_clean( name, opts, v ):
    opts.clean_files = v
277
278 # -----
279
280 def stats_num_field( field, expecteds ):
281 return lambda name, opts, f=field, e=expecteds: _stats_num_field(name, opts, f, e);
282
283 def _stats_num_field( name, opts, field, expecteds ):
284 if field in opts.stats_range_fields:
285 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
286
287 if type(expecteds) is types.ListType:
288 for (b, expected, dev) in expecteds:
289 if b:
290 opts.stats_range_fields[field] = (expected, dev)
291 return
292 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
293
294 else:
295 (expected, dev) = expecteds
296 opts.stats_range_fields[field] = (expected, dev)
297
def compiler_stats_num_field( field, expecteds ):
    # Setup factory: check compiler (-t) stats field `field`; `expecteds`
    # is a list of (guard, expected, deviation-%) triples, the first
    # triple with a true guard supplies the expected value.
    return lambda name, opts, f=field, e=expecteds: _compiler_stats_num_field(name, opts, f, e);

def _compiler_stats_num_field( name, opts, field, expecteds ):
    if field in opts.compiler_stats_range_fields:
        framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')

    # Compiler performance numbers change when debugging is on, making the results
    # useless and confusing. Therefore, skip if debugging is on.
    if compiler_debugged():
        skip(name, opts)

    for (b, expected, dev) in expecteds:
        if b:
            opts.compiler_stats_range_fields[field] = (expected, dev)
            return

    framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
316
317 # -----
318
def when(b, f):
    """Apply setup `f` only when `b` holds, else do nothing (normal).

    Under --list-broken we always apply `f`, so that every
    expect_broken call is seen and reported.
    """
    return f if (b or config.list_broken) else normal

def unless(b, f):
    """Dual of when: apply setup `f` unless `b` holds."""
    return when(not b, f)
329
# Predicates over the testsuite configuration, for use with when()/unless().

def doing_ghci():
    # True if the 'ghci' way is enabled for this run.
    return 'ghci' in config.run_ways

def ghci_dynamic( ):
    # True if GHC itself is dynamically linked.
    return config.ghc_dynamic

def fast():
    # True in fast mode (each test runs in at most one way).
    return config.fast

def platform( plat ):
    # True if the full platform string matches `plat`.
    return config.platform == plat

def opsys( os ):
    # True if the target operating system matches `os`.
    return config.os == os

def arch( arch ):
    # True if the target architecture matches `arch`.
    return config.arch == arch

def wordsize( ws ):
    # True if the word size matches `ws` (config stores it as a string).
    return config.wordsize == str(ws)

def msys( ):
    # True when running under MSYS.
    return config.msys

def cygwin( ):
    # True when running under Cygwin.
    return config.cygwin

def have_vanilla( ):
    # True if vanilla libraries are available.
    return config.have_vanilla

def have_dynamic( ):
    # True if dynamic libraries are available.
    return config.have_dynamic

def have_profiling( ):
    # True if profiling libraries are available.
    return config.have_profiling

def in_tree_compiler( ):
    # True when testing a compiler from the build tree.
    return config.in_tree_compiler

def compiler_type( compiler ):
    # True if the compiler under test is of the given type (e.g. 'ghc').
    return config.compiler_type == compiler

def compiler_lt( compiler, version ):
    # True if testing `compiler` at a version strictly below `version`.
    return config.compiler_type == compiler and \
           version_lt(config.compiler_version, version)

def compiler_le( compiler, version ):
    # True if testing `compiler` at a version no greater than `version`.
    return config.compiler_type == compiler and \
           version_le(config.compiler_version, version)

def compiler_gt( compiler, version ):
    # True if testing `compiler` at a version strictly above `version`.
    return config.compiler_type == compiler and \
           version_gt(config.compiler_version, version)

def compiler_ge( compiler, version ):
    # True if testing `compiler` at a version no less than `version`.
    return config.compiler_type == compiler and \
           version_ge(config.compiler_version, version)

def unregisterised( ):
    # True for an unregisterised build.
    return config.unregisterised

def compiler_profiled( ):
    # True if the compiler under test is itself profiled.
    return config.compiler_profiled

def compiler_debugged( ):
    # True if the compiler under test was built with debugging enabled.
    return config.compiler_debugged

def tag( t ):
    # True if `t` appears among the compiler's tags.
    return t in config.compiler_tags
399
400 # ---
401
402 def namebase( nb ):
403 return lambda opts, nb=nb: _namebase(opts, nb)
404
405 def _namebase( opts, nb ):
406 opts.with_namebase = nb
407
408 # ---
409
def high_memory_usage(name, opts):
    """Run this test with nothing else in parallel: it needs the memory."""
    opts.alone = True


# If a test is for a multi-CPU race, then running the test alone
# increases the chance that we'll actually see it.
def multi_cpu_race(name, opts):
    """Run this test alone so a multi-CPU race is more likely to show up."""
    opts.alone = True
417
418 # ---
def literate( name, opts ):
    """The test's source file is literate Haskell."""
    opts.literate = 1

def c_src( name, opts ):
    """The test's source file is C."""
    opts.c_src = 1

def objc_src( name, opts ):
    """The test's source file is Objective-C."""
    opts.objc_src = 1

def objcpp_src( name, opts ):
    """The test's source file is Objective-C++."""
    opts.objcpp_src = 1

def cmm_src( name, opts ):
    """The test's source file is C-- (.cmm)."""
    opts.cmm_src = 1
433
def outputdir( odir ):
    """Setup factory: send compilation output for this test to `odir`."""
    def setter(name, opts):
        _outputdir(name, opts, odir)
    return setter

def _outputdir( name, opts, odir ):
    opts.outputdir = odir
439
440 # ----
441
def pre_cmd( cmd ):
    """Setup factory: run shell command `cmd` in the test dir before the test."""
    def setter(name, opts):
        _pre_cmd(name, opts, cmd)
    return setter

def _pre_cmd( name, opts, cmd ):
    opts.pre_cmd = cmd

# ----

def clean_cmd( cmd ):
    """Setup factory: run shell command `cmd` when cleaning up the test."""
    def setter(name, opts):
        _clean_cmd(name, opts, cmd)
    return setter

def _clean_cmd( name, opts, cmd ):
    opts.clean_cmd = cmd

# ----

def cmd_prefix( prefix ):
    """Setup factory: prepend `prefix` (plus a space) to the test command."""
    def setter(name, opts):
        _cmd_prefix(name, opts, prefix)
    return setter

def _cmd_prefix( name, opts, prefix ):
    opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd;

# ----

def cmd_wrapper( fun ):
    """Setup factory: transform the test command with `fun` before running."""
    def setter(name, opts):
        _cmd_wrapper(name, opts, fun)
    return setter

def _cmd_wrapper( name, opts, fun ):
    opts.cmd_wrapper = fun

# ----

def compile_cmd_prefix( prefix ):
    """Setup factory: prepend `prefix` to the compile command."""
    def setter(name, opts):
        _compile_cmd_prefix(name, opts, prefix)
    return setter

def _compile_cmd_prefix( name, opts, prefix ):
    opts.compile_cmd_prefix = prefix

# ----

def check_stdout( f ):
    """Setup factory: use custom predicate `f` to check the test's stdout."""
    def setter(name, opts):
        _check_stdout(name, opts, f)
    return setter

def _check_stdout( name, opts, f ):
    opts.check_stdout = f
487
488 # ----
489
def normalise_slashes( name, opts ):
    """Normalise path separators in the output before comparing."""
    opts.extra_normaliser = normalise_slashes_

def normalise_exe( name, opts ):
    """Normalise executable names (e.g. .exe suffixes) before comparing."""
    opts.extra_normaliser = normalise_exe_

def normalise_fun( fun ):
    """Setup factory: install `fun` as this test's extra output normaliser."""
    def setter(name, opts):
        _normalise_fun(name, opts, fun)
    return setter

def _normalise_fun( name, opts, f ):
    opts.extra_normaliser = f

def normalise_errmsg_fun( fun ):
    """Setup factory: install `fun` as this test's extra stderr normaliser."""
    def setter(name, opts):
        _normalise_errmsg_fun(name, opts, fun)
    return setter

def _normalise_errmsg_fun( name, opts, f ):
    opts.extra_errmsg_normaliser = f

def two_normalisers(f, g):
    """Compose two normalisers: `g` runs first, then `f`."""
    def composed(x):
        return f(g(x))
    return composed
510
511 # ----
512 # Function for composing two opt-fns together
513
def executeSetups(fs, name, opts):
    """Apply a setup function, or a (possibly nested) list of them, to opts.

    FIX: uses an explicit for-loop instead of map() for the side effects
    (map is lazy on Python 3 and would silently apply nothing), and
    isinstance(..., list) instead of the Python-2-only types.ListType.
    """
    if isinstance(fs, list):
        # If we have a list of setups, then execute each one
        for f in fs:
            executeSetups(f, name, opts)
    else:
        # fs is a single function, so just apply it
        fs(name, opts)
521
522 # -----------------------------------------------------------------------------
523 # The current directory of tests
524
def newTestDir( dir ):
    # Enter a new test directory: reset the per-directory defaults so
    # they only set the testdir and the standard compiler flags.
    global thisdir_settings
    # reset the options for this test directory
    thisdir_settings = lambda name, opts, dir=dir: _newTestDir( name, opts, dir )

def _newTestDir( name, opts, dir ):
    opts.testdir = dir
    opts.compiler_always_flags = config.compiler_always_flags
533
534 # -----------------------------------------------------------------------------
535 # Actually doing tests
536
# Tests that may run in parallel, tests that must run alone, and the set
# of all test names seen so far (for duplicate detection).
parallelTests = []
aloneTests = []
allTestNames = set([])

def runTest (opts, name, func, args):
    # Run one test, either on a fresh thread (throttled by the thread
    # pool) or inline when threading is disabled.
    ok = 0

    if config.use_threads:
        t.thread_pool.acquire()
        try:
            # Wait until a worker slot is free.
            while config.threads<(t.running_threads+1):
                t.thread_pool.wait()
            t.running_threads = t.running_threads+1
            ok=1
            t.thread_pool.release()
            thread.start_new_thread(test_common_thread, (name, opts, func, args))
        except:
            # Only release if we failed before releasing above (ok still 0).
            if not ok:
                t.thread_pool.release()
    else:
        test_common_work (name, opts, func, args)
558
# name  :: String
# setup :: TestOpts -> IO ()
def test (name, setup, func, args):
    # Declare a test: validate its name, build its options from the
    # per-directory defaults plus `setup`, and queue it for running.
    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
    myTestOpts = copy.deepcopy(default_testopts)

    executeSetups([thisdir_settings, setup], name, myTestOpts)

    thisTest = lambda : runTest(myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
    else:
        parallelTests.append(thisTest)
    allTestNames.add(name)
584
if config.use_threads:
    def test_common_thread(name, opts, func, args):
        # Worker-thread entry point: run the test under the global lock,
        # then give the worker slot back to the pool.
        t.lock.acquire()
        try:
            test_common_work(name,opts,func,args)
        finally:
            t.lock.release()
            t.thread_pool.acquire()
            t.running_threads = t.running_threads - 1
            t.thread_pool.notify()
            t.thread_pool.release()
596
def get_package_cache_timestamp():
    """Return the mtime of the package.conf cache file, or 0.0 if it is
    unconfigured or cannot be stat'ed.  Used to detect tests that
    accidentally modify the package database."""
    if config.package_conf_cache_file == '':
        return 0.0
    else:
        try:
            return os.stat(config.package_conf_cache_file).st_mtime
        except OSError:
            # Narrowed from a bare except: only a missing/unreadable
            # file should be treated as "no cache".
            return 0.0
605
606
def test_common_work (name, opts, func, args):
    # The heart of running one test: decide which ways apply, run or
    # skip each way, then clean up the test's files.  Any exception is
    # converted to a framework failure.
    try:
        t.total_tests = t.total_tests+1
        setLocalTestOpts(opts)

        # Snapshot the package DB cache mtime so we can detect tests
        # that modify the package database.
        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()

        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']

        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + filter(lambda way: way not in all_ways,
                                     opts.extra_ways)

        t.total_test_cases = t.total_test_cases + len(all_ways)

        # A way is run unless the test is skipped, filtered out by
        # --only/--ways/omit_ways, or is a perf test under --skip-perf-tests.
        ok_way = lambda way: \
            not getTestOpts().skip \
            and (config.only == [] or name in config.only) \
            and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and way not in getTestOpts().omit_ways

        # Which ways we are asked to skip
        do_ways = filter (ok_way,all_ways)

        # In fast mode, we skip all but one way
        if config.fast and len(do_ways) > 0:
            do_ways = [do_ways[0]]

        if not config.clean_only:
            # Run the required tests...
            for way in do_ways:
                if stopping():
                    break
                do_test (name, way, func, args)

            for way in all_ways:
                if way not in do_ways:
                    skiptest (name,way)

        # Clean up the test's generated files (also the whole job under
        # --clean-only).
        if getTestOpts().cleanup != '' and (config.clean_only or do_ways != []):
            pretest_cleanup(name)
            clean(map (lambda suff: name + suff,
                       ['', '.exe', '.exe.manifest', '.genscript',
                        '.stderr.normalised',        '.stdout.normalised',
                        '.run.stderr.normalised',    '.run.stdout.normalised',
                        '.comp.stderr.normalised',   '.comp.stdout.normalised',
                        '.interp.stderr.normalised', '.interp.stdout.normalised',
                        '.stats', '.comp.stats',
                        '.hi', '.o', '.prof', '.exe.prof', '.hc',
                        '_stub.h', '_stub.c', '_stub.o',
                        '.hp', '.exe.hp', '.ps', '.aux', '.hcr', '.eventlog']))

            if func == multi_compile or func == multi_compile_fail:
                    extra_mods = args[1]
                    clean(map (lambda (f,x): replace_suffix(f, 'o'), extra_mods))
                    clean(map (lambda (f,x): replace_suffix(f, 'hi'), extra_mods))

            clean(getTestOpts().clean_files)

            if getTestOpts().outputdir != None:
                odir = in_testdir(getTestOpts().outputdir)
                try:
                    shutil.rmtree(odir)
                except:
                    pass

            try:
                shutil.rmtree(in_testdir('.hpc.' + name))
            except:
                pass

            try:
                cleanCmd = getTestOpts().clean_cmd
                if cleanCmd != None:
                    result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + cleanCmd)
                    if result != 0:
                        framework_fail(name, 'cleaning', 'clean-command failed: ' + str(result))
            except:
                framework_fail(name, 'cleaning', 'clean-command exception')

        package_conf_cache_file_end_timestamp = get_package_cache_timestamp();

        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))

        # Record files this test wrote but did not remove (best-effort
        # bookkeeping; files_written is maintained elsewhere).
        try:
            for f in files_written[name]:
                if os.path.exists(f):
                    try:
                        if not f in files_written_not_removed[name]:
                            files_written_not_removed[name].append(f)
                    except:
                        files_written_not_removed[name] = [f]
        except:
            pass
    except Exception, e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
717
def clean(strs):
    """Delete, within the test directory, everything matching the glob
    patterns in `strs` (files and empty directories)."""
    # NB. the loop variable used to be called 'str', shadowing the builtin.
    for pattern in strs:
        for name in glob.glob(in_testdir(pattern)):
            clean_full_path(name)
722
def clean_full_path(name):
    # Remove `name` as a file, or failing that as an empty directory,
    # printing (but not failing on) any error other than ENOENT.
    try:
        # Remove files...
        os.remove(name)
    except OSError, e1:
        try:
            # ... and empty directories
            os.rmdir(name)
        except OSError, e2:
            # We don't want to fail here, but we do want to know
            # what went wrong, so print out the exceptions.
            # ENOENT isn't a problem, though, as we clean files
            # that don't necessarily exist.
            if e1.errno != errno.ENOENT:
                print e1
            if e2.errno != errno.ENOENT:
                print e2
740
def do_test(name, way, func, args):
    # Run one test in one way: optional pre-command, the test function
    # itself, then classify the result against the expected outcome and
    # update the global statistics in `t`.
    full_name = name + '(' + way + ')'

    try:
        if_verbose(2, "=====> %s %d of %d %s " % \
                    (full_name, t.total_tests, len(allTestNames), \
                    [t.n_unexpected_passes, \
                     t.n_unexpected_failures, \
                     t.n_framework_failures]))

        # Release the global lock while the test runs so other workers
        # can proceed; re-acquired in the finally below.
        if config.use_threads:
            t.lock.release()

        try:
            preCmd = getTestOpts().pre_cmd
            if preCmd != None:
                result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + preCmd)
                if result != 0:
                    framework_fail(name, way, 'pre-command failed: ' + str(result))
        except:
            framework_fail(name, way, 'pre-command exception')

        try:
            result = apply(func, [name,way] + args)
        finally:
            if config.use_threads:
                t.lock.acquire()

        # Sanity-check the declared expectation.
        if getTestOpts().expect != 'pass' and \
           getTestOpts().expect != 'fail' and \
           getTestOpts().expect != 'missing-lib':
            framework_fail(name, way, 'bad expected ' + getTestOpts().expect)

        try:
            passFail = result['passFail']
        except:
            passFail = 'No passFail found'

        # Classify: (actual pass/fail) x (expected pass/fail/missing-lib).
        if passFail == 'pass':
            if getTestOpts().expect == 'pass' \
               and way not in getTestOpts().expect_fail_for:
                t.n_expected_passes = t.n_expected_passes + 1
                if name in t.expected_passes:
                    t.expected_passes[name].append(way)
                else:
                    t.expected_passes[name] = [way]
            else:
                if_verbose(1, '*** unexpected pass for %s' % full_name)
                t.n_unexpected_passes = t.n_unexpected_passes + 1
                addPassingTestInfo(t.unexpected_passes, getTestOpts().testdir, name, way)
        elif passFail == 'fail':
            if getTestOpts().expect == 'pass' \
               and way not in getTestOpts().expect_fail_for:
                if_verbose(1, '*** unexpected failure for %s' % full_name)
                t.n_unexpected_failures = t.n_unexpected_failures + 1
                reason = result['reason']
                addFailingTestInfo(t.unexpected_failures, getTestOpts().testdir, name, reason, way)
            else:
                if getTestOpts().expect == 'missing-lib':
                    t.n_missing_libs = t.n_missing_libs + 1
                    if name in t.missing_libs:
                        t.missing_libs[name].append(way)
                    else:
                        t.missing_libs[name] = [way]
                else:
                    t.n_expected_failures = t.n_expected_failures + 1
                    if name in t.expected_failures:
                        t.expected_failures[name].append(way)
                    else:
                        t.expected_failures[name] = [way]
        else:
            framework_fail(name, way, 'bad result ' + passFail)
    except KeyboardInterrupt:
        # Ctrl-C: ask the whole run to stop after this test.
        stopNow()
    except:
        framework_fail(name, way, 'do_test exception')
        traceback.print_exc()
818
def addPassingTestInfo (testInfos, directory, name, way):
    """Record an unexpected pass of `name` (in `directory`) for `way`.

    testInfos is a nested dict: directory -> name -> [ways].
    """
    # Strip a leading './' or '.\' from the directory for stable keys.
    directory = re.sub('^\\.[/\\\\]', '', directory)
    testInfos.setdefault(directory, {}).setdefault(name, []).append(way)
829
def addFailingTestInfo (testInfos, directory, name, reason, way):
    """Record an unexpected failure of `name` (in `directory`) for `way`.

    testInfos is a nested dict: directory -> name -> reason -> [ways].
    """
    # Strip a leading './' or '.\' from the directory for stable keys.
    directory = re.sub('^\\.[/\\\\]', '', directory)
    perName = testInfos.setdefault(directory, {}).setdefault(name, {})
    perName.setdefault(reason, []).append(way)
843
def skiptest (name, way):
    # Record that `name` was skipped in `way`.
    # print 'Skipping test \"', name, '\"'
    t.n_tests_skipped = t.n_tests_skipped + 1
    if name in t.tests_skipped:
        t.tests_skipped[name].append(way)
    else:
        t.tests_skipped[name] = [way]
851
def framework_fail( name, way, reason ):
    # Record a failure of the test framework itself (as opposed to a
    # failure of the test), and report it at verbosity >= 1.
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
    t.n_framework_failures = t.n_framework_failures + 1
    if name in t.framework_failures:
        t.framework_failures[name].append(way)
    else:
        t.framework_failures[name] = [way]
860
def badResult(result):
    """True unless `result` is a result dict whose 'passFail' is 'pass'.

    The except clause is narrowed from a bare except: a missing key
    (KeyError) or a non-dict result (TypeError) both count as bad.
    """
    try:
        return result['passFail'] != 'pass'
    except (KeyError, TypeError):
        return True
868
def passed():
    """The result value for a passing test."""
    return dict(passFail='pass')

def failBecause(reason):
    """The result value for a failing test, carrying the reason."""
    return dict(passFail='fail', reason=reason)
874
875 # -----------------------------------------------------------------------------
876 # Generic command tests
877
878 # A generic command test is expected to run and exit successfully.
879 #
880 # The expected exit code can be changed via exit_code() as normal, and
881 # the expected stdout/stderr are stored in <testname>.stdout and
882 # <testname>.stderr. The output of the command can be ignored
883 # altogether by using run_command_ignore_output instead of
884 # run_command.
885
def run_command( name, way, cmd ):
    # Run an arbitrary command and check its exit code and output like
    # any other test (see the comment block above).
    return simple_run( name, '', cmd, '' )
888
889 # -----------------------------------------------------------------------------
890 # GHCi tests
891
def ghci_script( name, way, script ):
    # Run `script` through GHCi (--interactive) and check the output.
    # filter out -fforce-recomp from compiler_always_flags, because we're
    # actually testing the recompilation behaviour in the GHCi tests.
    flags = filter(lambda f: f != '-fforce-recomp', getTestOpts().compiler_always_flags)
    flags.append(getTestOpts().extra_hc_opts)
    if getTestOpts().outputdir != None:
        flags.extend(["-outputdir", getTestOpts().outputdir])

    # We pass HC and HC_OPTS as environment variables, so that the
    # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
    cmd = "HC='" + config.compiler + "' " + \
          "HC_OPTS='" + join(flags,' ') + "' " + \
          "'" + config.compiler + "'" + \
          ' --interactive -v0 -ignore-dot-ghci ' + \
          join(flags,' ')

    getTestOpts().stdin = script
    return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
910
911 # -----------------------------------------------------------------------------
912 # Compile-only tests
913
def compile( name, way, extra_hc_opts ):
    # Compile a single module; expect success.
    return do_compile( name, way, 0, '', [], extra_hc_opts )

def compile_fail( name, way, extra_hc_opts ):
    # Compile a single module; expect the compilation to fail.
    return do_compile( name, way, 1, '', [], extra_hc_opts )

def multimod_compile( name, way, top_mod, extra_hc_opts ):
    # Compile a multi-module program rooted at `top_mod`; expect success.
    return do_compile( name, way, 0, top_mod, [], extra_hc_opts )

def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
    # Compile a multi-module program rooted at `top_mod`; expect failure.
    return do_compile( name, way, 1, top_mod, [], extra_hc_opts )

def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Compile extra modules separately, then `top_mod`; expect success.
    return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)

def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Compile extra modules separately, then `top_mod`; expect failure.
    return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
931
def do_compile( name, way, should_fail, top_mod, extra_mods, extra_hc_opts ):
    """Compile-only test driver: build any extra modules, build the test
    itself, and compare the compiler's stderr with the expected file.
    Returns passed() or a failBecause() result."""
    # print 'Compile only, extra args = ', extra_hc_opts
    pretest_cleanup(name)

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    # Extra modules force a rebuild of the main program.
    force = 0
    if extra_mods:
        force = 1
    result = simple_build( name, way, extra_hc_opts, should_fail, top_mod, 0, 1, force)

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    # 'is None' replaces the non-idiomatic '== None' comparison.
    if getTestOpts().with_namebase is None:
        namebase = name
    else:
        namebase = getTestOpts().with_namebase

    (platform_specific, expected_stderr_file) = platform_wordsize_qualify(namebase, 'stderr')
    actual_stderr_file = qualify(name, 'comp.stderr')

    if not compare_outputs('stderr', \
                           two_normalisers(two_normalisers(getTestOpts().extra_errmsg_normaliser, normalise_errmsg), normalise_whitespace), \
                           expected_stderr_file, actual_stderr_file):
        return failBecause('stderr mismatch')

    # no problems found, this test passed
    return passed()
968
def compile_cmp_asm( name, way, extra_hc_opts ):
    # Compile a .cmm file keeping the generated assembly (-keep-s-files)
    # and compare it against the expected .asm file.
    print 'Compile only, extra args = ', extra_hc_opts
    pretest_cleanup(name)
    result = simple_build( name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0, 0)

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    if getTestOpts().with_namebase == None:
        namebase = name
    else:
        namebase = getTestOpts().with_namebase

    (platform_specific, expected_asm_file) = platform_wordsize_qualify(namebase, 'asm')
    actual_asm_file = qualify(name, 's')

    if not compare_outputs('asm', two_normalisers(normalise_errmsg, normalise_asm), \
                           expected_asm_file, actual_asm_file):
        return failBecause('asm mismatch')

    # no problems found, this test passed
    return passed()
995
996 # -----------------------------------------------------------------------------
997 # Compile-and-run tests
998
def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Build the test (or hand it to the interpreter for the ghci way)
    # and then run the resulting program.
    # print 'Compile and run, extra args = ', extra_hc_opts
    pretest_cleanup(name)

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    if way == 'ghci': # interpreted...
        return interpreter_run( name, way, extra_hc_opts, 0, top_mod )
    else: # compiled...
        # Extra modules force a rebuild of the main program.
        force = 0
        if extra_mods:
            force = 1

        result = simple_build( name, way, extra_hc_opts, 0, top_mod, 1, 1, force)
        if badResult(result):
            return result

        cmd = './' + name;

        # we don't check the compiler's stderr for a compile-and-run test
        return simple_run( name, way, cmd, getTestOpts().extra_run_opts )

def compile_and_run( name, way, extra_hc_opts ):
    # Single-module compile-and-run.
    return compile_and_run__( name, way, '', [], extra_hc_opts)

def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
    # Compile-and-run rooted at `top_mod`.
    return compile_and_run__( name, way, top_mod, [], extra_hc_opts)

def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Compile-and-run with extra modules built separately first.
    return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
1032
def stats( name, way, stats_file ):
    # Check the runtime stats in `stats_file` against the ranges that
    # were registered via stats_num_field.
    opts = getTestOpts()
    return checkStats(name, way, stats_file, opts.stats_range_fields)
1036
1037 # -----------------------------------------------------------------------------
1038 # Check -t stats info
1039
1040 def checkStats(name, way, stats_file, range_fields):
1041 full_name = name + '(' + way + ')'
1042
1043 result = passed()
1044 if len(range_fields) > 0:
1045 f = open(in_testdir(stats_file))
1046 contents = f.read()
1047 f.close()
1048
1049 for (field, (expected, dev)) in range_fields.items():
1050 m = re.search('\("' + field + '", "([0-9]+)"\)', contents)
1051 if m == None:
1052 print 'Failed to find field: ', field
1053 result = failBecause('no such stats field')
1054 val = int(m.group(1))
1055
1056 lowerBound = trunc( expected * ((100 - float(dev))/100))
1057 upperBound = trunc(0.5 + ceil(expected * ((100 + float(dev))/100)))
1058
1059 deviation = round(((float(val) * 100)/ expected) - 100, 1)
1060
1061 if val < lowerBound:
1062 print field, 'value is too low:'
1063 print '(If this is because you have improved GHC, please'
1064 print 'update the test so that GHC doesn\'t regress again)'
1065 result = failBecause('stat too good')
1066 if val > upperBound:
1067 print field, 'value is too high:'
1068 result = failBecause('stat not good enough')
1069
1070 if val < lowerBound or val > upperBound or config.verbose >= 4:
1071 valStr = str(val)
1072 valLen = len(valStr)
1073 expectedStr = str(expected)
1074 expectedLen = len(expectedStr)
1075 length = max(map (lambda x : len(str(x)), [expected, lowerBound, upperBound, val]))
1076 def display(descr, val, extra):
1077 print descr, string.rjust(str(val), length), extra
1078 display(' Expected ' + full_name + ' ' + field + ':', expected, '+/-' + str(dev) + '%')
1079 display(' Lower bound ' + full_name + ' ' + field + ':', lowerBound, '')
1080 display(' Upper bound ' + full_name + ' ' + field + ':', upperBound, '')
1081 display(' Actual ' + full_name + ' ' + field + ':', val, '')
1082 if val != expected:
1083 display(' Deviation ' + full_name + ' ' + field + ':', deviation, '%')
1084
1085 return result
1086
1087 # -----------------------------------------------------------------------------
1088 # Build a single-module program
1089
def extras_build( way, extra_mods, extra_hc_opts ):
    """Build each (module, options) pair in extra_mods with 'way'.

    Object files of non-Haskell sources are appended to the option
    string so later builds link against them. Returns a pass result
    carrying the accumulated options, or the first bad build result."""
    for (mod, mod_opts) in extra_mods:
        result = simple_build( mod, way, mod_opts + ' ' + extra_hc_opts, 0, '', 0, 0, 0)
        # Non-Haskell sources contribute their object file downstream.
        if not (mod.endswith('.hs') or mod.endswith('.lhs')):
            extra_hc_opts += ' ' + replace_suffix(mod, 'o')
        if badResult(result):
            return result

    return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1100
1101
def simple_build( name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, noforce ):
    """Compile a test program.

    name          -- test name (usually also the source base name)
    way           -- way, selects way-specific compiler flags
    extra_hc_opts -- extra flags for this invocation
    should_fail   -- expect the compiler to exit non-zero
    top_mod       -- when non-empty, build with --make from this module
    link          -- when true, link an executable
    addsuf        -- when true, derive the source name by adding .hs/.lhs etc.
    noforce       -- drop -fforce-recomp from the always-flags

    Compiler stderr goes to <name>.comp.stderr. Returns passed() or a
    failure result."""
    opts = getTestOpts()
    errname = add_suffix(name, 'comp.stderr')
    rm_no_fail( qualify(errname, '') )

    # Remove stale build products so an old binary can't be picked up.
    if top_mod != '':
        srcname = top_mod
        rm_no_fail( qualify(name, '') )
        base, suf = os.path.splitext(top_mod)
        rm_no_fail( qualify(base, '') )
        rm_no_fail( qualify(base, 'exe') )
    elif addsuf:
        srcname = add_hs_lhs_suffix(name)
        rm_no_fail( qualify(name, '') )
    else:
        srcname = name
        rm_no_fail( qualify(name, 'o') )

    rm_no_fail( qualify(replace_suffix(srcname, "o"), '') )

    # Pick the compiler mode: --make for multi-module builds, otherwise
    # link / compile-to-C / compile-only.
    to_do = ''
    if top_mod != '':
        to_do = '--make '
        if link:
            to_do = to_do + '-o ' + name
    elif link:
        to_do = '-o ' + name
    elif opts.compile_to_hc:
        to_do = '-C'
    else:
        to_do = '-c' # just compile

    # Request machine-readable compile-time stats when the test checks
    # compiler performance figures.
    stats_file = name + '.comp.stats'
    if len(opts.compiler_stats_range_fields) > 0:
        extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'

    # Required by GHC 7.3+, harmless for earlier versions:
    if (getTestOpts().c_src or
        getTestOpts().objc_src or
        getTestOpts().objcpp_src or
        getTestOpts().cmm_src):
        extra_hc_opts += ' -no-hs-main '

    if getTestOpts().compile_cmd_prefix == '':
        cmd_prefix = ''
    else:
        cmd_prefix = getTestOpts().compile_cmd_prefix + ' '

    comp_flags = copy.copy(getTestOpts().compiler_always_flags)
    if noforce:
        comp_flags = filter(lambda f: f != '-fforce-recomp', comp_flags)
    if getTestOpts().outputdir != None:
        comp_flags.extend(["-outputdir", getTestOpts().outputdir])

    # Build the whole compile as one Bourne-shell command; stdout and
    # stderr are both captured in errname.
    cmd = 'cd ' + getTestOpts().testdir + " && " + cmd_prefix + "'" \
          + config.compiler + "' " \
          + join(comp_flags,' ') + ' ' \
          + to_do + ' ' + srcname + ' ' \
          + join(config.way_flags(name)[way],' ') + ' ' \
          + extra_hc_opts + ' ' \
          + opts.extra_hc_opts + ' ' \
          + '>' + errname + ' 2>&1'

    result = runCmdFor(name, cmd)

    if result != 0 and not should_fail:
        actual_stderr = qualify(name, 'comp.stderr')
        if_verbose(1,'Compile failed (status ' + `result` + ') errors were:')
        if_verbose_dump(1,actual_stderr)

    # ToDo: if the sub-shell was killed by ^C, then exit

    # Stats are checked even when the compile itself failed.
    statsResult = checkStats(name, way, stats_file, opts.compiler_stats_range_fields)

    if badResult(statsResult):
        return statsResult

    if should_fail:
        if result == 0:
            return failBecause('exit code 0')
    else:
        if result != 0:
            return failBecause('exit code non-0')

    return passed()
1187
1188 # -----------------------------------------------------------------------------
1189 # Run a program and check its output
1190 #
1191 # If testname.stdin exists, route input from that, else
1192 # from /dev/null. Route output to testname.run.stdout and
1193 # testname.run.stderr. Returns the exit code of the run.
1194
def simple_run( name, way, prog, args ):
    """Run the compiled program 'prog' with 'args' and check its exit
    code and output.

    Stdin comes from the stdin option or <name>.stdin when present,
    else /dev/null; output is captured in <name>.run.stdout and
    <name>.run.stderr. Returns a pass or failure result."""
    opts = getTestOpts()

    # figure out what to use for stdin
    if opts.stdin != '':
        use_stdin = opts.stdin
    else:
        stdin_file = add_suffix(name, 'stdin')
        if os.path.exists(in_testdir(stdin_file)):
            use_stdin = stdin_file
        else:
            use_stdin = '/dev/null'

    run_stdout = add_suffix(name,'run.stdout')
    run_stderr = add_suffix(name,'run.stderr')

    # Clear outputs of any previous run so stale files can't pass.
    rm_no_fail(qualify(name,'run.stdout'))
    rm_no_fail(qualify(name,'run.stderr'))
    rm_no_fail(qualify(name, 'hp'))
    rm_no_fail(qualify(name,'ps'))
    rm_no_fail(qualify(name, 'prof'))

    my_rts_flags = rts_flags(way)

    # Request machine-readable run-time stats when the test checks them.
    stats_file = name + '.stats'
    if len(opts.stats_range_fields) > 0:
        args += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'

    if opts.no_stdin:
        stdin_comes_from = ''
    else:
        stdin_comes_from = ' <' + use_stdin

    if opts.combined_output:
        redirection        = ' >' + run_stdout \
                           + ' 2>&1'
    else:
        redirection        = ' >' + run_stdout \
                           + ' 2>' + run_stderr

    cmd = prog + ' ' + args + ' '  \
        + my_rts_flags + ' '       \
        + stdin_comes_from         \
        + redirection

    if opts.cmd_wrapper != None:
        cmd = opts.cmd_wrapper(cmd);

    cmd = 'cd ' + opts.testdir + ' && ' + cmd

    # run the command
    result = runCmdFor(name, cmd, timeout_multiplier=opts.timeout_multiplier)

    # runCmdFor encodes the status wait()-style: exit code in the high
    # byte, signal number in the low byte.
    exit_code = result >> 8
    signal    = result & 0xff

    # check the exit code
    if exit_code != opts.exit_code:
        print 'Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')'
        dump_stdout(name)
        dump_stderr(name)
        return failBecause('bad exit code')

    # Heap/time-profiling runs produce extra files that need checking.
    check_hp = my_rts_flags.find("-h") != -1
    check_prof = my_rts_flags.find("-p") != -1

    if not opts.ignore_output:
        bad_stderr = not opts.combined_output and not check_stderr_ok(name)
        bad_stdout = not check_stdout_ok(name)
        if bad_stderr:
            return failBecause('bad stderr')
        if bad_stdout:
            return failBecause('bad stdout')
        # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
        if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
            return failBecause('bad heap profile')
        if check_prof and not check_prof_ok(name):
            return failBecause('bad profile')

    return checkStats(name, way, stats_file, opts.stats_range_fields)
1275
def rts_flags(way):
    """Return the '+RTS ... -RTS' argument string configured for 'way',
    or '' when the way has no RTS flags."""
    if way == '':
        return ''

    flag_list = config.way_rts_flags[way]
    if flag_list == []:
        return ''
    return '+RTS ' + ' '.join(flag_list) + ' -RTS'
1286
1287 # -----------------------------------------------------------------------------
1288 # Run a program in the interpreter and check its output
1289
1290 def interpreter_run( name, way, extra_hc_opts, compile_only, top_mod ):
1291 outname = add_suffix(name, 'interp.stdout')
1292 errname = add_suffix(name, 'interp.stderr')
1293 rm_no_fail(outname)
1294 rm_no_fail(errname)
1295 rm_no_fail(name)
1296
1297 if (top_mod == ''):
1298 srcname = add_hs_lhs_suffix(name)
1299 else:
1300 srcname = top_mod
1301
1302 scriptname = add_suffix(name, 'genscript')
1303 qscriptname = in_testdir(scriptname)
1304 rm_no_fail(qscriptname)
1305
1306 delimiter = '===== program output begins here\n'
1307
1308 script = open(qscriptname, 'w')
1309 if not compile_only:
1310 # set the prog name and command-line args to match the compiled
1311 # environment.
1312 script.write(':set prog ' + name + '\n')
1313 script.write(':set args ' + getTestOpts().extra_run_opts + '\n')
1314 # Add marker lines to the stdout and stderr output files, so we
1315 # can separate GHCi's output from the program's.
1316 script.write(':! echo ' + delimiter)
1317 script.write(':! echo 1>&2 ' + delimiter)
1318 # Set stdout to be line-buffered to match the compiled environment.
1319 script.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
1320 # wrapping in GHC.TopHandler.runIO ensures we get the same output
1321 # in the event of an exception as for the compiled program.
1322 script.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
1323 script.close()
1324
1325 # figure out what to use for stdin
1326 if getTestOpts().stdin != '':
1327 stdin_file = in_testdir(getTestOpts().stdin)
1328 else:
1329 stdin_file = qualify(name, 'stdin')
1330
1331 if os.path.exists(stdin_file):
1332 stdin = open(stdin_file, 'r')
1333 os.system('cat ' + stdin_file + ' >>' + qscriptname)
1334
1335 script.close()
1336
1337 flags = copy.copy(getTestOpts().compiler_always_flags)
1338 if getTestOpts().outputdir != None:
1339 flags.extend(["-outputdir", getTestOpts().outputdir])
1340
1341 cmd = "'" + config.compiler + "' " \
1342 + join(flags,' ') + ' ' \
1343 + srcname + ' ' \
1344 + join(config.way_flags(name)[way],' ') + ' ' \
1345 + extra_hc_opts + ' ' \
1346 + getTestOpts().extra_hc_opts + ' ' \
1347 + '<' + scriptname + ' 1>' + outname + ' 2>' + errname
1348
1349 if getTestOpts().cmd_wrapper != None:
1350 cmd = getTestOpts().cmd_wrapper(cmd);
1351
1352 cmd = 'cd ' + getTestOpts().testdir + " && " + cmd
1353
1354 result = runCmdFor(name, cmd, timeout_multiplier=getTestOpts().timeout_multiplier)
1355
1356 exit_code = result >> 8
1357 signal = result & 0xff
1358
1359 # split the stdout into compilation/program output
1360 split_file(in_testdir(outname), delimiter,
1361 qualify(name, 'comp.stdout'),
1362 qualify(name, 'run.stdout'))
1363 split_file(in_testdir(errname), delimiter,
1364 qualify(name, 'comp.stderr'),
1365 qualify(name, 'run.stderr'))
1366
1367 # check the exit code
1368 if exit_code != getTestOpts().exit_code:
1369 print 'Wrong exit code (expected', getTestOpts().exit_code, ', actual', exit_code, ')'
1370 dump_stdout(name)
1371 dump_stderr(name)
1372 return failBecause('bad exit code')
1373
1374 # ToDo: if the sub-shell was killed by ^C, then exit
1375
1376 if getTestOpts().ignore_output or (check_stderr_ok(name) and
1377 check_stdout_ok(name)):
1378 return passed()
1379 else:
1380 return failBecause('bad stdout or stderr')
1381
1382
def split_file(in_fn, delimiter, out1_fn, out2_fn):
    """Split the file 'in_fn' at the first line equal to 'delimiter'
    (after stripping leading whitespace and CRs): lines before the
    delimiter go to 'out1_fn', lines after it to 'out2_fn'.

    Uses 'with' so all three files are closed even on error (the old
    version never closed the input file at all)."""
    with open(in_fn) as infile:
        with open(out1_fn, 'w') as out1:
            line = infile.readline()
            line = re.sub('\r', '', line) # ignore Windows EOL
            while (re.sub('^\s*','',line) != delimiter and line != ''):
                out1.write(line)
                line = infile.readline()
                line = re.sub('\r', '', line)
        # Everything after the delimiter is copied through verbatim.
        with open(out2_fn, 'w') as out2:
            line = infile.readline()
            while (line != ''):
                out2.write(line)
                line = infile.readline()
1401
1402 # -----------------------------------------------------------------------------
1403 # Utils
1404
def check_stdout_ok( name ):
    """Compare the test's run.stdout against the expected sample file.
    Platform-specific samples are compared verbatim; otherwise output
    is normalised first. A test-supplied check_stdout hook, when set,
    replaces the comparison entirely."""
    opts = getTestOpts()
    if opts.with_namebase == None:
        namebase = name
    else:
        namebase = opts.with_namebase

    actual_stdout_file = qualify(name, 'run.stdout')
    (platform_specific, expected_stdout_file) = platform_wordsize_qualify(namebase, 'stdout')

    def norm(s):
        # A platform-specific sample is already in final form.
        if platform_specific:
            return s
        return normalise_output(s)

    normaliser = two_normalisers(norm, opts.extra_normaliser)

    custom_check = opts.check_stdout
    if custom_check:
        return custom_check(actual_stdout_file, normaliser)

    return compare_outputs('stdout', normaliser,
                           expected_stdout_file, actual_stdout_file)
1429
1430 def dump_stdout( name ):
1431 print 'Stdout:'
1432 print read_no_crs(qualify(name, 'run.stdout'))
1433
def check_stderr_ok( name ):
    """Compare the test's run.stderr against the expected sample file.
    Platform-specific samples are compared verbatim; otherwise the
    error-message normaliser is applied first."""
    opts = getTestOpts()
    if opts.with_namebase == None:
        namebase = name
    else:
        namebase = opts.with_namebase

    actual_stderr_file = qualify(name, 'run.stderr')
    (platform_specific, expected_stderr_file) = platform_wordsize_qualify(namebase, 'stderr')

    def norm(s):
        # A platform-specific sample is already in final form.
        if platform_specific:
            return s
        return normalise_errmsg(s)

    normaliser = two_normalisers(norm, opts.extra_errmsg_normaliser)
    return compare_outputs('stderr', normaliser,
                           expected_stderr_file, actual_stderr_file)
1452
1453 def dump_stderr( name ):
1454 print "Stderr:"
1455 print read_no_crs(qualify(name, 'run.stderr'))
1456
def read_no_crs(file):
    """Return the contents of 'file' with all CR characters removed;
    returns '' when the file cannot be read.

    Fixes the old 'h.close' (no parentheses), which never actually
    closed the handle; 'with' guarantees closure. The except clause is
    narrowed from a bare except to EnvironmentError."""
    str = ''
    try:
        with open(file) as h:
            str = h.read()
    except EnvironmentError:
        # On Windows, if the program fails very early, it seems the
        # files stdout/stderr are redirected to may not get created
        pass
    return re.sub('\r', '', str)
1468
def write_file(file, str):
    """Write 'str' to 'file', replacing any previous contents.

    The old code called 'h.close' without parentheses, which never
    closed the handle; 'with' guarantees closure and flush."""
    with open(file, 'w') as h:
        h.write(str)
1473
1474 def check_hp_ok(name):
1475
1476 # do not qualify for hp2ps because we should be in the right directory
1477 hp2psCmd = "cd " + getTestOpts().testdir + " && '" + config.hp2ps + "' " + name
1478
1479 hp2psResult = runCmdExitCode(hp2psCmd)
1480
1481 actual_ps_file = qualify(name, 'ps')
1482
1483 if(hp2psResult == 0):
1484 if (os.path.exists(actual_ps_file)):
1485 if gs_working:
1486 gsResult = runCmdExitCode(genGSCmd(actual_ps_file))
1487 if (gsResult == 0):
1488 return (True)
1489 else:
1490 print "hp2ps output for " + name + "is not valid PostScript"
1491 else: return (True) # assume postscript is valid without ghostscript
1492 else:
1493 print "hp2ps did not generate PostScript for " + name
1494 return (False)
1495 else:
1496 print "hp2ps error when processing heap profile for " + name
1497 return(False)
1498
1499 def check_prof_ok(name):
1500
1501 prof_file = qualify(name,'prof')
1502
1503 if not os.path.exists(prof_file):
1504 print prof_file + " does not exist"
1505 return(False)
1506
1507 if os.path.getsize(qualify(name,'prof')) == 0:
1508 print prof_file + " is empty"
1509 return(False)
1510
1511 if getTestOpts().with_namebase == None:
1512 namebase = name
1513 else:
1514 namebase = getTestOpts().with_namebase
1515
1516 (platform_specific, expected_prof_file) = \
1517 platform_wordsize_qualify(namebase, 'prof.sample')
1518
1519 # sample prof file is not required
1520 if not os.path.exists(expected_prof_file):
1521 return True
1522 else:
1523 return compare_outputs('prof', \
1524 two_normalisers(normalise_whitespace,normalise_prof), \
1525 expected_prof_file, prof_file)
1526
1527 # Compare expected output to actual output, and optionally accept the
1528 # new output. Returns true if output matched or was accepted, false
1529 # otherwise.
def compare_outputs( kind, normaliser, expected_file, actual_file ):
    """Compare expected output to actual output after normalising both;
    'kind' is only used in messages (e.g. 'stdout'). When they differ
    a diff is shown (at verbosity >= 1) and, with config.accept set,
    the raw actual output overwrites the expected file. Returns 1 if
    the outputs matched or were accepted, 0 otherwise."""
    if os.path.exists(expected_file):
        expected_raw = read_no_crs(expected_file)
        # print "norm:", normaliser(expected_raw)
        expected_str = normaliser(expected_raw)
        expected_file_for_diff = expected_file
    else:
        # No sample at all: expect empty output.
        expected_str = ''
        expected_file_for_diff = '/dev/null'

    actual_raw = read_no_crs(actual_file)
    actual_str = normaliser(actual_raw)

    if expected_str == actual_str:
        return 1
    else:
        if_verbose(1, 'Actual ' + kind + ' output differs from expected:')

        # Dump the normalised forms to .normalised files so they can be
        # inspected after the run.
        if expected_file_for_diff == '/dev/null':
            expected_normalised_file = '/dev/null'
        else:
            expected_normalised_file = expected_file + ".normalised"
            write_file(expected_normalised_file, expected_str)

        actual_normalised_file = actual_file + ".normalised"
        write_file(actual_normalised_file, actual_str)

        # Ignore whitespace when diffing. We should only get to this
        # point if there are non-whitespace differences
        #
        # Note we are diffing the *actual* output, not the normalised
        # output. The normalised output may have whitespace squashed
        # (including newlines) so the diff would be hard to read.
        # This does mean that the diff might contain changes that
        # would be normalised away.
        if (config.verbose >= 1):
            r = os.system( 'diff -uw ' + expected_file_for_diff + \
                           ' ' + actual_file )

            # If for some reason there were no non-whitespace differences,
            # then do a full diff
            if r == 0:
                r = os.system( 'diff -u ' + expected_file_for_diff + \
                               ' ' + actual_file )

        if config.accept:
            # Accept mode: the raw (un-normalised) actual output becomes
            # the new expected sample.
            if_verbose(1, 'Accepting new output.')
            write_file(expected_file, actual_raw)
            return 1
        else:
            return 0
1581
1582
def normalise_whitespace( str ):
    """Collapse every run of spaces, tabs and newlines into one space."""
    return re.sub('[ \t\n]+', ' ', str)
1587
def normalise_errmsg( str ):
    """Normalise a compiler error message so it compares equal across
    platforms and compiler builds."""
    rewrites = [
        # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
        # the colon is there because it appears in error messages; this
        # hacky solution is used in place of more sophisticated filename
        # mangling
        ('([^\\s])\\.exe', '\\1'),
        # normalise slashes, minimise Windows/Unix filename differences
        ('\\\\', '/'),
        # The inplace ghc's are called ghc-stage[123] to avoid filename
        # collisions, so we need to normalise that to just "ghc"
        ('ghc-stage[123]', 'ghc'),
        # Error messages sometimes name the integer implementation package
        ('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>'),
    ]
    for (pattern, replacement) in rewrites:
        str = re.sub(pattern, replacement, str)
    return str
1602
1603 # normalise a .prof file, so that we can reasonably compare it against
1604 # a sample. This doesn't compare any of the actual profiling data,
1605 # only the shape of the profile and the number of entries.
def normalise_prof (str):
    """Normalise a .prof file so it can reasonably be compared against
    a sample: all actual profiling numbers are discarded, keeping only
    the shape of the profile (cost centre, module, entry count)."""
    rewrites = [
        # strip everything up to the line beginning "COST CENTRE"
        ('^(.*\n)*COST CENTRE[^\n]*\n', ''),
        # strip results for CAFs, these tend to change unpredictably
        ('[ \t]*(CAF|IDLE).*\n', ''),
        # XXX Ignore Main.main. Sometimes this appears under CAF, and
        # sometimes under MAIN.
        ('[ \t]*main[ \t]+Main.*\n', ''),
        # A data line such as
        #   MAIN MAIN 101 0 0.0 0.0 100.0 100.0
        # loses its specific profiling data, keeping only cost centre
        # name, module and entry count:
        #   MAIN MAIN 0
        ('\n([ \t]*[^ \t]+)([ \t]+[^ \t]+)([ \t]+\\d+)([ \t]+\\d+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)',
         '\n\\1 \\2 \\4'),
    ]
    for (pattern, replacement) in rewrites:
        str = re.sub(pattern, replacement, str)
    return str
1634
def normalise_slashes_( str ):
    """Convert every backslash to a forward slash."""
    return re.sub('\\\\', '/', str)
1638
def normalise_exe_( str ):
    """Drop every '.exe' occurrence (Windows executable suffixes)."""
    return re.sub('\.exe', '', str)
1642
def normalise_output( str ):
    """Remove a '.exe' extension following a non-space character, as can
    appear (on Windows) in error messages generated by the program."""
    return re.sub('([^\\s])\\.exe', '\\1', str)
1648
def normalise_asm( str ):
    """Reduce assembly output to a bare listing of labels and mnemonics:
    assembler directives (lines starting with a dot) and blank lines are
    dropped, operands are stripped except for call targets, and '@plt'
    markers are removed."""
    directive = re.compile('^[ \t]*\\..*$')
    kept = []
    for raw_line in str.split('\n'):
        # Skip metadata directives such as ".type" or ".size".
        if directive.match(raw_line):
            continue
        tokens = re.sub('@plt', '', raw_line).lstrip().split()
        if not tokens:
            continue
        if tokens[0] == 'call':
            # Keep the call target so the call graph stays visible.
            kept.append(tokens[0] + ' ' + tokens[1])
        else:
            kept.append(tokens[0])
    return '\n'.join(kept)
1669
1670 def if_verbose( n, str ):
1671 if config.verbose >= n:
1672 print str
1673
def if_verbose_dump( n, f ):
    """At verbosity >= n, print the contents of file 'f'; prints an
    empty line when the file cannot be read (deliberate best-effort,
    hence the broad except)."""
    if config.verbose >= n:
        try:
            print open(f).read()
        except:
            print ''
1680
def rawSystem(cmd_and_args):
    """Run a command given as an argument list and return its exit code.

    subprocess passes the argument list through unmolested, whereas
    os.spawnv with the native (non-cygwin) Windows python sends the
    arguments through a shell, so "a b c" becomes three arguments.
    subprocess only exists from python 2.4, hence the spawnv fallback."""
    if not have_subprocess:
        return os.spawnv(os.P_WAIT, cmd_and_args[0], cmd_and_args)
    return subprocess.call(cmd_and_args)
1694
1695 # Note that this doesn't handle the timeout itself; it is just used for
1696 # commands that have timeout handling built-in.
def rawSystemWithTimeout(cmd_and_args):
    """Run a command whose timeout handling is built in (it is wrapped
    in the timeout program). Exit code 98 is the timeout program's way
    of signalling that ^C was pressed, in which case the whole test run
    is asked to stop."""
    exit_code = rawSystem(cmd_and_args)
    if exit_code == 98:
        stopNow()
    return exit_code
1703
1704 # cmd is a complex command in Bourne-shell syntax
1705 # e.g (cd . && 'c:/users/simonpj/darcs/HEAD/compiler/stage1/ghc-inplace' ...etc)
1706 # Hence it must ultimately be run by a Bourne shell
1707 #
1708 # Mostly it invokes the command wrapped in 'timeout' thus
1709 # timeout 300 'cd . && ...blah blah'
1710 # so it's timeout's job to invoke the Bourne shell
1711 #
1712 # But watch out for the case when there is no timeout program!
1713 # Then, when using the native Python, os.system will invoke the cmd shell
1714
def runCmd( cmd ):
    """Run the Bourne-shell command 'cmd', via the timeout program when
    one is configured, returning the status shifted into the wait()-style
    encoding (exit code in the high byte)."""
    if_verbose( 3, cmd )
    if config.os == 'mingw32':
        # On MinGW, we will always have timeout
        assert config.timeout_prog!=''

    if config.timeout_prog == '':
        # No timeout wrapper: fall back to the plain shell. With the
        # native Windows python this goes via the cmd shell.
        status = os.system(cmd)
    else:
        status = rawSystemWithTimeout([config.timeout_prog, str(config.timeout), cmd])
    return status << 8
1727
def runCmdFor( name, cmd, timeout_multiplier=1.0 ):
    """Like runCmd, but scales the timeout for this particular test and,
    when config.check_files_written is set, traces the command's file
    accesses with strace, recording what the test wrote."""
    if_verbose( 3, cmd )
    if config.os == 'mingw32':
        # On MinGW, we will always have timeout
        assert config.timeout_prog!=''
    timeout = int(ceil(config.timeout * timeout_multiplier))

    if config.timeout_prog == '':
        return os.system(cmd) << 8

    if config.check_files_written:
        trace_fn = name + ".strace"
        status = rawSystemWithTimeout(
            ["strace", "-o", trace_fn, "-fF",
             "-e", "creat,open,chdir,clone,vfork",
             config.timeout_prog, str(timeout), cmd])
        addTestFilesWritten(name, trace_fn)
        rm_no_fail(trace_fn)
    else:
        status = rawSystemWithTimeout([config.timeout_prog, str(timeout), cmd])
    return status << 8
1750
def runCmdExitCode( cmd ):
    """Run 'cmd' and recover its plain exit code from runCmd's
    wait()-style encoding."""
    status = runCmd(cmd)
    return status >> 8
1753
1754
1755 # -----------------------------------------------------------------------------
1756 # checking for files being written to by multiple tests
1757
# Common tail of a traced syscall line: either a return value, or
# "<unfinished ...>" when the call was interrupted by a context switch.
re_strace_call_end = '(\) += ([0-9]+|-1 E.*)| <unfinished ...>)$'
re_strace_unavailable = re.compile('^\) += \? <unavailable>$')
re_strace_pid = re.compile('^([0-9]+) +(.*)')
re_strace_clone = re.compile('^(clone\(|<... clone resumed> ).*\) = ([0-9]+)$')
re_strace_clone_unfinished = re.compile('^clone\( <unfinished \.\.\.>$')
re_strace_vfork = re.compile('^(vfork\(\)|<\.\.\. vfork resumed> \)) += ([0-9]+)$')
re_strace_vfork_unfinished = re.compile('^vfork\( <unfinished \.\.\.>$')
re_strace_chdir = re.compile('^chdir\("([^"]*)"(\) += 0| <unfinished ...>)$')
re_strace_chdir_resumed = re.compile('^<\.\.\. chdir resumed> \) += 0$')
re_strace_open = re.compile('^open\("([^"]*)", ([A-Z_|]*)(, [0-9]+)?' + re_strace_call_end)
re_strace_open_resumed = re.compile('^<... open resumed> ' + re_strace_call_end)
# Signal notifications that appear in the trace but carry no file
# information; they are matched only so they can be skipped.
re_strace_ignore_sigchild = re.compile('^--- SIGCHLD \(Child exited\) @ 0 \(0\) ---$')
re_strace_ignore_sigvtalarm = re.compile('^--- SIGVTALRM \(Virtual timer expired\) @ 0 \(0\) ---$')
re_strace_ignore_sigint = re.compile('^--- SIGINT \(Interrupt\) @ 0 \(0\) ---$')
re_strace_ignore_sigfpe = re.compile('^--- SIGFPE \(Floating point exception\) @ 0 \(0\) ---$')
re_strace_ignore_sigsegv = re.compile('^--- SIGSEGV \(Segmentation fault\) @ 0 \(0\) ---$')
re_strace_ignore_sigpipe = re.compile('^--- SIGPIPE \(Broken pipe\) @ 0 \(0\) ---$')

# Files that are read or written but shouldn't be:
# * ghci_history shouldn't be read or written by tests
# * things under package.conf.d shouldn't be written by tests
bad_file_usages = {}

# Mapping from tests to the list of files that they write
files_written = {}

# Mapping from tests to the list of files that they write but don't clean
files_written_not_removed = {}
1786
def add_bad_file_usage(name, file):
    """Record that test 'name' touched 'file' in a disallowed way.

    Uses setdefault instead of the previous try/bare-except, which
    could mask unrelated errors while initialising the entry."""
    usages = bad_file_usages.setdefault(name, [])
    if file not in usages:
        usages.append(file)
1793
def mkPath(curdir, path):
    """Resolve 'path' relative to the full directory 'curdir' and return
    the canonical absolute path (symlinks resolved)."""
    joined = os.path.join(curdir, path)
    return os.path.realpath(joined)
1798
def addTestFilesWritten(name, fn):
    """Record the files written by test 'name' from its strace log 'fn',
    serialising via the shared lock when running threaded.

    NOTE(review): 't' here is a module-level test-run object carrying
    lockFilesWritten — defined elsewhere in this file; confirm."""
    if config.use_threads:
        with t.lockFilesWritten:
            addTestFilesWrittenHelper(name, fn)
    else:
        addTestFilesWrittenHelper(name, fn)
1805
def addTestFilesWrittenHelper(name, fn):
    """Parse the strace output in 'fn' for test 'name', recording files
    the test wrote (files_written) and disallowed accesses
    (bad_file_usages). Tracks each traced pid's working directory so
    relative paths can be resolved."""
    started = False
    working_directories = {}

    with open(fn, 'r') as f:
        for line in f:
            m_pid = re_strace_pid.match(line)
            if m_pid:
                pid = m_pid.group(1)
                content = m_pid.group(2)
            elif re_strace_unavailable.match(line):
                # Bug fix: this was a bare 'next', which is a no-op
                # expression (or a NameError on python 2.5), not a loop
                # jump — the loop then fell through and used a stale or
                # unbound 'content'. 'continue' is what was intended.
                continue
            else:
                framework_fail(name, 'strace', "Can't find pid in strace line: " + line)

            m_open = re_strace_open.match(content)
            m_chdir = re_strace_chdir.match(content)
            m_clone = re_strace_clone.match(content)
            m_vfork = re_strace_vfork.match(content)

            # The first traced pid starts in our own cwd.
            if not started:
                working_directories[pid] = os.getcwd()
                started = True

            if m_open:
                file = m_open.group(1)
                file = mkPath(working_directories[pid], file)
                if file.endswith("ghci_history"):
                    add_bad_file_usage(name, file)
                elif not file in ['/dev/tty', '/dev/null'] and not file.startswith("/tmp/ghc"):
                    flags = m_open.group(2).split('|')
                    if 'O_WRONLY' in flags or 'O_RDWR' in flags:
                        if re.match('package\.conf\.d', file):
                            add_bad_file_usage(name, file)
                        else:
                            try:
                                if not file in files_written[name]:
                                    files_written[name].append(file)
                            except:
                                files_written[name] = [file]
                    elif 'O_RDONLY' in flags:
                        pass
                    else:
                        framework_fail(name, 'strace', "Can't understand flags in open strace line: " + line)
            elif m_chdir:
                # We optimistically assume that unfinished chdir's are going to succeed
                dir = m_chdir.group(1)
                working_directories[pid] = mkPath(working_directories[pid], dir)
            elif m_clone:
                # Child processes inherit the parent's cwd.
                working_directories[m_clone.group(2)] = working_directories[pid]
            elif m_vfork:
                working_directories[m_vfork.group(2)] = working_directories[pid]
            elif re_strace_open_resumed.match(content):
                pass
            elif re_strace_chdir_resumed.match(content):
                pass
            elif re_strace_vfork_unfinished.match(content):
                pass
            elif re_strace_clone_unfinished.match(content):
                pass
            elif re_strace_ignore_sigchild.match(content):
                pass
            elif re_strace_ignore_sigvtalarm.match(content):
                pass
            elif re_strace_ignore_sigint.match(content):
                pass
            elif re_strace_ignore_sigfpe.match(content):
                pass
            elif re_strace_ignore_sigsegv.match(content):
                pass
            elif re_strace_ignore_sigpipe.match(content):
                pass
            else:
                framework_fail(name, 'strace', "Can't understand strace line: " + line)
1880
def checkForFilesWrittenProblems(file):
    """Write a report to the open file object 'file' describing three
    kinds of file-usage problems collected during the run: files written
    by more than one test, files written but never removed, and
    disallowed file accesses."""
    foundProblem = False

    # Invert the test -> files mapping so we can see which files were
    # written by more than one test.
    files_written_inverted = {}
    for t in files_written.keys():
        for f in files_written[t]:
            try:
                files_written_inverted[f].append(t)
            except:
                files_written_inverted[f] = [t]

    for f in files_written_inverted.keys():
        if len(files_written_inverted[f]) > 1:
            if not foundProblem:
                foundProblem = True
                file.write("\n")
                file.write("\nSome files are written by multiple tests:\n")
            file.write("    " + f + " (" + str(files_written_inverted[f]) + ")\n")
    if foundProblem:
        file.write("\n")

    # -----

    if len(files_written_not_removed) > 0:
        file.write("\n")
        file.write("\nSome files written but not removed:\n")
        tests = files_written_not_removed.keys()
        tests.sort()
        for t in tests:
            for f in files_written_not_removed[t]:
                file.write("    " + t + ": " + f + "\n")
        file.write("\n")

    # -----

    if len(bad_file_usages) > 0:
        file.write("\n")
        file.write("\nSome bad file usages:\n")
        tests = bad_file_usages.keys()
        tests.sort()
        for t in tests:
            for f in bad_file_usages[t]:
                file.write("    " + t + ": " + f + "\n")
        file.write("\n")
1925
1926 # -----------------------------------------------------------------------------
1927 # checking if ghostscript is available for checking the output of hp2ps
1928
def genGSCmd(psfile):
    """Build the ghostscript command line used to validate 'psfile'."""
    gs_flags = ' -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE '
    return config.gs + gs_flags + psfile
1931
def gsNotWorking():
    """Report that ghostscript cannot be used to validate hp2ps output
    (the module-level probe below calls this on any failure)."""
    global gs_working
    print "GhostScript not available for hp2ps tests"
1935
# Probe once, at module load, whether ghostscript can be trusted to
# validate hp2ps output: it must accept a known-good .ps file AND
# reject a known-bad one.
global gs_working
gs_working = 0
if config.have_profiling:
  if config.gs != '':
    resultGood = runCmdExitCode(genGSCmd(config.confdir + '/good.ps'));
    if resultGood == 0:
        resultBad = runCmdExitCode(genGSCmd(config.confdir + '/bad.ps'));
        if resultBad != 0:
            print "GhostScript available for hp2ps tests"
            gs_working = 1;
        else:
            gsNotWorking();
    else:
        gsNotWorking();
  else:
    gsNotWorking();
1952
def rm_no_fail( file ):
    """Remove 'file', ignoring the usual failure (file absent).

    The old version used 'finally: return', which silently swallowed
    EVERY exception (including KeyboardInterrupt); only OSError — the
    exception os.remove raises — is ignored now."""
    try:
        os.remove( file )
    except OSError:
        pass
1958
def add_suffix( name, suffix ):
    """Return 'name.suffix', or just 'name' when the suffix is empty."""
    return name if suffix == '' else name + '.' + suffix
1964
def add_hs_lhs_suffix(name):
    """Append the source-file extension implied by the current test
    options (C, C--, Objective-C/C++, literate or plain Haskell)."""
    opts = getTestOpts()
    if opts.c_src:
        ext = 'c'
    elif opts.cmm_src:
        ext = 'cmm'
    elif opts.objc_src:
        ext = 'm'
    elif opts.objcpp_src:
        ext = 'mm'
    elif opts.literate:
        ext = 'lhs'
    else:
        ext = 'hs'
    return add_suffix(name, ext)
1978
def replace_suffix( name, suffix ):
    """Swap the extension of 'name' for 'suffix' (added even when the
    name had no extension)."""
    root, _old_ext = os.path.splitext(name)
    return root + '.' + suffix
1982
def in_testdir( name ):
    """Prefix 'name' with the current test directory."""
    testdir = getTestOpts().testdir
    return testdir + '/' + name
1985
def qualify( name, suff ):
    """Return the path of 'name.suff' inside the test directory."""
    filename = add_suffix(name, suff)
    return in_testdir(filename)
1988
1989
1990 # Finding the sample output. The filename is of the form
1991 #
1992 # <test>.stdout[-<compiler>][-<version>][-ws-<wordsize>][-<platform>]
1993 #
1994 # and we pick the most specific version available. The <version> is
1995 # the major version of the compiler (e.g. 6.8.2 would be "6.8"). For
1996 # more fine-grained control use if_compiler_lt().
1997 #
def platform_wordsize_qualify( name, suff ):
    """Find the most specific sample file for (name, suff).

    Candidate filenames have the form
      <name>.<suff>[-<compiler>][-<version>][-ws-<wordsize>][-<platform>|-<os>]
    and the 'paths' list below is ordered most- to least-specific (the
    platform component varies slowest, so platform-specific samples win).
    Returns (platform_specific_flag, path); falls back to the bare
    basepath (flag 0) when no candidate file exists."""

    basepath = qualify(name, suff)

    paths = [(platformSpecific, basepath + comp + vers + ws + plat)
             for (platformSpecific, plat) in [(1, '-' + config.platform),
                                              (1, '-' + config.os),
                                              (0, '')]
             for ws in ['-ws-' + config.wordsize, '']
             for comp in ['-' + config.compiler_type, '']
             for vers in ['-' + config.compiler_maj_version, '']]

    # Glob once and normalise slashes so candidates compare equal on
    # Windows.
    dir = glob.glob(basepath + '*')
    dir = map (lambda d: normalise_slashes_(d), dir)

    for (platformSpecific, f) in paths:
        if f in dir:
            return (platformSpecific,f)

    return (0, basepath)
2018
2019 # Clean up prior to the test, so that we can't spuriously conclude
2020 # that it passed on the basis of old run outputs.
def pretest_cleanup(name):
    """Remove the outputs of any previous run of this test, so that stale
    files can't make the test spuriously pass.

    'name' is the test name; output files live next to it in the test
    directory (see qualify()).
    """
    opts = getTestOpts()
    if opts.outputdir is not None:
        odir = in_testdir(opts.outputdir)
        try:
            shutil.rmtree(odir)
        except OSError:
            # Most likely the directory didn't exist yet; this cleanup is
            # best-effort.  (The old bare 'except' also hid interrupts and
            # programming errors.)
            pass
        os.mkdir(odir)

    # Remove every per-test output file a previous run may have left
    # behind; rm_no_fail ignores files that are already gone.
    for suff in ['interp.stderr', 'interp.stdout',
                 'comp.stderr', 'comp.stdout',
                 'run.stderr', 'run.stdout',
                 'tix', 'exe.tix']:
        rm_no_fail(qualify(name, suff))
    # simple_build zaps the .o and executable files itself:
    #   rm_nofail(qualify("o"))
    #   rm_nofail(qualify(""))
    # not interested in the return code
2042
2043 # -----------------------------------------------------------------------------
2044 # Return a list of all the files ending in '.T' below the directory dir.
2045
def findTFiles(roots):
    """Return every '.T' file found below each of the given root paths."""
    return concat([findTFiles_(root) for root in roots])
2048
def findTFiles_(path):
    """Return the '.T' files at or below a single path."""
    if os.path.isdir(path):
        children = [path + '/' + entry for entry in os.listdir(path)]
        return findTFiles(children)
    if path.endswith('.T'):
        return [path]
    return []
2057
2058 # -----------------------------------------------------------------------------
2059 # Output a test summary to the specified file object
2060
def summary(t, file):
    """Write the overall test-run summary for result collection 't' to the
    given file object: elapsed time, test counts, and details of any
    unexpected passes/failures."""

    file.write('\n')
    printUnexpectedTests(file, [t.unexpected_passes, t.unexpected_failures])

    # Wall-clock time since the run started, formatted h:mm:ss.
    elapsed = str(datetime.timedelta(seconds=
                  round(time.time() - time.mktime(t.start_time))))

    # Counts are right-justified in an 8-character column; repr() matches
    # the old backquote notation.
    file.write(''.join(
        ['OVERALL SUMMARY for test run started at '
             + time.strftime("%c %Z", t.start_time) + '\n',
         elapsed.rjust(8)
             + ' spent to go through\n',
         repr(t.total_tests).rjust(8)
             + ' total tests, which gave rise to\n',
         repr(t.total_test_cases).rjust(8)
             + ' test cases, of which\n',
         repr(t.n_tests_skipped).rjust(8)
             + ' were skipped\n',
         '\n',
         repr(t.n_missing_libs).rjust(8)
             + ' had missing libraries\n',
         repr(t.n_expected_passes).rjust(8)
             + ' expected passes\n',
         repr(t.n_expected_failures).rjust(8)
             + ' expected failures\n',
         '\n',
         repr(t.n_framework_failures).rjust(8)
             + ' caused framework failures\n',
         repr(t.n_unexpected_passes).rjust(8)
             + ' unexpected passes\n',
         repr(t.n_unexpected_failures).rjust(8)
             + ' unexpected failures\n',
         '\n']))

    if t.n_unexpected_passes > 0:
        file.write('Unexpected passes:\n')
        printPassingTestInfosSummary(file, t.unexpected_passes)

    if t.n_unexpected_failures > 0:
        file.write('Unexpected failures:\n')
        printFailingTestInfosSummary(file, t.unexpected_failures)

    if config.check_files_written:
        checkForFilesWrittenProblems(file)

    if stopping():
        file.write('WARNING: Testsuite run was terminated early\n')
2105
def printUnexpectedTests(file, testInfoss):
    """Write a single TEST=\"...\" line naming every test that had an
    unexpected result, for easy re-running; writes nothing if there were
    none.  'testInfoss' is a list of {directory: {test: info}} maps."""
    unexpected = []
    for testInfos in testInfoss:
        for directory in testInfos.keys():
            unexpected.extend(testInfos[directory].keys())
    if unexpected:
        file.write('Unexpected results from:\n')
        file.write('TEST="' + ' '.join(unexpected) + '"\n')
        file.write('\n')
2117
def printPassingTestInfosSummary(file, testInfos):
    """Write one line per unexpectedly-passing test: directory, test name
    and the ways it passed in, sorted by directory then test name."""
    directories = sorted(testInfos.keys())
    # Pad the directory column to the widest directory name.
    maxDirLen = max([len(d) for d in directories])
    for directory in directories:
        for test in sorted(testInfos[directory].keys()):
            ways = testInfos[directory][test]
            file.write('   ' + directory.ljust(maxDirLen + 2) + test +
                       ' (' + ','.join(ways) + ')\n')
    file.write('\n')
2129
def printFailingTestInfosSummary(file, testInfos):
    """Write one line per unexpected failure: directory, test name, the
    failure reason and the ways it failed in, sorted by directory then
    test name (reasons keep their map order)."""
    directories = sorted(testInfos.keys())
    # Pad the directory column to the widest directory name.
    maxDirLen = max([len(d) for d in directories])
    for directory in directories:
        for test in sorted(testInfos[directory].keys()):
            for reason in testInfos[directory][test].keys():
                ways = testInfos[directory][test][reason]
                file.write('   ' + directory.ljust(maxDirLen + 2) + test +
                           ' [' + reason + ']' +
                           ' (' + ','.join(ways) + ')\n')
    file.write('\n')
2144
def getStdout(cmd):
    """Run cmd and return its stdout; raise if the command exits non-zero
    or writes anything to stderr.  Requires the subprocess module."""
    if not have_subprocess:
        raise Exception("Need subprocess to get stdout, but don't have it")
    p = subprocess.Popen(cmd,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    r = p.wait()
    if r != 0:
        raise Exception("Command failed: " + str(cmd))
    if stderr != '':
        raise Exception("stderr from command: " + str(cmd))
    return stdout