1 #
2 # (c) Simon Marlow 2002
3 #
4
5 # This allows us to use the "with X:" syntax with python 2.5:
6 from __future__ import with_statement
7
8 import shutil
9 import sys
10 import os
11 import errno
12 import string
13 import re
14 import traceback
15 import time
16 import datetime
17 import copy
18 import glob
19 import types
20 from math import ceil, trunc
21
22 have_subprocess = False
23 try:
24 import subprocess
25 have_subprocess = True
26 except:
27 print "Warning: subprocess not found, will fall back to spawnv"
28
29 from string import join
30 from testglobals import *
31 from testutil import *
32
33 if config.use_threads:
34 import threading
35 import thread
36
37 global wantToStop
38 wantToStop = False
39 def stopNow():
40 global wantToStop
41 wantToStop = True
42 def stopping():
43 return wantToStop
44
45 # Options valid for the current test only (these get reset to
46 # testdir_testopts after each test).
47
48 global testopts_local
49 if config.use_threads:
50 testopts_local = threading.local()
51 else:
52 class TestOpts_Local:
53 pass
54 testopts_local = TestOpts_Local()
55
56 def getTestOpts():
57 return testopts_local.x
58
59 def setLocalTestOpts(opts):
60 global testopts_local
61 testopts_local.x=opts
62
63 def isStatsTest():
64 opts = getTestOpts()
65 return len(opts.compiler_stats_range_fields) > 0 or len(opts.stats_range_fields) > 0
66
67
68 # This can be called at the top of a file of tests, to set default test options
69 # for the following tests.
70 def setTestOpts( f ):
71 global thisdir_settings
72 thisdir_settings = [thisdir_settings, f]
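# An illustrative use at the top of a .T file (the flag shown here is just an
# example, not taken from a real test):
#
# setTestOpts(extra_hc_opts('-fno-warn-missing-signatures'))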
73
74 # -----------------------------------------------------------------------------
75 # Canned setup functions for common cases. e.g. for a test you might say
76 #
77 # test('test001', normal, compile, [''])
78 #
79 # to run it without any options, but change it to
80 #
81 # test('test001', expect_fail, compile, [''])
82 #
83 # to expect failure for this test.
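#
# Setup functions can also be combined in a list, which executeSetups (below)
# applies in order; an illustrative (made-up) example:
#
# test('test001', [omit_ways(['ghci']), expect_broken(1234)], compile, [''])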
84
85 def normal( name, opts ):
86 return;
87
88 def skip( name, opts ):
89 opts.skip = 1
90
91 def expect_fail( name, opts ):
92 opts.expect = 'fail';
93
94 def reqlib( lib ):
95 return lambda name, opts, l=lib: _reqlib (name, opts, l )
96
97 # Cache the results of looking to see if we have a library or not.
98 # This makes quite a difference, especially on Windows.
99 have_lib = {}
100
101 def _reqlib( name, opts, lib ):
102 if have_lib.has_key(lib):
103 got_it = have_lib[lib]
104 else:
105 if have_subprocess:
106 # By preference we use subprocess, as the alternative uses
107 # /dev/null which mingw doesn't have.
108 p = subprocess.Popen([config.ghc_pkg, '--no-user-package-db', 'describe', lib],
109 stdout=subprocess.PIPE,
110 stderr=subprocess.PIPE)
111 # read from stdout and stderr to avoid blocking due to
112 # buffers filling
113 p.communicate()
114 r = p.wait()
115 else:
116 r = os.system(config.ghc_pkg + ' describe ' + lib
117 + ' > /dev/null 2> /dev/null')
118 got_it = r == 0
119 have_lib[lib] = got_it
120
121 if not got_it:
122 opts.expect = 'missing-lib'
123
124 def req_profiling( name, opts ):
125 if not config.have_profiling:
126 opts.expect = 'fail'
127
128 def req_shared_libs( name, opts ):
129 if not config.have_shared_libs:
130 opts.expect = 'fail'
131
132 def req_interp( name, opts ):
133 if not config.have_interp:
134 opts.expect = 'fail'
135
136 def req_smp( name, opts ):
137 if not config.have_smp:
138 opts.expect = 'fail'
139
140 def ignore_output( name, opts ):
141 opts.ignore_output = 1
142
143 def no_stdin( name, opts ):
144 opts.no_stdin = 1
145
146 def combined_output( name, opts ):
147 opts.combined_output = True
148
149 # -----
150
151 def expect_fail_for( ways ):
152 return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )
153
154 def _expect_fail_for( name, opts, ways ):
155 opts.expect_fail_for = ways
156
157 def expect_broken( bug ):
158 return lambda name, opts, b=bug: _expect_broken (name, opts, b )
159
160 def _expect_broken( name, opts, bug ):
161 record_broken(name, opts, bug)
162 opts.expect = 'fail';
163
164 def expect_broken_for( bug, ways ):
165 return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )
166
167 def _expect_broken_for( name, opts, bug, ways ):
168 record_broken(name, opts, bug)
169 opts.expect_fail_for = ways
170
171 def record_broken(name, opts, bug):
172 global brokens
173 me = (bug, opts.testdir, name)
174 if not me in brokens:
175 brokens.append(me)
176
177 # -----
178
179 def omit_ways( ways ):
180 return lambda name, opts, w=ways: _omit_ways( name, opts, w )
181
182 def _omit_ways( name, opts, ways ):
183 opts.omit_ways = ways
184
185 # -----
186
187 def only_ways( ways ):
188 return lambda name, opts, w=ways: _only_ways( name, opts, w )
189
190 def _only_ways( name, opts, ways ):
191 opts.only_ways = ways
192
193 # -----
194
195 def extra_ways( ways ):
196 return lambda name, opts, w=ways: _extra_ways( name, opts, w )
197
198 def _extra_ways( name, opts, ways ):
199 opts.extra_ways = ways
200
201 # -----
202
203 def omit_compiler_types( compiler_types ):
204 return lambda name, opts, c=compiler_types: _omit_compiler_types(name, opts, c)
205
206 def _omit_compiler_types( name, opts, compiler_types ):
207 if config.compiler_type in compiler_types:
208 opts.skip = 1
209
210 # -----
211
212 def only_compiler_types( compiler_types ):
213 return lambda name, opts, c=compiler_types: _only_compiler_types(name, opts, c)
214
215 def _only_compiler_types( name, opts, compiler_types ):
216 if config.compiler_type not in compiler_types:
217 opts.skip = 1
218
219 # -----
220
221 def set_stdin( file ):
222 return lambda name, opts, f=file: _set_stdin(name, opts, f);
223
224 def _set_stdin( name, opts, f ):
225 opts.stdin = f
226
227 # -----
228
229 def exit_code( val ):
230 return lambda name, opts, v=val: _exit_code(name, opts, v);
231
232 def _exit_code( name, opts, v ):
233 opts.exit_code = v
234
235 # -----
236
237 def timeout_multiplier( val ):
238 return lambda name, opts, v=val: _timeout_multiplier(name, opts, v)
239
240 def _timeout_multiplier( name, opts, v ):
241 opts.timeout_multiplier = v
242
243 # -----
244
245 def extra_run_opts( val ):
246 return lambda name, opts, v=val: _extra_run_opts(name, opts, v);
247
248 def _extra_run_opts( name, opts, v ):
249 opts.extra_run_opts = v
250
251 # -----
252
253 def extra_hc_opts( val ):
254 return lambda name, opts, v=val: _extra_hc_opts(name, opts, v);
255
256 def _extra_hc_opts( name, opts, v ):
257 opts.extra_hc_opts = v
258
259 # -----
260
261 def extra_clean( files ):
262 return lambda name, opts, v=files: _extra_clean(name, opts, v);
263
264 def _extra_clean( name, opts, v ):
265 opts.clean_files = v
266
267 # -----
268
269 def stats_num_field( field, expecteds ):
270 return lambda name, opts, f=field, e=expecteds: _stats_num_field(name, opts, f, e);
271
272 def _stats_num_field( name, opts, field, expecteds ):
273 if field in opts.stats_range_fields:
274 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
275
276 if type(expecteds) is types.ListType:
277 for (b, expected, dev) in expecteds:
278 if b:
279 opts.stats_range_fields[field] = (expected, dev)
280 return
281 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
282
283 else:
284 (expected, dev) = expecteds
285 opts.stats_range_fields[field] = (expected, dev)
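# The 'expecteds' argument accepts two forms; an illustrative sketch (field
# names and numbers are examples, not taken from a real test):
#
#   stats_num_field('peak_megabytes_allocated', (50, 10))
#       # a single (expected, deviation-in-percent) pair
#
#   stats_num_field('bytes allocated',
#                   [(wordsize(64), 8000000, 5),
#                    (wordsize(32), 4000000, 5)])
#       # a list of (condition, expected, deviation) triples; the first
#       # entry whose condition is True is used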
286
287 def compiler_stats_num_field( field, expecteds ):
288 return lambda name, opts, f=field, e=expecteds: _compiler_stats_num_field(name, opts, f, e);
289
290 def _compiler_stats_num_field( name, opts, field, expecteds ):
291 if field in opts.compiler_stats_range_fields:
292 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
293
294 # Compiler performance numbers change when debugging is on, making the results
295 # useless and confusing. Therefore, skip if debugging is on.
296 if compiler_debugged():
297 skip(name, opts)
298
299 for (b, expected, dev) in expecteds:
300 if b:
301 opts.compiler_stats_range_fields[field] = (expected, dev)
302 return
303
304 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
305
306 # -----
307
308 def when(b, f):
309 # When list_broken is on, we want to see all expect_broken calls,
310 # so we always do f
311 if b or config.list_broken:
312 return f
313 else:
314 return normal
315
316 def unless(b, f):
317 return when(not b, f)
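# Illustrative uses in a .T file (test names and ticket numbers are made up):
#
# test('T5678', when(opsys('mingw32'), expect_broken(5678)), compile_and_run, [''])
# test('T9012', unless(have_dynamic(), skip), compile, ['-dynamic'])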
318
319 def doing_ghci():
320 return 'ghci' in config.run_ways
321
322 def ghci_dynamic( ):
323 return config.ghc_dynamic
324
325 def fast():
326 return config.fast
327
328 def platform( plat ):
329 return config.platform == plat
330
331 def opsys( os ):
332 return config.os == os
333
334 def arch( arch ):
335 return config.arch == arch
336
337 def wordsize( ws ):
338 return config.wordsize == str(ws)
339
340 def msys( ):
341 return config.msys
342
343 def cygwin( ):
344 return config.cygwin
345
346 def have_vanilla( ):
347 return config.have_vanilla
348
349 def have_dynamic( ):
350 return config.have_dynamic
351
352 def have_profiling( ):
353 return config.have_profiling
354
355 def in_tree_compiler( ):
356 return config.in_tree_compiler
357
358 def compiler_type( compiler ):
359 return config.compiler_type == compiler
360
361 def compiler_lt( compiler, version ):
362 return config.compiler_type == compiler and \
363 version_lt(config.compiler_version, version)
364
365 def compiler_le( compiler, version ):
366 return config.compiler_type == compiler and \
367 version_le(config.compiler_version, version)
368
369 def compiler_gt( compiler, version ):
370 return config.compiler_type == compiler and \
371 version_gt(config.compiler_version, version)
372
373 def compiler_ge( compiler, version ):
374 return config.compiler_type == compiler and \
375 version_ge(config.compiler_version, version)
376
377 def unregisterised( ):
378 return config.unregisterised
379
380 def compiler_profiled( ):
381 return config.compiler_profiled
382
383 def compiler_debugged( ):
384 return config.compiler_debugged
385
386 def tag( t ):
387 return t in config.compiler_tags
388
389 # ---
390
391 def namebase( nb ):
392 return lambda opts, nb=nb: _namebase(opts, nb)
393
394 def _namebase( opts, nb ):
395 opts.with_namebase = nb
396
397 # ---
398
399 def high_memory_usage(name, opts):
400 opts.alone = True
401
402 # If a test is for a multi-CPU race, then running the test alone
403 # increases the chance that we'll actually see it.
404 def multi_cpu_race(name, opts):
405 opts.alone = True
406
407 # ---
408 def literate( name, opts ):
409 opts.literate = 1;
410
411 def c_src( name, opts ):
412 opts.c_src = 1;
413
414 def objc_src( name, opts ):
415 opts.objc_src = 1;
416
417 def objcpp_src( name, opts ):
418 opts.objcpp_src = 1;
419
420 def cmm_src( name, opts ):
421 opts.cmm_src = 1;
422
423 def outputdir( odir ):
424 return lambda name, opts, d=odir: _outputdir(name, opts, d)
425
426 def _outputdir( name, opts, odir ):
427 opts.outputdir = odir;
428
429 # ----
430
431 def pre_cmd( cmd ):
432 return lambda name, opts, c=cmd: _pre_cmd(name, opts, cmd)
433
434 def _pre_cmd( name, opts, cmd ):
435 opts.pre_cmd = cmd
436
437 # ----
438
439 def clean_cmd( cmd ):
440 return lambda name, opts, c=cmd: _clean_cmd(name, opts, cmd)
441
442 def _clean_cmd( name, opts, cmd ):
443 opts.clean_cmd = cmd
444
445 # ----
446
447 def cmd_prefix( prefix ):
448 return lambda name, opts, p=prefix: _cmd_prefix(name, opts, prefix)
449
450 def _cmd_prefix( name, opts, prefix ):
451 opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd;
452
453 # ----
454
455 def cmd_wrapper( fun ):
456 return lambda name, opts, f=fun: _cmd_wrapper(name, opts, fun)
457
458 def _cmd_wrapper( name, opts, fun ):
459 opts.cmd_wrapper = fun
460
461 # ----
462
463 def compile_cmd_prefix( prefix ):
464 return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, prefix)
465
466 def _compile_cmd_prefix( name, opts, prefix ):
467 opts.compile_cmd_prefix = prefix
468
469 # ----
470
471 def check_stdout( f ):
472 return lambda name, opts, f=f: _check_stdout(name, opts, f)
473
474 def _check_stdout( name, opts, f ):
475 opts.check_stdout = f
476
477 # ----
478
479 def normalise_slashes( name, opts ):
480 opts.extra_normaliser = normalise_slashes_
481
482 def normalise_exe( name, opts ):
483 opts.extra_normaliser = normalise_exe_
484
485 def normalise_fun( fun ):
486 return lambda name, opts, f=fun: _normalise_fun(name, opts, f)
487
488 def _normalise_fun( name, opts, f ):
489 opts.extra_normaliser = f
490
491 def normalise_errmsg_fun( fun ):
492 return lambda name, opts, f=fun: _normalise_errmsg_fun(name, opts, f)
493
494 def _normalise_errmsg_fun( name, opts, f ):
495 opts.extra_errmsg_normaliser = f
496
497 def two_normalisers(f, g):
498 return lambda x, f=f, g=g: f(g(x))
499
500 # ----
501 # Function for composing two opt-fns together
502
503 def executeSetups(fs, name, opts):
504 if type(fs) is types.ListType:
505 # If we have a list of setups, then execute each one
506 map (lambda f : executeSetups(f, name, opts), fs)
507 else:
508 # fs is a single function, so just apply it
509 fs(name, opts)
510
511 # -----------------------------------------------------------------------------
512 # The current directory of tests
513
514 def newTestDir( dir ):
515 global thisdir_settings
516 # reset the options for this test directory
517 thisdir_settings = lambda name, opts, dir=dir: _newTestDir( name, opts, dir )
518
519 def _newTestDir( name, opts, dir ):
520 opts.testdir = dir
521 opts.compiler_always_flags = config.compiler_always_flags
522
523 # -----------------------------------------------------------------------------
524 # Actually doing tests
525
526 parallelTests = []
527 aloneTests = []
528 allTestNames = set([])
529
530 def runTest (opts, name, func, args):
531 ok = 0
532
533 if config.use_threads:
534 t.thread_pool.acquire()
535 try:
536 while config.threads<(t.running_threads+1):
537 t.thread_pool.wait()
538 t.running_threads = t.running_threads+1
539 ok=1
540 t.thread_pool.release()
541 thread.start_new_thread(test_common_thread, (name, opts, func, args))
542 except:
543 if not ok:
544 t.thread_pool.release()
545 else:
546 test_common_work (name, opts, func, args)
547
548 # name :: String
549 # setup :: TestOpts -> IO ()
550 def test (name, setup, func, args):
551 global aloneTests
552 global parallelTests
553 global allTestNames
554 global thisdir_settings
555 if name in allTestNames:
556 framework_fail(name, 'duplicate', 'There are multiple tests with this name')
557 if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
558 framework_fail(name, 'bad_name', 'This test has an invalid name')
559
560 # Make a deep copy of the default_testopts, as we need our own copy
561 # of any dictionaries etc inside it. Otherwise, if one test modifies
562 # them, all tests will see the modified version!
563 myTestOpts = copy.deepcopy(default_testopts)
564
565 executeSetups([thisdir_settings, setup], name, myTestOpts)
566
567 thisTest = lambda : runTest(myTestOpts, name, func, args)
568 if myTestOpts.alone:
569 aloneTests.append(thisTest)
570 else:
571 parallelTests.append(thisTest)
572 allTestNames.add(name)
573
574 if config.use_threads:
575 def test_common_thread(name, opts, func, args):
576 t.lock.acquire()
577 try:
578 test_common_work(name,opts,func,args)
579 finally:
580 t.lock.release()
581 t.thread_pool.acquire()
582 t.running_threads = t.running_threads - 1
583 t.thread_pool.notify()
584 t.thread_pool.release()
585
586 def get_package_cache_timestamp():
587 if config.package_conf_cache_file == '':
588 return 0.0
589 else:
590 try:
591 return os.stat(config.package_conf_cache_file).st_mtime
592 except:
593 return 0.0
594
595
596 def test_common_work (name, opts, func, args):
597 try:
598 t.total_tests = t.total_tests+1
599 setLocalTestOpts(opts)
600
601 package_conf_cache_file_start_timestamp = get_package_cache_timestamp()
602
603 # All the ways we might run this test
604 if func == compile or func == multimod_compile:
605 all_ways = config.compile_ways
606 elif func == compile_and_run or func == multimod_compile_and_run:
607 all_ways = config.run_ways
608 elif func == ghci_script:
609 if 'ghci' in config.run_ways:
610 all_ways = ['ghci']
611 else:
612 all_ways = []
613 else:
614 all_ways = ['normal']
615
616 # A test itself can request extra ways by setting opts.extra_ways
617 all_ways = all_ways + filter(lambda way: way not in all_ways,
618 opts.extra_ways)
619
620 t.total_test_cases = t.total_test_cases + len(all_ways)
621
622 ok_way = lambda way: \
623 not getTestOpts().skip \
624 and (config.only == [] or name in config.only) \
625 and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
626 and (config.cmdline_ways == [] or way in config.cmdline_ways) \
627 and (not (config.skip_perf_tests and isStatsTest())) \
628 and way not in getTestOpts().omit_ways
629
630 # Which ways we will actually run (the rest get skipped)
631 do_ways = filter (ok_way,all_ways)
632
633 # In fast mode, we skip all but one way
634 if config.fast and len(do_ways) > 0:
635 do_ways = [do_ways[0]]
636
637 if not config.clean_only:
638 # Run the required tests...
639 for way in do_ways:
640 if stopping():
641 break
642 do_test (name, way, func, args)
643
644 for way in all_ways:
645 if way not in do_ways:
646 skiptest (name,way)
647
648 if getTestOpts().cleanup != '' and (config.clean_only or do_ways != []):
649 pretest_cleanup(name)
650 clean(map (lambda suff: name + suff,
651 ['', '.exe', '.exe.manifest', '.genscript',
652 '.stderr.normalised', '.stdout.normalised',
653 '.run.stderr.normalised', '.run.stdout.normalised',
654 '.comp.stderr.normalised', '.comp.stdout.normalised',
655 '.interp.stderr.normalised', '.interp.stdout.normalised',
656 '.stats', '.comp.stats',
657 '.hi', '.o', '.prof', '.exe.prof', '.hc',
658 '_stub.h', '_stub.c', '_stub.o',
659 '.hp', '.exe.hp', '.ps', '.aux', '.hcr', '.eventlog']))
660
661 if func == multi_compile or func == multi_compile_fail:
662 extra_mods = args[1]
663 clean(map (lambda (f,x): replace_suffix(f, 'o'), extra_mods))
664 clean(map (lambda (f,x): replace_suffix(f, 'hi'), extra_mods))
665
666 clean(getTestOpts().clean_files)
667
668 if getTestOpts().outputdir != None:
669 odir = in_testdir(getTestOpts().outputdir)
670 try:
671 shutil.rmtree(odir)
672 except:
673 pass
674
675 try:
676 shutil.rmtree(in_testdir('.hpc.' + name))
677 except:
678 pass
679
680 try:
681 cleanCmd = getTestOpts().clean_cmd
682 if cleanCmd != None:
683 result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + cleanCmd)
684 if result != 0:
685 framework_fail(name, 'cleaning', 'clean-command failed: ' + str(result))
686 except:
687 framework_fail(name, 'cleaning', 'clean-command exception')
688
689 package_conf_cache_file_end_timestamp = get_package_cache_timestamp();
690
691 if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
692 framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))
693
694 try:
695 for f in files_written[name]:
696 if os.path.exists(f):
697 try:
698 if not f in files_written_not_removed[name]:
699 files_written_not_removed[name].append(f)
700 except:
701 files_written_not_removed[name] = [f]
702 except:
703 pass
704 except Exception, e:
705 framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
706
707 def clean(strs):
708 for str in strs:
709 for name in glob.glob(in_testdir(str)):
710 clean_full_path(name)
711
712 def clean_full_path(name):
713 try:
714 # Remove files...
715 os.remove(name)
716 except OSError, e1:
717 try:
718 # ... and empty directories
719 os.rmdir(name)
720 except OSError, e2:
721 # We don't want to fail here, but we do want to know
722 # what went wrong, so print out the exceptions.
723 # ENOENT isn't a problem, though, as we clean files
724 # that don't necessarily exist.
725 if e1.errno != errno.ENOENT:
726 print e1
727 if e2.errno != errno.ENOENT:
728 print e2
729
730 def do_test(name, way, func, args):
731 full_name = name + '(' + way + ')'
732
733 try:
734 if_verbose(2, "=====> %s %d of %d %s " % \
735 (full_name, t.total_tests, len(allTestNames), \
736 [t.n_unexpected_passes, \
737 t.n_unexpected_failures, \
738 t.n_framework_failures]))
739
740 if config.use_threads:
741 t.lock.release()
742
743 try:
744 preCmd = getTestOpts().pre_cmd
745 if preCmd != None:
746 result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + preCmd)
747 if result != 0:
748 framework_fail(name, way, 'pre-command failed: ' + str(result))
749 except:
750 framework_fail(name, way, 'pre-command exception')
751
752 try:
753 result = apply(func, [name,way] + args)
754 finally:
755 if config.use_threads:
756 t.lock.acquire()
757
758 if getTestOpts().expect != 'pass' and \
759 getTestOpts().expect != 'fail' and \
760 getTestOpts().expect != 'missing-lib':
761 framework_fail(name, way, 'bad expected ' + getTestOpts().expect)
762
763 try:
764 passFail = result['passFail']
765 except:
766 passFail = 'No passFail found'
767
768 if passFail == 'pass':
769 if getTestOpts().expect == 'pass' \
770 and way not in getTestOpts().expect_fail_for:
771 t.n_expected_passes = t.n_expected_passes + 1
772 if name in t.expected_passes:
773 t.expected_passes[name].append(way)
774 else:
775 t.expected_passes[name] = [way]
776 else:
777 if_verbose(1, '*** unexpected pass for %s' % full_name)
778 t.n_unexpected_passes = t.n_unexpected_passes + 1
779 addPassingTestInfo(t.unexpected_passes, getTestOpts().testdir, name, way)
780 elif passFail == 'fail':
781 if getTestOpts().expect == 'pass' \
782 and way not in getTestOpts().expect_fail_for:
783 if_verbose(1, '*** unexpected failure for %s' % full_name)
784 t.n_unexpected_failures = t.n_unexpected_failures + 1
785 reason = result['reason']
786 addFailingTestInfo(t.unexpected_failures, getTestOpts().testdir, name, reason, way)
787 else:
788 if getTestOpts().expect == 'missing-lib':
789 t.n_missing_libs = t.n_missing_libs + 1
790 if name in t.missing_libs:
791 t.missing_libs[name].append(way)
792 else:
793 t.missing_libs[name] = [way]
794 else:
795 t.n_expected_failures = t.n_expected_failures + 1
796 if name in t.expected_failures:
797 t.expected_failures[name].append(way)
798 else:
799 t.expected_failures[name] = [way]
800 else:
801 framework_fail(name, way, 'bad result ' + passFail)
802 except KeyboardInterrupt:
803 stopNow()
804 except:
805 framework_fail(name, way, 'do_test exception')
806 traceback.print_exc()
807
808 def addPassingTestInfo (testInfos, directory, name, way):
809 directory = re.sub('^\\.[/\\\\]', '', directory)
810
811 if not directory in testInfos:
812 testInfos[directory] = {}
813
814 if not name in testInfos[directory]:
815 testInfos[directory][name] = []
816
817 testInfos[directory][name].append(way)
818
819 def addFailingTestInfo (testInfos, directory, name, reason, way):
820 directory = re.sub('^\\.[/\\\\]', '', directory)
821
822 if not directory in testInfos:
823 testInfos[directory] = {}
824
825 if not name in testInfos[directory]:
826 testInfos[directory][name] = {}
827
828 if not reason in testInfos[directory][name]:
829 testInfos[directory][name][reason] = []
830
831 testInfos[directory][name][reason].append(way)
832
833 def skiptest (name, way):
834 # print 'Skipping test \"', name, '\"'
835 t.n_tests_skipped = t.n_tests_skipped + 1
836 if name in t.tests_skipped:
837 t.tests_skipped[name].append(way)
838 else:
839 t.tests_skipped[name] = [way]
840
841 def framework_fail( name, way, reason ):
842 full_name = name + '(' + way + ')'
843 if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
844 t.n_framework_failures = t.n_framework_failures + 1
845 if name in t.framework_failures:
846 t.framework_failures[name].append(way)
847 else:
848 t.framework_failures[name] = [way]
849
850 def badResult(result):
851 try:
852 if result['passFail'] == 'pass':
853 return False
854 return True
855 except:
856 return True
857
858 def passed():
859 return {'passFail': 'pass'}
860
861 def failBecause(reason):
862 return {'passFail': 'fail', 'reason': reason}
863
864 # -----------------------------------------------------------------------------
865 # Generic command tests
866
867 # A generic command test is expected to run and exit successfully.
868 #
869 # The expected exit code can be changed via exit_code() as normal, and
870 # the expected stdout/stderr are stored in <testname>.stdout and
871 # <testname>.stderr. The output of the command can be ignored
872 # altogether by using run_command_ignore_output instead of
873 # run_command.
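#
# An illustrative .T file entry (the Makefile target name is made up):
#
# test('T2345', normal, run_command, ['$MAKE -s --no-print-directory T2345'])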
874
875 def run_command( name, way, cmd ):
876 return simple_run( name, '', cmd, '' )
877
878 # -----------------------------------------------------------------------------
879 # GHCi tests
880
881 def ghci_script( name, way, script ):
882 # filter out -fforce-recomp from compiler_always_flags, because we're
883 # actually testing the recompilation behaviour in the GHCi tests.
884 flags = filter(lambda f: f != '-fforce-recomp', getTestOpts().compiler_always_flags)
885 flags.append(getTestOpts().extra_hc_opts)
886 if getTestOpts().outputdir != None:
887 flags.extend(["-outputdir", getTestOpts().outputdir])
888
889 # We pass HC and HC_OPTS as environment variables, so that the
890 # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
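# For example, a .script file might contain lines such as (illustrative only):
#   :! $HC $HC_OPTS -c OtherModule.hs
#   :load OtherModule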
891 cmd = "HC='" + config.compiler + "' " + \
892 "HC_OPTS='" + join(flags,' ') + "' " + \
893 "'" + config.compiler + "'" + \
894 ' --interactive -v0 -ignore-dot-ghci ' + \
895 join(flags,' ')
896
897 getTestOpts().stdin = script
898 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
899
900 # -----------------------------------------------------------------------------
901 # Compile-only tests
902
903 def compile( name, way, extra_hc_opts ):
904 return do_compile( name, way, 0, '', [], extra_hc_opts )
905
906 def compile_fail( name, way, extra_hc_opts ):
907 return do_compile( name, way, 1, '', [], extra_hc_opts )
908
909 def multimod_compile( name, way, top_mod, extra_hc_opts ):
910 return do_compile( name, way, 0, top_mod, [], extra_hc_opts )
911
912 def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
913 return do_compile( name, way, 1, top_mod, [], extra_hc_opts )
914
915 def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
916 return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)
917
918 def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
919 return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
920
921 def do_compile( name, way, should_fail, top_mod, extra_mods, extra_hc_opts ):
922 # print 'Compile only, extra args = ', extra_hc_opts
923 pretest_cleanup(name)
924
925 result = extras_build( way, extra_mods, extra_hc_opts )
926 if badResult(result):
927 return result
928 extra_hc_opts = result['hc_opts']
929
930 force = 0
931 if extra_mods:
932 force = 1
933 result = simple_build( name, way, extra_hc_opts, should_fail, top_mod, 0, 1, force)
934
935 if badResult(result):
936 return result
937
938 # the actual stderr should always match the expected, regardless
939 # of whether we expected the compilation to fail or not (successful
940 # compilations may generate warnings).
941
942 if getTestOpts().with_namebase == None:
943 namebase = name
944 else:
945 namebase = getTestOpts().with_namebase
946
947 (platform_specific, expected_stderr_file) = platform_wordsize_qualify(namebase, 'stderr')
948 actual_stderr_file = qualify(name, 'comp.stderr')
949
950 if not compare_outputs('stderr', \
951 two_normalisers(two_normalisers(getTestOpts().extra_errmsg_normaliser, normalise_errmsg), normalise_whitespace), \
952 expected_stderr_file, actual_stderr_file):
953 return failBecause('stderr mismatch')
954
955 # no problems found, this test passed
956 return passed()
957
958 def compile_cmp_asm( name, way, extra_hc_opts ):
959 print 'Compile only, extra args = ', extra_hc_opts
960 pretest_cleanup(name)
961 result = simple_build( name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0, 0)
962
963 if badResult(result):
964 return result
965
966 # the actual stderr should always match the expected, regardless
967 # of whether we expected the compilation to fail or not (successful
968 # compilations may generate warnings).
969
970 if getTestOpts().with_namebase == None:
971 namebase = name
972 else:
973 namebase = getTestOpts().with_namebase
974
975 (platform_specific, expected_asm_file) = platform_wordsize_qualify(namebase, 'asm')
976 actual_asm_file = qualify(name, 's')
977
978 if not compare_outputs('asm', two_normalisers(normalise_errmsg, normalise_asm), \
979 expected_asm_file, actual_asm_file):
980 return failBecause('asm mismatch')
981
982 # no problems found, this test passed
983 return passed()
984
985 # -----------------------------------------------------------------------------
986 # Compile-and-run tests
987
988 def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts ):
989 # print 'Compile and run, extra args = ', extra_hc_opts
990 pretest_cleanup(name)
991
992 result = extras_build( way, extra_mods, extra_hc_opts )
993 if badResult(result):
994 return result
995 extra_hc_opts = result['hc_opts']
996
997 if way == 'ghci': # interpreted...
998 return interpreter_run( name, way, extra_hc_opts, 0, top_mod )
999 else: # compiled...
1000 force = 0
1001 if extra_mods:
1002 force = 1
1003
1004 result = simple_build( name, way, extra_hc_opts, 0, top_mod, 1, 1, force)
1005 if badResult(result):
1006 return result
1007
1008 cmd = './' + name;
1009
1010 # we don't check the compiler's stderr for a compile-and-run test
1011 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1012
1013 def compile_and_run( name, way, extra_hc_opts ):
1014 return compile_and_run__( name, way, '', [], extra_hc_opts)
1015
1016 def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
1017 return compile_and_run__( name, way, top_mod, [], extra_hc_opts)
1018
1019 def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
1020 return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
1021
1022 def stats( name, way, stats_file ):
1023 opts = getTestOpts()
1024 return checkStats(stats_file, opts.stats_range_fields)
1025
1026 # -----------------------------------------------------------------------------
1027 # Check -t stats info
1028
1029 def checkStats(stats_file, range_fields):
1030 result = passed()
1031 if len(range_fields) > 0:
1032 f = open(in_testdir(stats_file))
1033 contents = f.read()
1034 f.close()
1035
1036 for (field, (expected, dev)) in range_fields.items():
1037 m = re.search('\("' + field + '", "([0-9]+)"\)', contents)
1038 if m == None:
1039 print 'Failed to find field: ', field
1040 result = failBecause('no such stats field')
continue
1041 val = int(m.group(1))
1042
1043 lowerBound = trunc( expected * ((100 - float(dev))/100));
1044 upperBound = trunc(0.5 + ceil(expected * ((100 + float(dev))/100)));
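# Worked example (numbers are illustrative): with expected = 400 and dev = 25,
# lowerBound = trunc(400 * 0.75) = 300 and
# upperBound = trunc(0.5 + ceil(400 * 1.25)) = 500, so the measured value
# must fall in [300, 500], i.e. roughly expected +/- dev per cent.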
1045
1046 if val < lowerBound:
1047 print field, 'value is too low:'
1048 print '(If this is because you have improved GHC, please'
1049 print 'update the test so that GHC doesn\'t regress again)'
1050 result = failBecause('stat too good')
1051 if val > upperBound:
1052 print field, 'value is too high:'
1053 result = failBecause('stat not good enough')
1054
1055 if val < lowerBound or val > upperBound or config.verbose >= 4:
1056 valStr = str(val)
1057 valLen = len(valStr)
1058 expectedStr = str(expected)
1059 expectedLen = len(expectedStr)
1060 length = max(map (lambda x : len(str(x)), [expected, lowerBound, upperBound, val]))
1061 def display(descr, val, extra):
1062 print descr, string.rjust(str(val), length), extra
1063 display(' Expected ' + field + ':', expected, '+/-' + str(dev) + '%')
1064 display(' Lower bound ' + field + ':', lowerBound, '')
1065 display(' Upper bound ' + field + ':', upperBound, '')
1066 display(' Actual ' + field + ':', val, '')
1067
1068 return result
1069
1070 # -----------------------------------------------------------------------------
1071 # Build a single-module program
1072
1073 def extras_build( way, extra_mods, extra_hc_opts ):
1074 for modopts in extra_mods:
1075 mod, opts = modopts
1076 result = simple_build( mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0, 0)
1077 if not (mod.endswith('.hs') or mod.endswith('.lhs')):
1078 extra_hc_opts += ' ' + replace_suffix(mod, 'o')
1079 if badResult(result):
1080 return result
1081
1082 return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1083
1084
1085 def simple_build( name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, noforce ):
1086 opts = getTestOpts()
1087 errname = add_suffix(name, 'comp.stderr')
1088 rm_no_fail( qualify(errname, '') )
1089
1090 if top_mod != '':
1091 srcname = top_mod
1092 rm_no_fail( qualify(name, '') )
1093 base, suf = os.path.splitext(top_mod)
1094 rm_no_fail( qualify(base, '') )
1095 rm_no_fail( qualify(base, 'exe') )
1096 elif addsuf:
1097 srcname = add_hs_lhs_suffix(name)
1098 rm_no_fail( qualify(name, '') )
1099 else:
1100 srcname = name
1101 rm_no_fail( qualify(name, 'o') )
1102
1103 rm_no_fail( qualify(replace_suffix(srcname, "o"), '') )
1104
1105 to_do = ''
1106 if top_mod != '':
1107 to_do = '--make '
1108 if link:
1109 to_do = to_do + '-o ' + name
1110 elif link:
1111 to_do = '-o ' + name
1112 elif opts.compile_to_hc:
1113 to_do = '-C'
1114 else:
1115 to_do = '-c' # just compile
1116
1117 stats_file = name + '.comp.stats'
1118 if len(opts.compiler_stats_range_fields) > 0:
1119 extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1120
1121 # Required by GHC 7.3+, harmless for earlier versions:
1122 if (getTestOpts().c_src or
1123 getTestOpts().objc_src or
1124 getTestOpts().objcpp_src or
1125 getTestOpts().cmm_src):
1126 extra_hc_opts += ' -no-hs-main '
1127
1128 if getTestOpts().compile_cmd_prefix == '':
1129 cmd_prefix = ''
1130 else:
1131 cmd_prefix = getTestOpts().compile_cmd_prefix + ' '
1132
1133 comp_flags = copy.copy(getTestOpts().compiler_always_flags)
1134 if noforce:
1135 comp_flags = filter(lambda f: f != '-fforce-recomp', comp_flags)
1136 if getTestOpts().outputdir != None:
1137 comp_flags.extend(["-outputdir", getTestOpts().outputdir])
1138
1139 cmd = 'cd ' + getTestOpts().testdir + " && " + cmd_prefix + "'" \
1140 + config.compiler + "' " \
1141 + join(comp_flags,' ') + ' ' \
1142 + to_do + ' ' + srcname + ' ' \
1143 + join(config.way_flags(name)[way],' ') + ' ' \
1144 + extra_hc_opts + ' ' \
1145 + opts.extra_hc_opts + ' ' \
1146 + '>' + errname + ' 2>&1'
1147
1148 result = runCmdFor(name, cmd)
1149
1150 if result != 0 and not should_fail:
1151 actual_stderr = qualify(name, 'comp.stderr')
1152 if_verbose(1,'Compile failed (status ' + `result` + ') errors were:')
1153 if_verbose_dump(1,actual_stderr)
1154
1155 # ToDo: if the sub-shell was killed by ^C, then exit
1156
1157 statsResult = checkStats(stats_file, opts.compiler_stats_range_fields)
1158
1159 if badResult(statsResult):
1160 return statsResult
1161
1162 if should_fail:
1163 if result == 0:
1164 return failBecause('exit code 0')
1165 else:
1166 if result != 0:
1167 return failBecause('exit code non-0')
1168
1169 return passed()
1170
1171 # -----------------------------------------------------------------------------
1172 # Run a program and check its output
1173 #
1174 # If testname.stdin exists, route input from that, else
1175 # from /dev/null. Route output to testname.run.stdout and
1176 # testname.run.stderr. Returns a pass/fail result for the run.
1177
1178 def simple_run( name, way, prog, args ):
1179 opts = getTestOpts()
1180
1181 # figure out what to use for stdin
1182 if opts.stdin != '':
1183 use_stdin = opts.stdin
1184 else:
1185 stdin_file = add_suffix(name, 'stdin')
1186 if os.path.exists(in_testdir(stdin_file)):
1187 use_stdin = stdin_file
1188 else:
1189 use_stdin = '/dev/null'
1190
1191 run_stdout = add_suffix(name,'run.stdout')
1192 run_stderr = add_suffix(name,'run.stderr')
1193
1194 rm_no_fail(qualify(name,'run.stdout'))
1195 rm_no_fail(qualify(name,'run.stderr'))
1196 rm_no_fail(qualify(name, 'hp'))
1197 rm_no_fail(qualify(name,'ps'))
1198 rm_no_fail(qualify(name, 'prof'))
1199
1200 my_rts_flags = rts_flags(way)
1201
1202 stats_file = name + '.stats'
1203 if len(opts.stats_range_fields) > 0:
1204 args += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1205
1206 if opts.no_stdin:
1207 stdin_comes_from = ''
1208 else:
1209 stdin_comes_from = ' <' + use_stdin
1210
1211 if opts.combined_output:
1212 redirection = ' >' + run_stdout \
1213 + ' 2>&1'
1214 else:
1215 redirection = ' >' + run_stdout \
1216 + ' 2>' + run_stderr
1217
1218 cmd = prog + ' ' + args + ' ' \
1219 + my_rts_flags + ' ' \
1220 + stdin_comes_from \
1221 + redirection
1222
1223 if opts.cmd_wrapper != None:
1224 cmd = opts.cmd_wrapper(cmd);
1225
1226 cmd = 'cd ' + opts.testdir + ' && ' + cmd
1227
1228 # run the command
1229 result = runCmdFor(name, cmd, timeout_multiplier=opts.timeout_multiplier)
1230
1231 exit_code = result >> 8
1232 signal = result & 0xff
1233
1234 # check the exit code
1235 if exit_code != opts.exit_code:
1236 print 'Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')'
1237 dump_stdout(name)
1238 dump_stderr(name)
1239 return failBecause('bad exit code')
1240
1241 check_hp = my_rts_flags.find("-h") != -1
1242 check_prof = my_rts_flags.find("-p") != -1
1243
1244 if not opts.ignore_output:
1245 bad_stderr = not opts.combined_output and not check_stderr_ok(name)
1246 bad_stdout = not check_stdout_ok(name)
1247 if bad_stderr:
1248 return failBecause('bad stderr')
1249 if bad_stdout:
1250 return failBecause('bad stdout')
1251 # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
1252 if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
1253 return failBecause('bad heap profile')
1254 if check_prof and not check_prof_ok(name):
1255 return failBecause('bad profile')
1256
1257 return checkStats(stats_file, opts.stats_range_fields)
1258
1259 def rts_flags(way):
1260 if (way == ''):
1261 return ''
1262 else:
1263 args = config.way_rts_flags[way]
1264
1265 if args == []:
1266 return ''
1267 else:
1268 return '+RTS ' + join(args,' ') + ' -RTS'
1269
1270 # -----------------------------------------------------------------------------
1271 # Run a program in the interpreter and check its output
1272
1273 def interpreter_run( name, way, extra_hc_opts, compile_only, top_mod ):
1274 outname = add_suffix(name, 'interp.stdout')
1275 errname = add_suffix(name, 'interp.stderr')
1276 rm_no_fail(outname)
1277 rm_no_fail(errname)
1278 rm_no_fail(name)
1279
1280 if (top_mod == ''):
1281 srcname = add_hs_lhs_suffix(name)
1282 else:
1283 srcname = top_mod
1284
1285 scriptname = add_suffix(name, 'genscript')
1286 qscriptname = in_testdir(scriptname)
1287 rm_no_fail(qscriptname)
1288
1289 delimiter = '===== program output begins here\n'
1290
1291 script = open(qscriptname, 'w')
1292 if not compile_only:
1293 # set the prog name and command-line args to match the compiled
1294 # environment.
1295 script.write(':set prog ' + name + '\n')
1296 script.write(':set args ' + getTestOpts().extra_run_opts + '\n')
1297 # Add marker lines to the stdout and stderr output files, so we
1298 # can separate GHCi's output from the program's.
1299 script.write(':! echo ' + delimiter)
1300 script.write(':! echo 1>&2 ' + delimiter)
1301 # Set stdout to be line-buffered to match the compiled environment.
1302 script.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
1303 # wrapping in GHC.TopHandler.runIO ensures we get the same output
1304 # in the event of an exception as for the compiled program.
1305 script.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
1306 script.close()
1307
1308 # figure out what to use for stdin
1309 if getTestOpts().stdin != '':
1310 stdin_file = in_testdir(getTestOpts().stdin)
1311 else:
1312 stdin_file = qualify(name, 'stdin')
1313
1314 if os.path.exists(stdin_file):
1315 stdin = open(stdin_file, 'r')
1316 os.system('cat ' + stdin_file + ' >>' + qscriptname)
1317
1318 script.close()
1319
1320 flags = copy.copy(getTestOpts().compiler_always_flags)
1321 if getTestOpts().outputdir != None:
1322 flags.extend(["-outputdir", getTestOpts().outputdir])
1323
1324 cmd = "'" + config.compiler + "' " \
1325 + join(flags,' ') + ' ' \
1326 + srcname + ' ' \
1327 + join(config.way_flags(name)[way],' ') + ' ' \
1328 + extra_hc_opts + ' ' \
1329 + getTestOpts().extra_hc_opts + ' ' \
1330 + '<' + scriptname + ' 1>' + outname + ' 2>' + errname
1331
1332 if getTestOpts().cmd_wrapper != None:
1333 cmd = getTestOpts().cmd_wrapper(cmd);
1334
1335 cmd = 'cd ' + getTestOpts().testdir + " && " + cmd
1336
1337 result = runCmdFor(name, cmd, timeout_multiplier=getTestOpts().timeout_multiplier)
1338
1339 exit_code = result >> 8
1340 signal = result & 0xff
1341
1342 # split the stdout into compilation/program output
1343 split_file(in_testdir(outname), delimiter,
1344 qualify(name, 'comp.stdout'),
1345 qualify(name, 'run.stdout'))
1346 split_file(in_testdir(errname), delimiter,
1347 qualify(name, 'comp.stderr'),
1348 qualify(name, 'run.stderr'))
1349
1350 # check the exit code
1351 if exit_code != getTestOpts().exit_code:
1352 print 'Wrong exit code (expected', getTestOpts().exit_code, ', actual', exit_code, ')'
1353 dump_stdout(name)
1354 dump_stderr(name)
1355 return failBecause('bad exit code')
1356
1357 # ToDo: if the sub-shell was killed by ^C, then exit
1358
1359 if getTestOpts().ignore_output or (check_stderr_ok(name) and
1360 check_stdout_ok(name)):
1361 return passed()
1362 else:
1363 return failBecause('bad stdout or stderr')
1364
1365
1366 def split_file(in_fn, delimiter, out1_fn, out2_fn):
1367 infile = open(in_fn)
1368 out1 = open(out1_fn, 'w')
1369 out2 = open(out2_fn, 'w')
1370
1371 line = infile.readline()
1372 line = re.sub('\r', '', line) # ignore Windows EOL
1373 while (re.sub('^\s*','',line) != delimiter and line != ''):
1374 out1.write(line)
1375 line = infile.readline()
1376 line = re.sub('\r', '', line)
1377 out1.close()
1378
1379 line = infile.readline()
1380 while (line != ''):
1381 out2.write(line)
1382 line = infile.readline()
1383 out2.close()
1384
1385 # -----------------------------------------------------------------------------
1386 # Utils
1387
1388 def check_stdout_ok( name ):
1389 if getTestOpts().with_namebase == None:
1390 namebase = name
1391 else:
1392 namebase = getTestOpts().with_namebase
1393
1394 actual_stdout_file = qualify(name, 'run.stdout')
1395 (platform_specific, expected_stdout_file) = platform_wordsize_qualify(namebase, 'stdout')
1396
1397 def norm(str):
1398 if platform_specific:
1399 return str
1400 else:
1401 return normalise_output(str)
1402
1403 two_norm = two_normalisers(norm, getTestOpts().extra_normaliser)
1404
1405 check_stdout = getTestOpts().check_stdout
1406 if check_stdout:
1407 return check_stdout(actual_stdout_file, two_norm)
1408
1409 return compare_outputs('stdout', \
1410 two_norm, \
1411 expected_stdout_file, actual_stdout_file)
1412
1413 def dump_stdout( name ):
1414 print 'Stdout:'
1415 print read_no_crs(qualify(name, 'run.stdout'))
1416
1417 def check_stderr_ok( name ):
1418 if getTestOpts().with_namebase == None:
1419 namebase = name
1420 else:
1421 namebase = getTestOpts().with_namebase
1422
1423 actual_stderr_file = qualify(name, 'run.stderr')
1424 (platform_specific, expected_stderr_file) = platform_wordsize_qualify(namebase, 'stderr')
1425
1426 def norm(str):
1427 if platform_specific:
1428 return str
1429 else:
1430 return normalise_errmsg(str)
1431
1432 return compare_outputs('stderr', \
1433 two_normalisers(norm, getTestOpts().extra_errmsg_normaliser), \
1434 expected_stderr_file, actual_stderr_file)
1435
1436 def dump_stderr( name ):
1437 print "Stderr:"
1438 print read_no_crs(qualify(name, 'run.stderr'))
1439
1440 def read_no_crs(file):
1441 str = ''
1442 try:
1443 h = open(file)
1444 str = h.read()
1445 h.close()
1446 except:
1447 # On Windows, if the program fails very early, it seems the
1448 # files stdout/stderr are redirected to may not get created
1449 pass
1450 return re.sub('\r', '', str)
1451
1452 def write_file(file, str):
1453 h = open(file, 'w')
1454 h.write(str)
1455 h.close()
1456
1457 def check_hp_ok(name):
1458
1459 # do not qualify for hp2ps because we should be in the right directory
1460 hp2psCmd = "cd " + getTestOpts().testdir + " && '" + config.hp2ps + "' " + name
1461
1462 hp2psResult = runCmdExitCode(hp2psCmd)
1463
1464 actual_ps_file = qualify(name, 'ps')
1465
1466 if(hp2psResult == 0):
1467 if (os.path.exists(actual_ps_file)):
1468 if gs_working:
1469 gsResult = runCmdExitCode(genGSCmd(actual_ps_file))
1470 if (gsResult == 0):
1471 return (True)
1472 else:
1473 print "hp2ps output for " + name + " is not valid PostScript"
1474 else: return (True) # assume postscript is valid without ghostscript
1475 else:
1476 print "hp2ps did not generate PostScript for " + name
1477 return (False)
1478 else:
1479 print "hp2ps error when processing heap profile for " + name
1480 return(False)
1481
1482 def check_prof_ok(name):
1483
1484 prof_file = qualify(name,'prof')
1485
1486 if not os.path.exists(prof_file):
1487 print prof_file + " does not exist"
1488 return(False)
1489
1490 if os.path.getsize(qualify(name,'prof')) == 0:
1491 print prof_file + " is empty"
1492 return(False)
1493
1494 if getTestOpts().with_namebase == None:
1495 namebase = name
1496 else:
1497 namebase = getTestOpts().with_namebase
1498
1499 (platform_specific, expected_prof_file) = \
1500 platform_wordsize_qualify(namebase, 'prof.sample')
1501
1502 # sample prof file is not required
1503 if not os.path.exists(expected_prof_file):
1504 return True
1505 else:
1506 return compare_outputs('prof', \
1507 two_normalisers(normalise_whitespace,normalise_prof), \
1508 expected_prof_file, prof_file)
1509
1510 # Compare expected output to actual output, and optionally accept the
1511 # new output. Returns true if output matched or was accepted, false
1512 # otherwise.
1513 def compare_outputs( kind, normaliser, expected_file, actual_file ):
1514 if os.path.exists(expected_file):
1515 expected_raw = read_no_crs(expected_file)
1516 # print "norm:", normaliser(expected_raw)
1517 expected_str = normaliser(expected_raw)
1518 expected_file_for_diff = expected_file
1519 else:
1520 expected_str = ''
1521 expected_file_for_diff = '/dev/null'
1522
1523 actual_raw = read_no_crs(actual_file)
1524 actual_str = normaliser(actual_raw)
1525
1526 if expected_str == actual_str:
1527 return 1
1528 else:
1529 if_verbose(1, 'Actual ' + kind + ' output differs from expected:')
1530
1531 if expected_file_for_diff == '/dev/null':
1532 expected_normalised_file = '/dev/null'
1533 else:
1534 expected_normalised_file = expected_file + ".normalised"
1535 write_file(expected_normalised_file, expected_str)
1536
1537 actual_normalised_file = actual_file + ".normalised"
1538 write_file(actual_normalised_file, actual_str)
1539
1540 # Ignore whitespace when diffing. We should only get to this
1541 # point if there are non-whitespace differences
1542 #
1543 # Note we are diffing the *actual* output, not the normalised
1544 # output. The normalised output may have whitespace squashed
1545 # (including newlines) so the diff would be hard to read.
1546 # This does mean that the diff might contain changes that
1547 # would be normalised away.
1548 if (config.verbose >= 1):
1549 r = os.system( 'diff -uw ' + expected_file_for_diff + \
1550 ' ' + actual_file )
1551
1552 # If for some reason there were no non-whitespace differences,
1553 # then do a full diff
1554 if r == 0:
1555 r = os.system( 'diff -u ' + expected_file_for_diff + \
1556 ' ' + actual_file )
1557
1558 if config.accept:
1559 if_verbose(1, 'Accepting new output.')
1560 write_file(expected_file, actual_raw)
1561 return 1
1562 else:
1563 return 0
1564
1565
1566 def normalise_whitespace( str ):
1567 # Merge contiguous whitespace characters into a single space.
1568 str = re.sub('[ \t\n]+', ' ', str)
1569 return str
1570
1571 def normalise_errmsg( str ):
1572 # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
1573 # the colon is there because it appears in error messages; this
1574 # hacky solution is used in place of more sophisticated filename
1575 # mangling
1576 str = re.sub('([^\\s])\\.exe', '\\1', str)
1577 # normalise slashes, minimise Windows/Unix filename differences
1578 str = re.sub('\\\\', '/', str)
1579 # The inplace ghc's are called ghc-stage[123] to avoid filename
1580 # collisions, so we need to normalise that to just "ghc"
1581 str = re.sub('ghc-stage[123]', 'ghc', str)
1582 # We sometimes see the name of the integer-gmp package on stderr,
1583 # but this can change (either the implementation name or the
1584 # version number), so we canonicalise it here
1585 str = re.sub('integer-[a-z]+', 'integer-impl', str)
1586 return str
1587
1588 # normalise a .prof file, so that we can reasonably compare it against
1589 # a sample. This doesn't compare any of the actual profiling data,
1590 # only the shape of the profile and the number of entries.
1591 def normalise_prof (str):
1592 # strip everything up to the line beginning "COST CENTRE"
1593 str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)
1594
1595 # strip results for CAFs, these tend to change unpredictably
1596 str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)
1597
1598 # XXX Ignore Main.main. Sometimes this appears under CAF, and
1599 # sometimes under MAIN.
1600 str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)
1601
1602 # We have something like this:
1603
1604 # MAIN MAIN 101 0 0.0 0.0 100.0 100.0
1605 # k Main 204 1 0.0 0.0 0.0 0.0
1606 # foo Main 205 1 0.0 0.0 0.0 0.0
1607 # foo.bar Main 207 1 0.0 0.0 0.0 0.0
1608
1609 # then we remove all the specific profiling data, leaving only the
1610 # cost centre name, module, and entries, to end up with this:
1611
1612 # MAIN MAIN 0
1613 # k Main 1
1614 # foo Main 1
1615 # foo.bar Main 1
1616
1617 str = re.sub('\n([ \t]*[^ \t]+)([ \t]+[^ \t]+)([ \t]+\\d+)([ \t]+\\d+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)','\n\\1 \\2 \\4',str)
1618 return str
1619
1620 def normalise_slashes_( str ):
1621 str = re.sub('\\\\', '/', str)
1622 return str
1623
1624 def normalise_exe_( str ):
1625 str = re.sub('\.exe', '', str)
1626 return str
1627
1628 def normalise_output( str ):
1629 # Remove a .exe extension (for Windows)
1630 # This can occur in error messages generated by the program.
1631 str = re.sub('([^\\s])\\.exe', '\\1', str)
1632 return str
1633
1634 def normalise_asm( str ):
1635 lines = str.split('\n')
1636 # Only keep instructions and labels not starting with a dot.
1637 metadata = re.compile('^[ \t]*\\..*$')
1638 out = []
1639 for line in lines:
1640 # Drop metadata directives (e.g. ".type")
1641 if not metadata.match(line):
1642 line = re.sub('@plt', '', line)
1643 instr = line.lstrip().split()
1644 # Drop empty lines.
1645 if not instr:
1646 continue
1647 # Drop operands, except for call instructions.
1648 elif instr[0] == 'call':
1649 out.append(instr[0] + ' ' + instr[1])
1650 else:
1651 out.append(instr[0])
1652 out = '\n'.join(out)
1653 return out
1654
1655 def if_verbose( n, str ):
1656 if config.verbose >= n:
1657 print str
1658
1659 def if_verbose_dump( n, f ):
1660 if config.verbose >= n:
1661 try:
1662 print open(f).read()
1663 except:
1664 print ''
1665
1666 def rawSystem(cmd_and_args):
1667 # We prefer subprocess.call to os.spawnv as the latter
1668 # seems to send its arguments through a shell or something
1669 # with the Windows (non-cygwin) python. An argument "a b c"
1670 # turns into three arguments ["a", "b", "c"].
1671
1672 # However, subprocess is new in python 2.4, so fall back to
1673 # using spawnv if we don't have it
1674
1675 if have_subprocess:
1676 return subprocess.call(cmd_and_args)
1677 else:
1678 return os.spawnv(os.P_WAIT, cmd_and_args[0], cmd_and_args)
1679
1680 # Note that this doesn't handle the timeout itself; it is just used for
1681 # commands that have timeout handling built-in.
1682 def rawSystemWithTimeout(cmd_and_args):
1683 r = rawSystem(cmd_and_args)
1684 if r == 98:
1685 # The python timeout program uses 98 to signal that ^C was pressed
1686 stopNow()
1687 return r
1688
1689 # cmd is a complex command in Bourne-shell syntax
1690 # e.g (cd . && 'c:/users/simonpj/darcs/HEAD/compiler/stage1/ghc-inplace' ...etc)
1691 # Hence it must ultimately be run by a Bourne shell
1692 #
1693 # Mostly it invokes the command wrapped in 'timeout' thus
1694 # timeout 300 'cd . && ...blah blah'
1695 # so it's timeout's job to invoke the Bourne shell
1696 #
1697 # But watch out for the case when there is no timeout program!
1698 # Then, when using the native Python, os.system will invoke the cmd shell
1699
1700 def runCmd( cmd ):
1701 if_verbose( 3, cmd )
1702 r = 0
1703 if config.os == 'mingw32':
1704 # On MinGW, we will always have timeout
1705 assert config.timeout_prog!=''
1706
1707 if config.timeout_prog != '':
1708 r = rawSystemWithTimeout([config.timeout_prog, str(config.timeout), cmd])
1709 else:
1710 r = os.system(cmd)
1711 return r << 8
1712
1713 def runCmdFor( name, cmd, timeout_multiplier=1.0 ):
1714 if_verbose( 3, cmd )
1715 r = 0
1716 if config.os == 'mingw32':
1717 # On MinGW, we will always have timeout
1718 assert config.timeout_prog!=''
1719 timeout = int(ceil(config.timeout * timeout_multiplier))
1720
1721 if config.timeout_prog != '':
1722 if config.check_files_written:
1723 fn = name + ".strace"
1724 r = rawSystemWithTimeout(
1725 ["strace", "-o", fn, "-fF",
1726 "-e", "creat,open,chdir,clone,vfork",
1727 config.timeout_prog, str(timeout), cmd])
1728 addTestFilesWritten(name, fn)
1729 rm_no_fail(fn)
1730 else:
1731 r = rawSystemWithTimeout([config.timeout_prog, str(timeout), cmd])
1732 else:
1733 r = os.system(cmd)
1734 return r << 8
1735
1736 def runCmdExitCode( cmd ):
1737 return (runCmd(cmd) >> 8);
1738
1739
1740 # -----------------------------------------------------------------------------
1741 # checking for files being written to by multiple tests
1742
1743 re_strace_call_end = '(\) += ([0-9]+|-1 E.*)| <unfinished ...>)$'
1744 re_strace_unavailable = re.compile('^\) += \? <unavailable>$')
1745 re_strace_pid = re.compile('^([0-9]+) +(.*)')
1746 re_strace_clone = re.compile('^(clone\(|<... clone resumed> ).*\) = ([0-9]+)$')
1747 re_strace_clone_unfinished = re.compile('^clone\( <unfinished \.\.\.>$')
1748 re_strace_vfork = re.compile('^(vfork\(\)|<\.\.\. vfork resumed> \)) += ([0-9]+)$')
1749 re_strace_vfork_unfinished = re.compile('^vfork\( <unfinished \.\.\.>$')
1750 re_strace_chdir = re.compile('^chdir\("([^"]*)"(\) += 0| <unfinished ...>)$')
1751 re_strace_chdir_resumed = re.compile('^<\.\.\. chdir resumed> \) += 0$')
1752 re_strace_open = re.compile('^open\("([^"]*)", ([A-Z_|]*)(, [0-9]+)?' + re_strace_call_end)
1753 re_strace_open_resumed = re.compile('^<... open resumed> ' + re_strace_call_end)
1754 re_strace_ignore_sigchild = re.compile('^--- SIGCHLD \(Child exited\) @ 0 \(0\) ---$')
1755 re_strace_ignore_sigvtalarm = re.compile('^--- SIGVTALRM \(Virtual timer expired\) @ 0 \(0\) ---$')
1756 re_strace_ignore_sigint = re.compile('^--- SIGINT \(Interrupt\) @ 0 \(0\) ---$')
1757 re_strace_ignore_sigfpe = re.compile('^--- SIGFPE \(Floating point exception\) @ 0 \(0\) ---$')
1758 re_strace_ignore_sigsegv = re.compile('^--- SIGSEGV \(Segmentation fault\) @ 0 \(0\) ---$')
1759 re_strace_ignore_sigpipe = re.compile('^--- SIGPIPE \(Broken pipe\) @ 0 \(0\) ---$')
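# For reference, a typical strace line that these regexps are meant to match
# looks roughly like this (illustrative, not real output):
#   12345 open("T1234.hi", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 3
# i.e. a pid, then the syscall with its arguments, then the result.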
1760
1761 # Files that are read or written but shouldn't be:
1762 # * ghci_history shouldn't be read or written by tests
1763 # * things under package.conf.d shouldn't be written by tests
1764 bad_file_usages = {}
1765
1766 # Mapping from tests to the list of files that they write
1767 files_written = {}
1768
1769 # Mapping from tests to the list of files that they write but don't clean
1770 files_written_not_removed = {}
1771
1772 def add_bad_file_usage(name, file):
1773 try:
1774 if not file in bad_file_usages[name]:
1775 bad_file_usages[name].append(file)
1776 except:
1777 bad_file_usages[name] = [file]
1778
1779 def mkPath(curdir, path):
1780 # Given the current full directory is 'curdir', what is the full
1781 # path to 'path'?
1782 return os.path.realpath(os.path.join(curdir, path))
1783
1784 def addTestFilesWritten(name, fn):
1785 if config.use_threads:
1786 with t.lockFilesWritten:
1787 addTestFilesWrittenHelper(name, fn)
1788 else:
1789 addTestFilesWrittenHelper(name, fn)
1790
1791 def addTestFilesWrittenHelper(name, fn):
1792 started = False
1793 working_directories = {}
1794
1795 with open(fn, 'r') as f:
1796 for line in f:
1797 m_pid = re_strace_pid.match(line)
1798 if m_pid:
1799 pid = m_pid.group(1)
1800 content = m_pid.group(2)
1801 elif re_strace_unavailable.match(line):
1802 continue
1803 else:
1804 framework_fail(name, 'strace', "Can't find pid in strace line: " + line)
1805
1806 m_open = re_strace_open.match(content)
1807 m_chdir = re_strace_chdir.match(content)
1808 m_clone = re_strace_clone.match(content)
1809 m_vfork = re_strace_vfork.match(content)
1810
1811 if not started:
1812 working_directories[pid] = os.getcwd()
1813 started = True
1814
1815 if m_open:
1816 file = m_open.group(1)
1817 file = mkPath(working_directories[pid], file)
1818 if file.endswith("ghci_history"):
1819 add_bad_file_usage(name, file)
1820 elif not file in ['/dev/tty', '/dev/null'] and not file.startswith("/tmp/ghc"):
1821 flags = m_open.group(2).split('|')
1822 if 'O_WRONLY' in flags or 'O_RDWR' in flags:
1823 if re.search('package\.conf\.d', file): # search, not match: 'file' is an absolute path
1824 add_bad_file_usage(name, file)
1825 else:
1826 try:
1827 if not file in files_written[name]:
1828 files_written[name].append(file)
1829 except:
1830 files_written[name] = [file]
1831 elif 'O_RDONLY' in flags:
1832 pass
1833 else:
1834 framework_fail(name, 'strace', "Can't understand flags in open strace line: " + line)
1835 elif m_chdir:
1836 # We optimistically assume that unfinished chdir's are going to succeed
1837 dir = m_chdir.group(1)
1838 working_directories[pid] = mkPath(working_directories[pid], dir)
1839 elif m_clone:
1840 working_directories[m_clone.group(2)] = working_directories[pid]
1841 elif m_vfork:
1842 working_directories[m_vfork.group(2)] = working_directories[pid]
1843 elif re_strace_open_resumed.match(content):
1844 pass
1845 elif re_strace_chdir_resumed.match(content):
1846 pass
1847 elif re_strace_vfork_unfinished.match(content):
1848 pass
1849 elif re_strace_clone_unfinished.match(content):
1850 pass
1851 elif re_strace_ignore_sigchild.match(content):
1852 pass
1853 elif re_strace_ignore_sigvtalarm.match(content):
1854 pass
1855 elif re_strace_ignore_sigint.match(content):
1856 pass
1857 elif re_strace_ignore_sigfpe.match(content):
1858 pass
1859 elif re_strace_ignore_sigsegv.match(content):
1860 pass
1861 elif re_strace_ignore_sigpipe.match(content):
1862 pass
1863 else:
1864 framework_fail(name, 'strace', "Can't understand strace line: " + line)
1865
1866 def checkForFilesWrittenProblems(file):
1867 foundProblem = False
1868
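# files_written maps test -> [files]; invert it to file -> [tests] so that
# files written by more than one test stand out.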
1869 files_written_inverted = {}
1870 for t in files_written.keys():
1871 for f in files_written[t]:
1872 try:
1873 files_written_inverted[f].append(t)
1874 except:
1875 files_written_inverted[f] = [t]
1876
1877 for f in files_written_inverted.keys():
1878 if len(files_written_inverted[f]) > 1:
1879 if not foundProblem:
1880 foundProblem = True
1881 file.write("\n")
1882 file.write("\nSome files are written by multiple tests:\n")
1883 file.write(" " + f + " (" + str(files_written_inverted[f]) + ")\n")
1884 if foundProblem:
1885 file.write("\n")
1886
1887 # -----
1888
1889 if len(files_written_not_removed) > 0:
1890 file.write("\n")
1891 file.write("\nSome files written but not removed:\n")
1892 tests = files_written_not_removed.keys()
1893 tests.sort()
1894 for t in tests:
1895 for f in files_written_not_removed[t]:
1896 file.write(" " + t + ": " + f + "\n")
1897 file.write("\n")
1898
1899 # -----
1900
1901 if len(bad_file_usages) > 0:
1902 file.write("\n")
1903 file.write("\nSome bad file usages:\n")
1904 tests = bad_file_usages.keys()
1905 tests.sort()
1906 for t in tests:
1907 for f in bad_file_usages[t]:
1908 file.write(" " + t + ": " + f + "\n")
1909 file.write("\n")
1910
1911 # -----------------------------------------------------------------------------
1912 # checking if ghostscript is available for checking the output of hp2ps
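# The check below runs gs in batch mode on a known-good .ps file (which should
# succeed) and on a known-bad one (which should fail); gs is only trusted for
# the hp2ps tests if both behave as expected.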
1913
1914 def genGSCmd(psfile):
1915 return (config.gs + ' -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE ' + psfile);
1916
1917 def gsNotWorking():
1918 global gs_working
1919 print "GhostScript not available for hp2ps tests"
1920
1921 global gs_working
1922 gs_working = 0
1923 if config.have_profiling:
1924 if config.gs != '':
1925 resultGood = runCmdExitCode(genGSCmd(config.confdir + '/good.ps'));
1926 if resultGood == 0:
1927 resultBad = runCmdExitCode(genGSCmd(config.confdir + '/bad.ps'));
1928 if resultBad != 0:
1929 print "GhostScript available for hp2ps tests"
1930 gs_working = 1;
1931 else:
1932 gsNotWorking();
1933 else:
1934 gsNotWorking();
1935 else:
1936 gsNotWorking();
1937
1938 def rm_no_fail( file ):
1939 try:
1940 os.remove( file )
1941 except:
1942 pass
1943
1944 def add_suffix( name, suffix ):
1945 if suffix == '':
1946 return name
1947 else:
1948 return name + '.' + suffix
1949
1950 def add_hs_lhs_suffix(name):
1951 if getTestOpts().c_src:
1952 return add_suffix(name, 'c')
1953 elif getTestOpts().cmm_src:
1954 return add_suffix(name, 'cmm')
1955 elif getTestOpts().objc_src:
1956 return add_suffix(name, 'm')
1957 elif getTestOpts().objcpp_src:
1958 return add_suffix(name, 'mm')
1959 elif getTestOpts().literate:
1960 return add_suffix(name, 'lhs')
1961 else:
1962 return add_suffix(name, 'hs')
1963
1964 def replace_suffix( name, suffix ):
1965 base, suf = os.path.splitext(name)
1966 return base + '.' + suffix
1967
1968 def in_testdir( name ):
1969 return (getTestOpts().testdir + '/' + name)
1970
1971 def qualify( name, suff ):
1972 return in_testdir(add_suffix(name, suff))
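# Illustrative: with a testdir of "./lib/should_run",
#   qualify('T123', 'run.stdout') == './lib/should_run/T123.run.stdout'
#   qualify('T123', '')           == './lib/should_run/T123'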
1973
1974
1975 # Finding the sample output. The filename is of the form
1976 #
1977 # <test>.stdout[-<compiler>][-<version>][-ws-<wordsize>][-<platform>]
1978 #
1979 # and we pick the most specific version available. The <version> is
1980 # the major version of the compiler (e.g. 6.8.2 would be "6.8"). For
1981 # more fine-grained control use if_compiler_lt().
1982 #
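# For instance (illustrative names), for test "T123" with suff "stdout", using
# ghc 7.6 on 32-bit i386-unknown-linux, the candidates tried include, from most
# to least specific:
#   T123.stdout-ghc-7.6-ws-32-i386-unknown-linux
#   T123.stdout-ws-32-linux
#   T123.stdout-ghc-7.6
#   T123.stdout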
1983 def platform_wordsize_qualify( name, suff ):
1984
1985 basepath = qualify(name, suff)
1986
1987 paths = [(platformSpecific, basepath + comp + vers + ws + plat)
1988 for (platformSpecific, plat) in [(1, '-' + config.platform),
1989 (1, '-' + config.os),
1990 (0, '')]
1991 for ws in ['-ws-' + config.wordsize, '']
1992 for comp in ['-' + config.compiler_type, '']
1993 for vers in ['-' + config.compiler_maj_version, '']]
1994
1995 dir = glob.glob(basepath + '*')
1996 dir = map (lambda d: normalise_slashes_(d), dir)
1997
1998 for (platformSpecific, f) in paths:
1999 if f in dir:
2000 return (platformSpecific,f)
2001
2002 return (0, basepath)
2003
2004 # Clean up prior to the test, so that we can't spuriously conclude
2005 # that it passed on the basis of old run outputs.
2006 def pretest_cleanup(name):
2007 if getTestOpts().outputdir is not None:
2008 odir = in_testdir(getTestOpts().outputdir)
2009 try:
2010 shutil.rmtree(odir)
2011 except:
2012 pass
2013 os.mkdir(odir)
2014
2015 rm_no_fail(qualify(name,'interp.stderr'))
2016 rm_no_fail(qualify(name,'interp.stdout'))
2017 rm_no_fail(qualify(name,'comp.stderr'))
2018 rm_no_fail(qualify(name,'comp.stdout'))
2019 rm_no_fail(qualify(name,'run.stderr'))
2020 rm_no_fail(qualify(name,'run.stdout'))
2021 rm_no_fail(qualify(name,'tix'))
2022 rm_no_fail(qualify(name,'exe.tix'))
2023 # simple_build zaps the following:
2024 # rm_no_fail(qualify(name, "o"))
2025 # rm_no_fail(qualify(name, ""))
2026 # not interested in the return code
2027
2028 # -----------------------------------------------------------------------------
2029 # Return a list of all the files ending in '.T' below the directory dir.
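# e.g. (illustrative paths) findTFiles(['./typecheck']) might return
# ['./typecheck/should_compile/all.T', './typecheck/should_run/all.T'].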
2030
2031 def findTFiles(roots):
2032 return concat(map(findTFiles_,roots))
2033
2034 def findTFiles_(path):
2035 if os.path.isdir(path):
2036 paths = map(lambda x, p=path: p + '/' + x, os.listdir(path))
2037 return findTFiles(paths)
2038 elif path[-2:] == '.T':
2039 return [path]
2040 else:
2041 return []
2042
2043 # -----------------------------------------------------------------------------
2044 # Output a test summary to the specified file object
2045
2046 def summary(t, file):
2047
2048 file.write('\n')
2049 printUnexpectedTests(file, [t.unexpected_passes, t.unexpected_failures])
2050 file.write('OVERALL SUMMARY for test run started at '
2051 + time.strftime("%c %Z", t.start_time) + '\n'
2052 + string.rjust(str(datetime.timedelta(seconds=
2053 round(time.time() - time.mktime(t.start_time)))), 8)
2054 + ' spent to go through\n'
2055 + string.rjust(`t.total_tests`, 8)
2056 + ' total tests, which gave rise to\n'
2057 + string.rjust(`t.total_test_cases`, 8)
2058 + ' test cases, of which\n'
2059 + string.rjust(`t.n_tests_skipped`, 8)
2060 + ' were skipped\n'
2061 + '\n'
2062 + string.rjust(`t.n_missing_libs`, 8)
2063 + ' had missing libraries\n'
2064 + string.rjust(`t.n_expected_passes`, 8)
2065 + ' expected passes\n'
2066 + string.rjust(`t.n_expected_failures`, 8)
2067 + ' expected failures\n'
2068 + '\n'
2069 + string.rjust(`t.n_framework_failures`, 8)
2070 + ' caused framework failures\n'
2071 + string.rjust(`t.n_unexpected_passes`, 8)
2072 + ' unexpected passes\n'
2073 + string.rjust(`t.n_unexpected_failures`, 8)
2074 + ' unexpected failures\n'
2075 + '\n')
2076
2077 if t.n_unexpected_passes > 0:
2078 file.write('Unexpected passes:\n')
2079 printPassingTestInfosSummary(file, t.unexpected_passes)
2080
2081 if t.n_unexpected_failures > 0:
2082 file.write('Unexpected failures:\n')
2083 printFailingTestInfosSummary(file, t.unexpected_failures)
2084
2085 if config.check_files_written:
2086 checkForFilesWrittenProblems(file)
2087
2088 if stopping():
2089 file.write('WARNING: Testsuite run was terminated early\n')
2090
2091 def printUnexpectedTests(file, testInfoss):
2092 unexpected = []
2093 for testInfos in testInfoss:
2094 directories = testInfos.keys()
2095 for directory in directories:
2096 tests = testInfos[directory].keys()
2097 unexpected += tests
2098 if unexpected != []:
2099 file.write('Unexpected results from:\n')
2100 file.write('TEST="' + ' '.join(unexpected) + '"\n')
2101 file.write('\n')
2102
2103 def printPassingTestInfosSummary(file, testInfos):
2104 directories = testInfos.keys()
2105 directories.sort()
2106 maxDirLen = max(map ((lambda x : len(x)), directories))
2107 for directory in directories:
2108 tests = testInfos[directory].keys()
2109 tests.sort()
2110 for test in tests:
2111 file.write(' ' + directory.ljust(maxDirLen + 2) + test + \
2112 ' (' + join(testInfos[directory][test],',') + ')\n')
2113 file.write('\n')
2114
2115 def printFailingTestInfosSummary(file, testInfos):
2116 directories = testInfos.keys()
2117 directories.sort()
2118 maxDirLen = max(map ((lambda x : len(x)), directories))
2119 for directory in directories:
2120 tests = testInfos[directory].keys()
2121 tests.sort()
2122 for test in tests:
2123 reasons = testInfos[directory][test].keys()
2124 for reason in reasons:
2125 file.write(' ' + directory.ljust(maxDirLen + 2) + test + \
2126 ' [' + reason + ']' + \
2127 ' (' + join(testInfos[directory][test][reason],',') + ')\n')
2128 file.write('\n')
2129
2130 def getStdout(cmd):
2131 if have_subprocess:
2132 p = subprocess.Popen(cmd,
2133 stdout=subprocess.PIPE,
2134 stderr=subprocess.PIPE)
2135 (stdout, stderr) = p.communicate()
2136 r = p.wait()
2137 if r != 0:
2138 raise Exception("Command failed: " + str(cmd))
2139 if stderr != '':
2140 raise Exception("stderr from command: " + str(cmd))
2141 return stdout
2142 else:
2143 raise Exception("Need subprocess to get stdout, but don't have it")
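# Illustrative use (assumes ghc-pkg understands these flags):
#   getStdout([config.ghc_pkg, 'list', '--simple-output'])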