Add n-ary version of `two_normalisers` to testsuite lib
[ghc.git] / testsuite / driver / testlib.py
1 #
2 # (c) Simon Marlow 2002
3 #
4
5 from __future__ import print_function
6
7 import shutil
8 import sys
9 import os
10 import errno
11 import string
12 import re
13 import traceback
14 import time
15 import datetime
16 import copy
17 import glob
18 from math import ceil, trunc
19
20 have_subprocess = False
21 try:
22 import subprocess
23 have_subprocess = True
24 except:
25 print("Warning: subprocess not found, will fall back to spawnv")
26
27 from testglobals import *
28 from testutil import *
29
30 if config.use_threads:
31 import threading
32 try:
33 import thread
34 except ImportError: # Python 3
35 import _thread as thread
36
37 global wantToStop
38 wantToStop = False
39 def stopNow():
40 global wantToStop
41 wantToStop = True
42 def stopping():
43 return wantToStop
44
45 # Options valid for the current test only (these get reset to
46 # testdir_testopts after each test).
47
48 global testopts_local
49 if config.use_threads:
50 testopts_local = threading.local()
51 else:
52 class TestOpts_Local:
53 pass
54 testopts_local = TestOpts_Local()
55
56 def getTestOpts():
57 return testopts_local.x
58
59 def setLocalTestOpts(opts):
60 global testopts_local
61 testopts_local.x=opts
62
63 def isStatsTest():
64 opts = getTestOpts()
65 return len(opts.compiler_stats_range_fields) > 0 or len(opts.stats_range_fields) > 0
66
67
68 # This can be called at the top of a file of tests, to set default test options
69 # for the following tests.
70 def setTestOpts( f ):
71 global thisdir_settings
72 thisdir_settings = [thisdir_settings, f]
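# Illustrative sketch (not part of the original module): a directory's .T file
# might start with
#
#   setTestOpts([extra_hc_opts('-XScopedTypeVariables'), omit_ways(['ghci'])])
#
# so that every test declared later in that file picks up those defaults.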
73
74 # -----------------------------------------------------------------------------
75 # Canned setup functions for common cases. eg. for a test you might say
76 #
77 # test('test001', normal, compile, [''])
78 #
79 # to run it without any options, but change it to
80 #
81 # test('test001', expect_fail, compile, [''])
82 #
83 # to expect failure for this test.
84
85 def normal( name, opts ):
86 return;
87
88 def skip( name, opts ):
89 opts.skip = 1
90
91 def expect_fail( name, opts ):
92 opts.expect = 'fail';
93
94 def reqlib( lib ):
95 return lambda name, opts, l=lib: _reqlib (name, opts, l )
96
97 # Cache the results of looking to see if we have a library or not.
98 # This makes quite a difference, especially on Windows.
99 have_lib = {}
100
101 def _reqlib( name, opts, lib ):
102 if lib in have_lib:
103 got_it = have_lib[lib]
104 else:
105 if have_subprocess:
106 # By preference we use subprocess, as the alternative uses
107 # /dev/null which mingw doesn't have.
108 p = subprocess.Popen([config.ghc_pkg, '--no-user-package-db', 'describe', lib],
109 stdout=subprocess.PIPE,
110 stderr=subprocess.PIPE)
111 # read from stdout and stderr to avoid blocking due to
112 # buffers filling
113 p.communicate()
114 r = p.wait()
115 else:
116 r = os.system(config.ghc_pkg + ' describe ' + lib
117 + ' > /dev/null 2> /dev/null')
118 got_it = r == 0
119 have_lib[lib] = got_it
120
121 if not got_it:
122 opts.expect = 'missing-lib'
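# Illustrative usage (hypothetical test name):
#
#   test('T1234', reqlib('mtl'), compile, [''])
#
# If the mtl package is not visible to ghc-pkg, the test is counted as
# 'missing-lib' rather than as a failure.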
123
124 def req_profiling( name, opts ):
125 if not config.have_profiling:
126 opts.expect = 'fail'
127
128 def req_shared_libs( name, opts ):
129 if not config.have_shared_libs:
130 opts.expect = 'fail'
131
132 def req_interp( name, opts ):
133 if not config.have_interp:
134 opts.expect = 'fail'
135
136 def req_smp( name, opts ):
137 if not config.have_smp:
138 opts.expect = 'fail'
139
140 def ignore_output( name, opts ):
141 opts.ignore_output = 1
142
143 def no_stdin( name, opts ):
144 opts.no_stdin = 1
145
146 def combined_output( name, opts ):
147 opts.combined_output = True
148
149 # -----
150
151 def expect_fail_for( ways ):
152 return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )
153
154 def _expect_fail_for( name, opts, ways ):
155 opts.expect_fail_for = ways
156
157 def expect_broken( bug ):
158 return lambda name, opts, b=bug: _expect_broken (name, opts, b )
159
160 def _expect_broken( name, opts, bug ):
161 record_broken(name, opts, bug)
162 opts.expect = 'fail';
163
164 def expect_broken_for( bug, ways ):
165 return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )
166
167 def _expect_broken_for( name, opts, bug, ways ):
168 record_broken(name, opts, bug)
169 opts.expect_fail_for = ways
170
171 def record_broken(name, opts, bug):
172 global brokens
173 me = (bug, opts.testdir, name)
174 if not me in brokens:
175 brokens.append(me)
176
177 # -----
178
179 def omit_ways( ways ):
180 return lambda name, opts, w=ways: _omit_ways( name, opts, w )
181
182 def _omit_ways( name, opts, ways ):
183 opts.omit_ways = ways
184
185 # -----
186
187 def only_ways( ways ):
188 return lambda name, opts, w=ways: _only_ways( name, opts, w )
189
190 def _only_ways( name, opts, ways ):
191 opts.only_ways = ways
192
193 # -----
194
195 def extra_ways( ways ):
196 return lambda name, opts, w=ways: _extra_ways( name, opts, w )
197
198 def _extra_ways( name, opts, ways ):
199 opts.extra_ways = ways
200
201 # -----
202
203 def omit_compiler_types( compiler_types ):
204 return lambda name, opts, c=compiler_types: _omit_compiler_types(name, opts, c)
205
206 def _omit_compiler_types( name, opts, compiler_types ):
207 if config.compiler_type in compiler_types:
208 opts.skip = 1
209
210 # -----
211
212 def only_compiler_types( compiler_types ):
213 return lambda name, opts, c=compiler_types: _only_compiler_types(name, opts, c)
214
215 def _only_compiler_types( name, opts, compiler_types ):
216 if config.compiler_type not in compiler_types:
217 opts.skip = 1
218
219 # -----
220
221 def set_stdin( file ):
222 return lambda name, opts, f=file: _set_stdin(name, opts, f);
223
224 def _set_stdin( name, opts, f ):
225 opts.stdin = f
226
227 # -----
228
229 def exit_code( val ):
230 return lambda name, opts, v=val: _exit_code(name, opts, v);
231
232 def _exit_code( name, opts, v ):
233 opts.exit_code = v
234
235 def signal_exit_code( val ):
236 if opsys('solaris2'):
237 return exit_code( val );
238 else:
239 # When an application running on Linux receives a fatal error
240 # signal, its exit code is encoded as 128 + the signal value.
241 # See http://www.tldp.org/LDP/abs/html/exitcodes.html
242 # I assume that Mac OS X behaves the same way; at least the
243 # Mac OS X builder's behaviour suggests so.
244 return exit_code( val+128 );
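# Illustrative sketch (hypothetical test name): a program expected to die from
# SIGABRT (signal 6) would be declared as
#
#   test('T1234', signal_exit_code(6), compile_and_run, [''])
#
# which expects exit code 6 on Solaris and 128 + 6 = 134 everywhere else.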
245
246 # -----
247
248 def timeout_multiplier( val ):
249 return lambda name, opts, v=val: _timeout_multiplier(name, opts, v)
250
251 def _timeout_multiplier( name, opts, v ):
252 opts.timeout_multiplier = v
253
254 # -----
255
256 def extra_run_opts( val ):
257 return lambda name, opts, v=val: _extra_run_opts(name, opts, v);
258
259 def _extra_run_opts( name, opts, v ):
260 opts.extra_run_opts = v
261
262 # -----
263
264 def extra_hc_opts( val ):
265 return lambda name, opts, v=val: _extra_hc_opts(name, opts, v);
266
267 def _extra_hc_opts( name, opts, v ):
268 opts.extra_hc_opts = v
269
270 # -----
271
272 def extra_clean( files ):
273 return lambda name, opts, v=files: _extra_clean(name, opts, v);
274
275 def _extra_clean( name, opts, v ):
276 opts.clean_files = v
277
278 # -----
279
280 def stats_num_field( field, expecteds ):
281 return lambda name, opts, f=field, e=expecteds: _stats_num_field(name, opts, f, e);
282
283 def _stats_num_field( name, opts, field, expecteds ):
284 if field in opts.stats_range_fields:
285 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
286
287 if type(expecteds) is list:
288 for (b, expected, dev) in expecteds:
289 if b:
290 opts.stats_range_fields[field] = (expected, dev)
291 return
292 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
293
294 else:
295 (expected, dev) = expecteds
296 opts.stats_range_fields[field] = (expected, dev)
297
298 def compiler_stats_num_field( field, expecteds ):
299 return lambda name, opts, f=field, e=expecteds: _compiler_stats_num_field(name, opts, f, e);
300
301 def _compiler_stats_num_field( name, opts, field, expecteds ):
302 if field in opts.compiler_stats_range_fields:
303 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
304
305 # Compiler performance numbers change when debugging is on, making the results
306 # useless and confusing. Therefore, skip if debugging is on.
307 if compiler_debugged():
308 skip(name, opts)
309
310 for (b, expected, dev) in expecteds:
311 if b:
312 opts.compiler_stats_range_fields[field] = (expected, dev)
313 return
314
315 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
316
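# Illustrative sketch of the 'expecteds' argument (the numbers are invented):
#
#   stats_num_field('peak_megabytes_allocated', (2, 1))
#       # a plain (expected, deviation-in-percent) pair
#   compiler_stats_num_field('bytes allocated',
#       [(wordsize(32), 45648, 10),
#        (wordsize(64), 49456, 10)])
#       # a list of (guard, expected, deviation) triples; the first triple
#       # whose guard is True supplies the expected value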
317 # -----
318
319 def when(b, f):
320 # When config.list_broken is on, we want to see all expect_broken calls,
321 # so we always do f
322 if b or config.list_broken:
323 return f
324 else:
325 return normal
326
327 def unless(b, f):
328 return when(not b, f)
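# Illustrative usage (hypothetical test names and ticket number):
#
#   test('T1234', when(opsys('mingw32'), skip), compile, [''])
#   test('T5678', unless(have_dynamic(), expect_broken(5678)), compile, [''])
#
# i.e. skip T1234 on Windows, and mark T5678 as expect_broken unless dynamic
# libraries are available.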
329
330 def doing_ghci():
331 return 'ghci' in config.run_ways
332
333 def ghci_dynamic( ):
334 return config.ghc_dynamic
335
336 def fast():
337 return config.fast
338
339 def platform( plat ):
340 return config.platform == plat
341
342 def opsys( os ):
343 return config.os == os
344
345 def arch( arch ):
346 return config.arch == arch
347
348 def wordsize( ws ):
349 return config.wordsize == str(ws)
350
351 def msys( ):
352 return config.msys
353
354 def cygwin( ):
355 return config.cygwin
356
357 def have_vanilla( ):
358 return config.have_vanilla
359
360 def have_dynamic( ):
361 return config.have_dynamic
362
363 def have_profiling( ):
364 return config.have_profiling
365
366 def in_tree_compiler( ):
367 return config.in_tree_compiler
368
369 def compiler_type( compiler ):
370 return config.compiler_type == compiler
371
372 def compiler_lt( compiler, version ):
373 return config.compiler_type == compiler and \
374 version_lt(config.compiler_version, version)
375
376 def compiler_le( compiler, version ):
377 return config.compiler_type == compiler and \
378 version_le(config.compiler_version, version)
379
380 def compiler_gt( compiler, version ):
381 return config.compiler_type == compiler and \
382 version_gt(config.compiler_version, version)
383
384 def compiler_ge( compiler, version ):
385 return config.compiler_type == compiler and \
386 version_ge(config.compiler_version, version)
387
388 def unregisterised( ):
389 return config.unregisterised
390
391 def compiler_profiled( ):
392 return config.compiler_profiled
393
394 def compiler_debugged( ):
395 return config.compiler_debugged
396
397 def tag( t ):
398 return t in config.compiler_tags
399
400 # ---
401
402 def namebase( nb ):
403 return lambda opts, nb=nb: _namebase(opts, nb)
404
405 def _namebase( opts, nb ):
406 opts.with_namebase = nb
407
408 # ---
409
410 def high_memory_usage(name, opts):
411 opts.alone = True
412
413 # If a test is for a multi-CPU race, then running the test alone
414 # increases the chance that we'll actually see it.
415 def multi_cpu_race(name, opts):
416 opts.alone = True
417
418 # ---
419 def literate( name, opts ):
420 opts.literate = 1;
421
422 def c_src( name, opts ):
423 opts.c_src = 1;
424
425 def objc_src( name, opts ):
426 opts.objc_src = 1;
427
428 def objcpp_src( name, opts ):
429 opts.objcpp_src = 1;
430
431 def cmm_src( name, opts ):
432 opts.cmm_src = 1;
433
434 def outputdir( odir ):
435 return lambda name, opts, d=odir: _outputdir(name, opts, d)
436
437 def _outputdir( name, opts, odir ):
438 opts.outputdir = odir;
439
440 # ----
441
442 def pre_cmd( cmd ):
443 return lambda name, opts, c=cmd: _pre_cmd(name, opts, cmd)
444
445 def _pre_cmd( name, opts, cmd ):
446 opts.pre_cmd = cmd
447
448 # ----
449
450 def clean_cmd( cmd ):
451 return lambda name, opts, c=cmd: _clean_cmd(name, opts, cmd)
452
453 def _clean_cmd( name, opts, cmd ):
454 opts.clean_cmd = cmd
455
456 # ----
457
458 def cmd_prefix( prefix ):
459 return lambda name, opts, p=prefix: _cmd_prefix(name, opts, prefix)
460
461 def _cmd_prefix( name, opts, prefix ):
462 opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd;
463
464 # ----
465
466 def cmd_wrapper( fun ):
467 return lambda name, opts, f=fun: _cmd_wrapper(name, opts, fun)
468
469 def _cmd_wrapper( name, opts, fun ):
470 opts.cmd_wrapper = fun
471
472 # ----
473
474 def compile_cmd_prefix( prefix ):
475 return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, prefix)
476
477 def _compile_cmd_prefix( name, opts, prefix ):
478 opts.compile_cmd_prefix = prefix
479
480 # ----
481
482 def check_stdout( f ):
483 return lambda name, opts, f=f: _check_stdout(name, opts, f)
484
485 def _check_stdout( name, opts, f ):
486 opts.check_stdout = f
487
488 # ----
489
490 def normalise_slashes( name, opts ):
491 opts.extra_normaliser = normalise_slashes_
492
493 def normalise_exe( name, opts ):
494 opts.extra_normaliser = normalise_exe_
495
496 def normalise_fun( fun ):
497 return lambda name, opts, f=fun: _normalise_fun(name, opts, f)
498
499 def _normalise_fun( name, opts, f ):
500 opts.extra_normaliser = f
501
502 def normalise_errmsg_fun( fun ):
503 return lambda name, opts, f=fun: _normalise_errmsg_fun(name, opts, f)
504
505 def _normalise_errmsg_fun( name, opts, f ):
506 opts.extra_errmsg_normaliser = f
507
508 def two_normalisers(f, g):
509 """
510 See also `join_normalisers` for an n-ary version of `two_normalisers`
511 """
512 return lambda x, f=f, g=g: f(g(x))
513
514 def join_normalisers(*a):
515 """
516 Compose functions, e.g.
517
518 join_normalisers(f1,f2,f3)
519
520 is the same as
521
522 lambda x: f1(f2(f3(x)))
523 """
524
525 assert all(callable(f) for f in a)
526
527 fn = lambda x:x # identity function
528 for f in a:
529 fn = lambda x,f=f,fn=fn: fn(f(x))
530 return fn
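# Illustrative equivalence, using normalisers defined later in this module:
#
#   two_normalisers(normalise_errmsg, normalise_whitespace)
#   join_normalisers(normalise_errmsg, normalise_whitespace)
#
# both build a function that applies normalise_whitespace first and then
# normalise_errmsg to its argument.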
531
532 # ----
533 # Apply a setup function, or a (possibly nested) list of setup functions,
# to a test's options
534
535 def executeSetups(fs, name, opts):
536 if type(fs) is list:
537 # If we have a list of setups, then execute each one
538 for f in fs:
539 executeSetups(f, name, opts)
540 else:
541 # fs is a single function, so just apply it
542 fs(name, opts)
543
544 # -----------------------------------------------------------------------------
545 # The current directory of tests
546
547 def newTestDir( dir ):
548 global thisdir_settings
549 # reset the options for this test directory
550 thisdir_settings = lambda name, opts, dir=dir: _newTestDir( name, opts, dir )
551
552 def _newTestDir( name, opts, dir ):
553 opts.testdir = dir
554 opts.compiler_always_flags = config.compiler_always_flags
555
556 # -----------------------------------------------------------------------------
557 # Actually doing tests
558
559 parallelTests = []
560 aloneTests = []
561 allTestNames = set([])
562
563 def runTest (opts, name, func, args):
564 ok = 0
565
566 if config.use_threads:
567 t.thread_pool.acquire()
568 try:
569 while config.threads<(t.running_threads+1):
570 t.thread_pool.wait()
571 t.running_threads = t.running_threads+1
572 ok=1
573 t.thread_pool.release()
574 thread.start_new_thread(test_common_thread, (name, opts, func, args))
575 except:
576 if not ok:
577 t.thread_pool.release()
578 else:
579 test_common_work (name, opts, func, args)
580
581 # name :: String
582 # setup :: TestOpts -> IO ()
583 def test (name, setup, func, args):
584 global aloneTests
585 global parallelTests
586 global allTestNames
587 global thisdir_settings
588 if name in allTestNames:
589 framework_fail(name, 'duplicate', 'There are multiple tests with this name')
590 if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
591 framework_fail(name, 'bad_name', 'This test has an invalid name')
592
593 # Make a deep copy of the default_testopts, as we need our own copy
594 # of any dictionaries etc inside it. Otherwise, if one test modifies
595 # them, all tests will see the modified version!
596 myTestOpts = copy.deepcopy(default_testopts)
597
598 executeSetups([thisdir_settings, setup], name, myTestOpts)
599
600 thisTest = lambda : runTest(myTestOpts, name, func, args)
601 if myTestOpts.alone:
602 aloneTests.append(thisTest)
603 else:
604 parallelTests.append(thisTest)
605 allTestNames.add(name)
606
607 if config.use_threads:
608 def test_common_thread(name, opts, func, args):
609 t.lock.acquire()
610 try:
611 test_common_work(name,opts,func,args)
612 finally:
613 t.lock.release()
614 t.thread_pool.acquire()
615 t.running_threads = t.running_threads - 1
616 t.thread_pool.notify()
617 t.thread_pool.release()
618
619 def get_package_cache_timestamp():
620 if config.package_conf_cache_file == '':
621 return 0.0
622 else:
623 try:
624 return os.stat(config.package_conf_cache_file).st_mtime
625 except:
626 return 0.0
627
628
629 def test_common_work (name, opts, func, args):
630 try:
631 t.total_tests = t.total_tests+1
632 setLocalTestOpts(opts)
633
634 package_conf_cache_file_start_timestamp = get_package_cache_timestamp()
635
636 # All the ways we might run this test
637 if func == compile or func == multimod_compile:
638 all_ways = config.compile_ways
639 elif func == compile_and_run or func == multimod_compile_and_run:
640 all_ways = config.run_ways
641 elif func == ghci_script:
642 if 'ghci' in config.run_ways:
643 all_ways = ['ghci']
644 else:
645 all_ways = []
646 else:
647 all_ways = ['normal']
648
649 # A test itself can request extra ways by setting opts.extra_ways
650 all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]
651
652 t.total_test_cases = t.total_test_cases + len(all_ways)
653
654 ok_way = lambda way: \
655 not getTestOpts().skip \
656 and (config.only == [] or name in config.only) \
657 and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
658 and (config.cmdline_ways == [] or way in config.cmdline_ways) \
659 and (not (config.skip_perf_tests and isStatsTest())) \
660 and way not in getTestOpts().omit_ways
661
662 # The ways we are actually going to run (i.e. those not filtered out)
663 do_ways = list(filter (ok_way,all_ways))
664
665 # In fast mode, we skip all but one way
666 if config.fast and len(do_ways) > 0:
667 do_ways = [do_ways[0]]
668
669 if not config.clean_only:
670 # Run the required tests...
671 for way in do_ways:
672 if stopping():
673 break
674 do_test (name, way, func, args)
675
676 for way in all_ways:
677 if way not in do_ways:
678 skiptest (name,way)
679
680 if getTestOpts().cleanup != '' and (config.clean_only or do_ways != []):
681 pretest_cleanup(name)
682 clean([name + suff for suff in [
683 '', '.exe', '.exe.manifest', '.genscript',
684 '.stderr.normalised', '.stdout.normalised',
685 '.run.stderr.normalised', '.run.stdout.normalised',
686 '.comp.stderr.normalised', '.comp.stdout.normalised',
687 '.interp.stderr.normalised', '.interp.stdout.normalised',
688 '.stats', '.comp.stats',
689 '.hi', '.o', '.prof', '.exe.prof', '.hc',
690 '_stub.h', '_stub.c', '_stub.o',
691 '.hp', '.exe.hp', '.ps', '.aux', '.hcr', '.eventlog']])
692
693 if func == multi_compile or func == multi_compile_fail:
694 extra_mods = args[1]
695 clean([replace_suffix(fx[0],'o') for fx in extra_mods])
696 clean([replace_suffix(fx[0], 'hi') for fx in extra_mods])
697
698
699 clean(getTestOpts().clean_files)
700
701 if getTestOpts().outputdir != None:
702 odir = in_testdir(getTestOpts().outputdir)
703 try:
704 shutil.rmtree(odir)
705 except:
706 pass
707
708 try:
709 shutil.rmtree(in_testdir('.hpc.' + name))
710 except:
711 pass
712
713 try:
714 cleanCmd = getTestOpts().clean_cmd
715 if cleanCmd != None:
716 result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + cleanCmd)
717 if result != 0:
718 framework_fail(name, 'cleaning', 'clean-command failed: ' + str(result))
719 except:
720 framework_fail(name, 'cleaning', 'clean-command exception')
721
722 package_conf_cache_file_end_timestamp = get_package_cache_timestamp();
723
724 if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
725 framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))
726
727 try:
728 for f in files_written[name]:
729 if os.path.exists(f):
730 try:
731 if not f in files_written_not_removed[name]:
732 files_written_not_removed[name].append(f)
733 except:
734 files_written_not_removed[name] = [f]
735 except:
736 pass
737 except Exception as e:
738 framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
739
740 def clean(strs):
741 for str in strs:
742 for name in glob.glob(in_testdir(str)):
743 clean_full_path(name)
744
745 def clean_full_path(name):
746 try:
747 # Remove files...
748 os.remove(name)
749 except OSError as e1:
750 try:
751 # ... and empty directories
752 os.rmdir(name)
753 except OSError as e2:
754 # We don't want to fail here, but we do want to know
755 # what went wrong, so print out the exceptions.
756 # ENOENT isn't a problem, though, as we clean files
757 # that don't necessarily exist.
758 if e1.errno != errno.ENOENT:
759 print(e1)
760 if e2.errno != errno.ENOENT:
761 print(e2)
762
763 def do_test(name, way, func, args):
764 full_name = name + '(' + way + ')'
765
766 try:
767 if_verbose(2, "=====> %s %d of %d %s " % \
768 (full_name, t.total_tests, len(allTestNames), \
769 [t.n_unexpected_passes, \
770 t.n_unexpected_failures, \
771 t.n_framework_failures]))
772
773 if config.use_threads:
774 t.lock.release()
775
776 try:
777 preCmd = getTestOpts().pre_cmd
778 if preCmd != None:
779 result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + preCmd)
780 if result != 0:
781 framework_fail(name, way, 'pre-command failed: ' + str(result))
782 except:
783 framework_fail(name, way, 'pre-command exception')
784
785 try:
786 result = func(*[name,way] + args)
787 finally:
788 if config.use_threads:
789 t.lock.acquire()
790
791 if getTestOpts().expect != 'pass' and \
792 getTestOpts().expect != 'fail' and \
793 getTestOpts().expect != 'missing-lib':
794 framework_fail(name, way, 'bad expected ' + getTestOpts().expect)
795
796 try:
797 passFail = result['passFail']
798 except:
799 passFail = 'No passFail found'
800
801 if passFail == 'pass':
802 if getTestOpts().expect == 'pass' \
803 and way not in getTestOpts().expect_fail_for:
804 t.n_expected_passes = t.n_expected_passes + 1
805 if name in t.expected_passes:
806 t.expected_passes[name].append(way)
807 else:
808 t.expected_passes[name] = [way]
809 else:
810 if_verbose(1, '*** unexpected pass for %s' % full_name)
811 t.n_unexpected_passes = t.n_unexpected_passes + 1
812 addPassingTestInfo(t.unexpected_passes, getTestOpts().testdir, name, way)
813 elif passFail == 'fail':
814 if getTestOpts().expect == 'pass' \
815 and way not in getTestOpts().expect_fail_for:
816 if_verbose(1, '*** unexpected failure for %s' % full_name)
817 t.n_unexpected_failures = t.n_unexpected_failures + 1
818 reason = result['reason']
819 addFailingTestInfo(t.unexpected_failures, getTestOpts().testdir, name, reason, way)
820 else:
821 if getTestOpts().expect == 'missing-lib':
822 t.n_missing_libs = t.n_missing_libs + 1
823 if name in t.missing_libs:
824 t.missing_libs[name].append(way)
825 else:
826 t.missing_libs[name] = [way]
827 else:
828 t.n_expected_failures = t.n_expected_failures + 1
829 if name in t.expected_failures:
830 t.expected_failures[name].append(way)
831 else:
832 t.expected_failures[name] = [way]
833 else:
834 framework_fail(name, way, 'bad result ' + passFail)
835 except KeyboardInterrupt:
836 stopNow()
837 except:
838 framework_fail(name, way, 'do_test exception')
839 traceback.print_exc()
840
841 def addPassingTestInfo (testInfos, directory, name, way):
842 directory = re.sub('^\\.[/\\\\]', '', directory)
843
844 if not directory in testInfos:
845 testInfos[directory] = {}
846
847 if not name in testInfos[directory]:
848 testInfos[directory][name] = []
849
850 testInfos[directory][name].append(way)
851
852 def addFailingTestInfo (testInfos, directory, name, reason, way):
853 directory = re.sub('^\\.[/\\\\]', '', directory)
854
855 if not directory in testInfos:
856 testInfos[directory] = {}
857
858 if not name in testInfos[directory]:
859 testInfos[directory][name] = {}
860
861 if not reason in testInfos[directory][name]:
862 testInfos[directory][name][reason] = []
863
864 testInfos[directory][name][reason].append(way)
865
866 def skiptest (name, way):
867 # print 'Skipping test \"', name, '\"'
868 t.n_tests_skipped = t.n_tests_skipped + 1
869 if name in t.tests_skipped:
870 t.tests_skipped[name].append(way)
871 else:
872 t.tests_skipped[name] = [way]
873
874 def framework_fail( name, way, reason ):
875 full_name = name + '(' + way + ')'
876 if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
877 t.n_framework_failures = t.n_framework_failures + 1
878 if name in t.framework_failures:
879 t.framework_failures[name].append(way)
880 else:
881 t.framework_failures[name] = [way]
882
883 def badResult(result):
884 try:
885 if result['passFail'] == 'pass':
886 return False
887 return True
888 except:
889 return True
890
891 def passed():
892 return {'passFail': 'pass'}
893
894 def failBecause(reason):
895 return {'passFail': 'fail', 'reason': reason}
896
897 # -----------------------------------------------------------------------------
898 # Generic command tests
899
900 # A generic command test is expected to run and exit successfully.
901 #
902 # The expected exit code can be changed via exit_code() as normal, and
903 # the expected stdout/stderr are stored in <testname>.stdout and
904 # <testname>.stderr. The output of the command can be ignored
905 # altogether by applying the ignore_output setup function to the
906 # test.
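#
# Illustrative usage (hypothetical test name; many such tests drive a Makefile):
#
#   test('T1234', normal, run_command,
#        ['$MAKE -s --no-print-directory T1234'])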
907
908 def run_command( name, way, cmd ):
909 return simple_run( name, '', cmd, '' )
910
911 # -----------------------------------------------------------------------------
912 # GHCi tests
913
914 def ghci_script_without_flag(flag):
915 def apply(name, way, script):
916 overrides = filter(lambda f: f != flag, getTestOpts().compiler_always_flags)
917 return ghci_script_override_default_flags(overrides)(name, way, script)
918
919 return apply
920
921 def ghci_script_override_default_flags(overrides):
922 def apply(name, way, script):
923 return ghci_script(name, way, script, overrides)
924
925 return apply
926
927 def ghci_script( name, way, script, override_flags = None ):
928 # Use overridden default flags when given
929 if override_flags:
930 default_flags = override_flags
931 else:
932 default_flags = getTestOpts().compiler_always_flags
933
934 # filter out -fforce-recomp from compiler_always_flags, because we're
935 # actually testing the recompilation behaviour in the GHCi tests.
936 flags = [f for f in default_flags if f != '-fforce-recomp']
937 flags.append(getTestOpts().extra_hc_opts)
938 if getTestOpts().outputdir != None:
939 flags.extend(["-outputdir", getTestOpts().outputdir])
940
941 # We pass HC and HC_OPTS as environment variables, so that the
942 # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
943 cmd = "HC='" + config.compiler + "' " + \
944 "HC_OPTS='" + ' '.join(flags) + "' " + \
945 "'" + config.compiler + "'" + \
946 ' --interactive -v0 -ignore-dot-ghci ' + \
947 ' '.join(flags)
948
949 getTestOpts().stdin = script
950 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
951
952 # -----------------------------------------------------------------------------
953 # Compile-only tests
954
955 def compile_override_default_flags(overrides):
956 def apply(name, way, extra_opts):
957 return do_compile(name, way, 0, '', [], extra_opts, overrides)
958
959 return apply
960
961 def compile_fail_override_default_flags(overrides):
962 def apply(name, way, extra_opts):
963 return do_compile(name, way, 1, '', [], extra_opts, overrides)
964
965 return apply
966
967 def compile_without_flag(flag):
968 def apply(name, way, extra_opts):
969 overrides = filter(lambda f: f != flag, getTestOpts().compiler_always_flags)
970 return compile_override_default_flags(overrides)(name, way, extra_opts)
971
972 return apply
973
974 def compile_fail_without_flag(flag):
975 def apply(name, way, extra_opts):
976 overrides = filter(lambda f: f != flag, getTestOpts().compiler_always_flags)
977 return compile_fail_override_default_flags(overrides)(name, way, extra_opts)
978
979 return apply
980
981 def compile( name, way, extra_hc_opts ):
982 return do_compile( name, way, 0, '', [], extra_hc_opts )
983
984 def compile_fail( name, way, extra_hc_opts ):
985 return do_compile( name, way, 1, '', [], extra_hc_opts )
986
987 def multimod_compile( name, way, top_mod, extra_hc_opts ):
988 return do_compile( name, way, 0, top_mod, [], extra_hc_opts )
989
990 def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
991 return do_compile( name, way, 1, top_mod, [], extra_hc_opts )
992
993 def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
994 return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)
995
996 def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
997 return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
998
999 def do_compile( name, way, should_fail, top_mod, extra_mods, extra_hc_opts, override_flags = None ):
1000 # print 'Compile only, extra args = ', extra_hc_opts
1001 pretest_cleanup(name)
1002
1003 result = extras_build( way, extra_mods, extra_hc_opts )
1004 if badResult(result):
1005 return result
1006 extra_hc_opts = result['hc_opts']
1007
1008 force = 0
1009 if extra_mods:
1010 force = 1
1011 result = simple_build( name, way, extra_hc_opts, should_fail, top_mod, 0, 1, force, override_flags )
1012
1013 if badResult(result):
1014 return result
1015
1016 # the actual stderr should always match the expected, regardless
1017 # of whether we expected the compilation to fail or not (successful
1018 # compilations may generate warnings).
1019
1020 if getTestOpts().with_namebase == None:
1021 namebase = name
1022 else:
1023 namebase = getTestOpts().with_namebase
1024
1025 (platform_specific, expected_stderr_file) = platform_wordsize_qualify(namebase, 'stderr')
1026 actual_stderr_file = qualify(name, 'comp.stderr')
1027
1028 if not compare_outputs('stderr',
1029 join_normalisers(getTestOpts().extra_errmsg_normaliser,
1030 normalise_errmsg,
1031 normalise_whitespace),
1032 expected_stderr_file, actual_stderr_file):
1033 return failBecause('stderr mismatch')
1034
1035 # no problems found, this test passed
1036 return passed()
1037
1038 def compile_cmp_asm( name, way, extra_hc_opts ):
1039 print('Compile only, extra args = ', extra_hc_opts)
1040 pretest_cleanup(name)
1041 result = simple_build( name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0, 0)
1042
1043 if badResult(result):
1044 return result
1045
1046 # the actual stderr should always match the expected, regardless
1047 # of whether we expected the compilation to fail or not (successful
1048 # compilations may generate warnings).
1049
1050 if getTestOpts().with_namebase == None:
1051 namebase = name
1052 else:
1053 namebase = getTestOpts().with_namebase
1054
1055 (platform_specific, expected_asm_file) = platform_wordsize_qualify(namebase, 'asm')
1056 actual_asm_file = qualify(name, 's')
1057
1058 if not compare_outputs('asm', two_normalisers(normalise_errmsg, normalise_asm), \
1059 expected_asm_file, actual_asm_file):
1060 return failBecause('asm mismatch')
1061
1062 # no problems found, this test passed
1063 return passed()
1064
1065 # -----------------------------------------------------------------------------
1066 # Compile-and-run tests
1067
1068 def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts ):
1069 # print 'Compile and run, extra args = ', extra_hc_opts
1070 pretest_cleanup(name)
1071
1072 result = extras_build( way, extra_mods, extra_hc_opts )
1073 if badResult(result):
1074 return result
1075 extra_hc_opts = result['hc_opts']
1076
1077 if way == 'ghci': # interpreted...
1078 return interpreter_run( name, way, extra_hc_opts, 0, top_mod )
1079 else: # compiled...
1080 force = 0
1081 if extra_mods:
1082 force = 1
1083
1084 result = simple_build( name, way, extra_hc_opts, 0, top_mod, 1, 1, force)
1085 if badResult(result):
1086 return result
1087
1088 cmd = './' + name;
1089
1090 # we don't check the compiler's stderr for a compile-and-run test
1091 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1092
1093 def compile_and_run( name, way, extra_hc_opts ):
1094 return compile_and_run__( name, way, '', [], extra_hc_opts)
1095
1096 def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
1097 return compile_and_run__( name, way, top_mod, [], extra_hc_opts)
1098
1099 def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
1100 return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
1101
1102 def stats( name, way, stats_file ):
1103 opts = getTestOpts()
1104 return checkStats(name, way, stats_file, opts.stats_range_fields)
1105
1106 # -----------------------------------------------------------------------------
1107 # Check -t stats info
1108
1109 def checkStats(name, way, stats_file, range_fields):
1110 full_name = name + '(' + way + ')'
1111
1112 result = passed()
1113 if len(range_fields) > 0:
1114 f = open(in_testdir(stats_file))
1115 contents = f.read()
1116 f.close()
1117
1118 for (field, (expected, dev)) in range_fields.items():
1119 m = re.search('\("' + field + '", "([0-9]+)"\)', contents)
1120 if m == None:
1121 print('Failed to find field: ', field)
1122 result = failBecause('no such stats field')
continue
1123 val = int(m.group(1))
1124
1125 lowerBound = trunc( expected * ((100 - float(dev))/100))
1126 upperBound = trunc(0.5 + ceil(expected * ((100 + float(dev))/100)))
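# e.g. (illustrative) expected = 1000 and dev = 5 give
# lowerBound = 950 and upperBound = 1050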
1127
1128 deviation = round(((float(val) * 100)/ expected) - 100, 1)
1129
1130 if val < lowerBound:
1131 print(field, 'value is too low:')
1132 print('(If this is because you have improved GHC, please')
1133 print('update the test so that GHC doesn\'t regress again)')
1134 result = failBecause('stat too good')
1135 if val > upperBound:
1136 print(field, 'value is too high:')
1137 result = failBecause('stat not good enough')
1138
1139 if val < lowerBound or val > upperBound or config.verbose >= 4:
1140 valStr = str(val)
1141 valLen = len(valStr)
1142 expectedStr = str(expected)
1143 expectedLen = len(expectedStr)
1144 length = max(len(str(x)) for x in [expected, lowerBound, upperBound, val])
1145
1146 def display(descr, val, extra):
1147 print(descr, str(val).rjust(length), extra)
1148
1149 display(' Expected ' + full_name + ' ' + field + ':', expected, '+/-' + str(dev) + '%')
1150 display(' Lower bound ' + full_name + ' ' + field + ':', lowerBound, '')
1151 display(' Upper bound ' + full_name + ' ' + field + ':', upperBound, '')
1152 display(' Actual ' + full_name + ' ' + field + ':', val, '')
1153 if val != expected:
1154 display(' Deviation ' + full_name + ' ' + field + ':', deviation, '%')
1155
1156 return result
1157
1158 # -----------------------------------------------------------------------------
1159 # Build a single-module program
1160
1161 def extras_build( way, extra_mods, extra_hc_opts ):
1162 for modopts in extra_mods:
1163 mod, opts = modopts
1164 result = simple_build( mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0, 0)
1165 if not (mod.endswith('.hs') or mod.endswith('.lhs')):
1166 extra_hc_opts += ' ' + replace_suffix(mod, 'o')
1167 if badResult(result):
1168 return result
1169
1170 return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1171
1172
1173 def simple_build( name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, noforce, override_flags = None ):
1174 opts = getTestOpts()
1175 errname = add_suffix(name, 'comp.stderr')
1176 rm_no_fail( qualify(errname, '') )
1177
1178 if top_mod != '':
1179 srcname = top_mod
1180 rm_no_fail( qualify(name, '') )
1181 base, suf = os.path.splitext(top_mod)
1182 rm_no_fail( qualify(base, '') )
1183 rm_no_fail( qualify(base, 'exe') )
1184 elif addsuf:
1185 srcname = add_hs_lhs_suffix(name)
1186 rm_no_fail( qualify(name, '') )
1187 else:
1188 srcname = name
1189 rm_no_fail( qualify(name, 'o') )
1190
1191 rm_no_fail( qualify(replace_suffix(srcname, "o"), '') )
1192
1193 to_do = ''
1194 if top_mod != '':
1195 to_do = '--make '
1196 if link:
1197 to_do = to_do + '-o ' + name
1198 elif link:
1199 to_do = '-o ' + name
1200 elif opts.compile_to_hc:
1201 to_do = '-C'
1202 else:
1203 to_do = '-c' # just compile
1204
1205 stats_file = name + '.comp.stats'
1206 if len(opts.compiler_stats_range_fields) > 0:
1207 extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1208
1209 # Required by GHC 7.3+, harmless for earlier versions:
1210 if (getTestOpts().c_src or
1211 getTestOpts().objc_src or
1212 getTestOpts().objcpp_src or
1213 getTestOpts().cmm_src):
1214 extra_hc_opts += ' -no-hs-main '
1215
1216 if getTestOpts().compile_cmd_prefix == '':
1217 cmd_prefix = ''
1218 else:
1219 cmd_prefix = getTestOpts().compile_cmd_prefix + ' '
1220
1221 if override_flags:
1222 comp_flags = copy.copy(override_flags)
1223 else:
1224 comp_flags = copy.copy(getTestOpts().compiler_always_flags)
1225
1226 if noforce:
1227 comp_flags = [f for f in comp_flags if f != '-fforce-recomp']
1228 if getTestOpts().outputdir != None:
1229 comp_flags.extend(["-outputdir", getTestOpts().outputdir])
1230
1231 cmd = 'cd ' + getTestOpts().testdir + " && " + cmd_prefix + "'" \
1232 + config.compiler + "' " \
1233 + ' '.join(comp_flags) + ' ' \
1234 + to_do + ' ' + srcname + ' ' \
1235 + ' '.join(config.way_flags(name)[way]) + ' ' \
1236 + extra_hc_opts + ' ' \
1237 + opts.extra_hc_opts + ' ' \
1238 + '>' + errname + ' 2>&1'
1239
1240 result = runCmdFor(name, cmd)
1241
1242 if result != 0 and not should_fail:
1243 actual_stderr = qualify(name, 'comp.stderr')
1244 if_verbose(1,'Compile failed (status ' + repr(result) + ') errors were:')
1245 if_verbose_dump(1,actual_stderr)
1246
1247 # ToDo: if the sub-shell was killed by ^C, then exit
1248
1249 statsResult = checkStats(name, way, stats_file, opts.compiler_stats_range_fields)
1250
1251 if badResult(statsResult):
1252 return statsResult
1253
1254 if should_fail:
1255 if result == 0:
1256 return failBecause('exit code 0')
1257 else:
1258 if result != 0:
1259 return failBecause('exit code non-0')
1260
1261 return passed()
1262
1263 # -----------------------------------------------------------------------------
1264 # Run a program and check its output
1265 #
1266 # If testname.stdin exists, route input from that, else
1267 # from /dev/null. Route output to testname.run.stdout and
1268 # testname.run.stderr. Returns the exit code of the run.
1269
1270 def simple_run( name, way, prog, args ):
1271 opts = getTestOpts()
1272
1273 # figure out what to use for stdin
1274 if opts.stdin != '':
1275 use_stdin = opts.stdin
1276 else:
1277 stdin_file = add_suffix(name, 'stdin')
1278 if os.path.exists(in_testdir(stdin_file)):
1279 use_stdin = stdin_file
1280 else:
1281 use_stdin = '/dev/null'
1282
1283 run_stdout = add_suffix(name,'run.stdout')
1284 run_stderr = add_suffix(name,'run.stderr')
1285
1286 rm_no_fail(qualify(name,'run.stdout'))
1287 rm_no_fail(qualify(name,'run.stderr'))
1288 rm_no_fail(qualify(name, 'hp'))
1289 rm_no_fail(qualify(name,'ps'))
1290 rm_no_fail(qualify(name, 'prof'))
1291
1292 my_rts_flags = rts_flags(way)
1293
1294 stats_file = name + '.stats'
1295 if len(opts.stats_range_fields) > 0:
1296 args += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1297
1298 if opts.no_stdin:
1299 stdin_comes_from = ''
1300 else:
1301 stdin_comes_from = ' <' + use_stdin
1302
1303 if opts.combined_output:
1304 redirection = ' >' + run_stdout \
1305 + ' 2>&1'
1306 else:
1307 redirection = ' >' + run_stdout \
1308 + ' 2>' + run_stderr
1309
1310 cmd = prog + ' ' + args + ' ' \
1311 + my_rts_flags + ' ' \
1312 + stdin_comes_from \
1313 + redirection
1314
1315 if opts.cmd_wrapper != None:
1316 cmd = opts.cmd_wrapper(cmd);
1317
1318 cmd = 'cd ' + opts.testdir + ' && ' + cmd
1319
1320 # run the command
1321 result = runCmdFor(name, cmd, timeout_multiplier=opts.timeout_multiplier)
1322
1323 exit_code = result >> 8
1324 signal = result & 0xff
1325
1326 # check the exit code
1327 if exit_code != opts.exit_code:
1328 print('Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')')
1329 dump_stdout(name)
1330 dump_stderr(name)
1331 return failBecause('bad exit code')
1332
1333 check_hp = my_rts_flags.find("-h") != -1
1334 check_prof = my_rts_flags.find("-p") != -1
1335
1336 if not opts.ignore_output:
1337 bad_stderr = not opts.combined_output and not check_stderr_ok(name)
1338 bad_stdout = not check_stdout_ok(name)
1339 if bad_stderr:
1340 return failBecause('bad stderr')
1341 if bad_stdout:
1342 return failBecause('bad stdout')
1343 # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
1344 if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
1345 return failBecause('bad heap profile')
1346 if check_prof and not check_prof_ok(name):
1347 return failBecause('bad profile')
1348
1349 return checkStats(name, way, stats_file, opts.stats_range_fields)
1350
1351 def rts_flags(way):
1352 if (way == ''):
1353 return ''
1354 else:
1355 args = config.way_rts_flags[way]
1356
1357 if args == []:
1358 return ''
1359 else:
1360 return '+RTS ' + ' '.join(args) + ' -RTS'
1361
1362 # -----------------------------------------------------------------------------
1363 # Run a program in the interpreter and check its output
1364
1365 def interpreter_run( name, way, extra_hc_opts, compile_only, top_mod ):
1366 outname = add_suffix(name, 'interp.stdout')
1367 errname = add_suffix(name, 'interp.stderr')
1368 rm_no_fail(outname)
1369 rm_no_fail(errname)
1370 rm_no_fail(name)
1371
1372 if (top_mod == ''):
1373 srcname = add_hs_lhs_suffix(name)
1374 else:
1375 srcname = top_mod
1376
1377 scriptname = add_suffix(name, 'genscript')
1378 qscriptname = in_testdir(scriptname)
1379 rm_no_fail(qscriptname)
1380
1381 delimiter = '===== program output begins here\n'
1382
1383 script = open(qscriptname, 'w')
1384 if not compile_only:
1385 # set the prog name and command-line args to match the compiled
1386 # environment.
1387 script.write(':set prog ' + name + '\n')
1388 script.write(':set args ' + getTestOpts().extra_run_opts + '\n')
1389 # Add marker lines to the stdout and stderr output files, so we
1390 # can separate GHCi's output from the program's.
1391 script.write(':! echo ' + delimiter)
1392 script.write(':! echo 1>&2 ' + delimiter)
1393 # Set stdout to be line-buffered to match the compiled environment.
1394 script.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
1395 # wrapping in GHC.TopHandler.runIOFastExit ensures we get the same output
1396 # in the event of an exception as for the compiled program.
1397 script.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
1398 script.close()
1399
1400 # figure out what to use for stdin
1401 if getTestOpts().stdin != '':
1402 stdin_file = in_testdir(getTestOpts().stdin)
1403 else:
1404 stdin_file = qualify(name, 'stdin')
1405
1406 if os.path.exists(stdin_file):
1407 stdin = open(stdin_file, 'r')
1408 os.system('cat ' + stdin_file + ' >>' + qscriptname)
1409
1410 script.close()
1411
1412 flags = copy.copy(getTestOpts().compiler_always_flags)
1413 if getTestOpts().outputdir != None:
1414 flags.extend(["-outputdir", getTestOpts().outputdir])
1415
1416 cmd = "'" + config.compiler + "' " \
1417 + ' '.join(flags) + ' ' \
1418 + srcname + ' ' \
1419 + ' '.join(config.way_flags(name)[way]) + ' ' \
1420 + extra_hc_opts + ' ' \
1421 + getTestOpts().extra_hc_opts + ' ' \
1422 + '<' + scriptname + ' 1>' + outname + ' 2>' + errname
1423
1424 if getTestOpts().cmd_wrapper != None:
1425 cmd = getTestOpts().cmd_wrapper(cmd);
1426
1427 cmd = 'cd ' + getTestOpts().testdir + " && " + cmd
1428
1429 result = runCmdFor(name, cmd, timeout_multiplier=getTestOpts().timeout_multiplier)
1430
1431 exit_code = result >> 8
1432 signal = result & 0xff
1433
1434 # split the stdout into compilation/program output
1435 split_file(in_testdir(outname), delimiter,
1436 qualify(name, 'comp.stdout'),
1437 qualify(name, 'run.stdout'))
1438 split_file(in_testdir(errname), delimiter,
1439 qualify(name, 'comp.stderr'),
1440 qualify(name, 'run.stderr'))
1441
1442 # check the exit code
1443 if exit_code != getTestOpts().exit_code:
1444 print('Wrong exit code (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
1445 dump_stdout(name)
1446 dump_stderr(name)
1447 return failBecause('bad exit code')
1448
1449 # ToDo: if the sub-shell was killed by ^C, then exit
1450
1451 if getTestOpts().ignore_output or (check_stderr_ok(name) and
1452 check_stdout_ok(name)):
1453 return passed()
1454 else:
1455 return failBecause('bad stdout or stderr')
1456
1457
1458 def split_file(in_fn, delimiter, out1_fn, out2_fn):
1459 infile = open(in_fn)
1460 out1 = open(out1_fn, 'w')
1461 out2 = open(out2_fn, 'w')
1462
1463 line = infile.readline()
1464 line = re.sub('\r', '', line) # ignore Windows EOL
1465 while (re.sub('^\s*','',line) != delimiter and line != ''):
1466 out1.write(line)
1467 line = infile.readline()
1468 line = re.sub('\r', '', line)
1469 out1.close()
1470
1471 line = infile.readline()
1472 while (line != ''):
1473 out2.write(line)
1474 line = infile.readline()
1475 out2.close()
1476
1477 # -----------------------------------------------------------------------------
1478 # Utils
1479
1480 def check_stdout_ok( name ):
1481 if getTestOpts().with_namebase == None:
1482 namebase = name
1483 else:
1484 namebase = getTestOpts().with_namebase
1485
1486 actual_stdout_file = qualify(name, 'run.stdout')
1487 (platform_specific, expected_stdout_file) = platform_wordsize_qualify(namebase, 'stdout')
1488
1489 def norm(str):
1490 if platform_specific:
1491 return str
1492 else:
1493 return normalise_output(str)
1494
1495 two_norm = two_normalisers(norm, getTestOpts().extra_normaliser)
1496
1497 check_stdout = getTestOpts().check_stdout
1498 if check_stdout:
1499 return check_stdout(actual_stdout_file, two_norm)
1500
1501 return compare_outputs('stdout', \
1502 two_norm, \
1503 expected_stdout_file, actual_stdout_file)
1504
1505 def dump_stdout( name ):
1506 print('Stdout:')
1507 print(read_no_crs(qualify(name, 'run.stdout')))
1508
1509 def check_stderr_ok( name ):
1510 if getTestOpts().with_namebase == None:
1511 namebase = name
1512 else:
1513 namebase = getTestOpts().with_namebase
1514
1515 actual_stderr_file = qualify(name, 'run.stderr')
1516 (platform_specific, expected_stderr_file) = platform_wordsize_qualify(namebase, 'stderr')
1517
1518 def norm(str):
1519 if platform_specific:
1520 return str
1521 else:
1522 return normalise_errmsg(str)
1523
1524 return compare_outputs('stderr', \
1525 two_normalisers(norm, getTestOpts().extra_errmsg_normaliser), \
1526 expected_stderr_file, actual_stderr_file)
1527
1528 def dump_stderr( name ):
1529 print("Stderr:")
1530 print(read_no_crs(qualify(name, 'run.stderr')))
1531
1532 def read_no_crs(file):
1533 str = ''
1534 try:
1535 h = open(file)
1536 str = h.read()
1537 h.close
1538 except:
1539 # On Windows, if the program fails very early, it seems the
1540 # files that stdout/stderr are redirected to may not get created
1541 pass
1542 return re.sub('\r', '', str)
1543
1544 def write_file(file, str):
1545 h = open(file, 'w')
1546 h.write(str)
1547 h.close
1548
1549 def check_hp_ok(name):
1550
1551 # do not qualify for hp2ps because we should be in the right directory
1552 hp2psCmd = "cd " + getTestOpts().testdir + " && '" + config.hp2ps + "' " + name
1553
1554 hp2psResult = runCmdExitCode(hp2psCmd)
1555
1556 actual_ps_file = qualify(name, 'ps')
1557
1558 if(hp2psResult == 0):
1559 if (os.path.exists(actual_ps_file)):
1560 if gs_working:
1561 gsResult = runCmdExitCode(genGSCmd(actual_ps_file))
1562 if (gsResult == 0):
1563 return (True)
1564 else:
1565 print("hp2ps output for " + name + " is not valid PostScript")
1566 else: return (True) # assume postscript is valid without ghostscript
1567 else:
1568 print("hp2ps did not generate PostScript for " + name)
1569 return (False)
1570 else:
1571 print("hp2ps error when processing heap profile for " + name)
1572 return(False)
1573
1574 def check_prof_ok(name):
1575
1576 prof_file = qualify(name,'prof')
1577
1578 if not os.path.exists(prof_file):
1579 print(prof_file + " does not exist")
1580 return(False)
1581
1582 if os.path.getsize(qualify(name,'prof')) == 0:
1583 print(prof_file + " is empty")
1584 return(False)
1585
1586 if getTestOpts().with_namebase == None:
1587 namebase = name
1588 else:
1589 namebase = getTestOpts().with_namebase
1590
1591 (platform_specific, expected_prof_file) = \
1592 platform_wordsize_qualify(namebase, 'prof.sample')
1593
1594 # sample prof file is not required
1595 if not os.path.exists(expected_prof_file):
1596 return True
1597 else:
1598 return compare_outputs('prof', \
1599 two_normalisers(normalise_whitespace,normalise_prof), \
1600 expected_prof_file, prof_file)
1601
1602 # Compare expected output to actual output, and optionally accept the
1603 # new output. Returns true if output matched or was accepted, false
1604 # otherwise.
1605 def compare_outputs( kind, normaliser, expected_file, actual_file ):
1606 if os.path.exists(expected_file):
1607 expected_raw = read_no_crs(expected_file)
1608 # print "norm:", normaliser(expected_raw)
1609 expected_str = normaliser(expected_raw)
1610 expected_file_for_diff = expected_file
1611 else:
1612 expected_str = ''
1613 expected_file_for_diff = '/dev/null'
1614
1615 actual_raw = read_no_crs(actual_file)
1616 actual_str = normaliser(actual_raw)
1617
1618 if expected_str == actual_str:
1619 return 1
1620 else:
1621 if_verbose(1, 'Actual ' + kind + ' output differs from expected:')
1622
1623 if expected_file_for_diff == '/dev/null':
1624 expected_normalised_file = '/dev/null'
1625 else:
1626 expected_normalised_file = expected_file + ".normalised"
1627 write_file(expected_normalised_file, expected_str)
1628
1629 actual_normalised_file = actual_file + ".normalised"
1630 write_file(actual_normalised_file, actual_str)
1631
1632 # Ignore whitespace when diffing. We should only get to this
1633 # point if there are non-whitespace differences
1634 #
1635 # Note we are diffing the *actual* output, not the normalised
1636 # output. The normalised output may have whitespace squashed
1637 # (including newlines) so the diff would be hard to read.
1638 # This does mean that the diff might contain changes that
1639 # would be normalised away.
1640 if (config.verbose >= 1):
1641 r = os.system( 'diff -uw ' + expected_file_for_diff + \
1642 ' ' + actual_file )
1643
1644 # If for some reason there were no non-whitespace differences,
1645 # then do a full diff
1646 if r == 0:
1647 r = os.system( 'diff -u ' + expected_file_for_diff + \
1648 ' ' + actual_file )
1649
1650 if config.accept:
1651 if_verbose(1, 'Accepting new output.')
1652 write_file(expected_file, actual_raw)
1653 return 1
1654 else:
1655 return 0
1656
1657
1658 def normalise_whitespace( str ):
1659 # Merge contiguous whitespace characters into a single space.
1660 str = re.sub('[ \t\n]+', ' ', str)
1661 return str
1662
1663 def normalise_errmsg( str ):
1664 # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
1665 # the colon is there because it appears in error messages; this
1666 # hacky solution is used in place of more sophisticated filename
1667 # mangling
1668 str = re.sub('([^\\s])\\.exe', '\\1', str)
1669 # normalise slashes, minimise Windows/Unix filename differences
1670 str = re.sub('\\\\', '/', str)
1671 # The inplace ghc's are called ghc-stage[123] to avoid filename
1672 # collisions, so we need to normalise that to just "ghc"
1673 str = re.sub('ghc-stage[123]', 'ghc', str)
1674 # Error messages sometimes contain the name of the integer implementation package
1675 str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
1676 return str
1677
1678 # normalise a .prof file, so that we can reasonably compare it against
1679 # a sample. This doesn't compare any of the actual profiling data,
1680 # only the shape of the profile and the number of entries.
1681 def normalise_prof (str):
1682 # strip everything up to the line beginning "COST CENTRE"
1683 str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)
1684
1685 # strip results for CAFs, these tend to change unpredictably
1686 str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)
1687
1688 # XXX Ignore Main.main. Sometimes this appears under CAF, and
1689 # sometimes under MAIN.
1690 str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)
1691
1692 # We have something like this:
1693
1694 # MAIN MAIN 101 0 0.0 0.0 100.0 100.0
1695 # k Main 204 1 0.0 0.0 0.0 0.0
1696 # foo Main 205 1 0.0 0.0 0.0 0.0
1697 # foo.bar Main 207 1 0.0 0.0 0.0 0.0
1698
1699 # then we remove all the specific profiling data, leaving only the
1700 # cost centre name, module, and entries, to end up with this:
1701
1702 # MAIN MAIN 0
1703 # k Main 1
1704 # foo Main 1
1705 # foo.bar Main 1
1706
1707 str = re.sub('\n([ \t]*[^ \t]+)([ \t]+[^ \t]+)([ \t]+\\d+)([ \t]+\\d+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)','\n\\1 \\2 \\4',str)
1708 return str
1709
1710 def normalise_slashes_( str ):
1711 str = re.sub('\\\\', '/', str)
1712 return str
1713
1714 def normalise_exe_( str ):
1715 str = re.sub('\.exe', '', str)
1716 return str
1717
1718 def normalise_output( str ):
1719 # Remove a .exe extension (for Windows)
1720 # This can occur in error messages generated by the program.
1721 str = re.sub('([^\\s])\\.exe', '\\1', str)
1722 return str
1723
1724 def normalise_asm( str ):
1725 lines = str.split('\n')
1726 # Only keep instructions and labels not starting with a dot.
1727 metadata = re.compile('^[ \t]*\\..*$')
1728 out = []
1729 for line in lines:
1730 # Drop metadata directives (e.g. ".type")
1731 if not metadata.match(line):
1732 line = re.sub('@plt', '', line)
1733 instr = line.lstrip().split()
1734 # Drop empty lines.
1735 if not instr:
1736 continue
1737 # Drop operands, except for call instructions.
1738 elif instr[0] == 'call':
1739 out.append(instr[0] + ' ' + instr[1])
1740 else:
1741 out.append(instr[0])
1742 out = '\n'.join(out)
1743 return out
1744
1745 def if_verbose( n, s ):
1746 if config.verbose >= n:
1747 print(s)
1748
1749 def if_verbose_dump( n, f ):
1750 if config.verbose >= n:
1751 try:
1752 print(open(f).read())
1753 except:
1754 print('')
1755
1756 def rawSystem(cmd_and_args):
1757 # We prefer subprocess.call to os.spawnv as the latter
1758 # seems to send its arguments through a shell or something
1759 # with the Windows (non-cygwin) python. An argument "a b c"
1760 # turns into three arguments ["a", "b", "c"].
1761
1762 # However, subprocess is new in python 2.4, so fall back to
1763 # using spawnv if we don't have it
1764
1765 if have_subprocess:
1766 return subprocess.call(cmd_and_args)
1767 else:
1768 return os.spawnv(os.P_WAIT, cmd_and_args[0], cmd_and_args)
1769
1770 # Note that this doesn't handle the timeout itself; it is just used for
1771 # commands that have timeout handling built-in.
1772 def rawSystemWithTimeout(cmd_and_args):
1773 r = rawSystem(cmd_and_args)
1774 if r == 98:
1775 # The python timeout program uses 98 to signal that ^C was pressed
1776 stopNow()
1777 return r
1778
1779 # cmd is a complex command in Bourne-shell syntax
1780 # e.g (cd . && 'c:/users/simonpj/darcs/HEAD/compiler/stage1/ghc-inplace' ...etc)
1781 # Hence it must ultimately be run by a Bourne shell
1782 #
1783 # Mostly it invokes the command wrapped in 'timeout' thus
1784 # timeout 300 'cd . && ...blah blah'
1785 # so it's timeout's job to invoke the Bourne shell
1786 #
1787 # But watch out for the case when there is no timeout program!
1788 # Then, when using the native Python, os.system will invoke the cmd shell
1789
1790 def runCmd( cmd ):
1791 if_verbose( 3, cmd )
1792 r = 0
1793 if config.os == 'mingw32':
1794 # On MinGW, we will always have timeout
1795 assert config.timeout_prog!=''
1796
1797 if config.timeout_prog != '':
1798 r = rawSystemWithTimeout([config.timeout_prog, str(config.timeout), cmd])
1799 else:
1800 r = os.system(cmd)
1801 return r << 8
1802
1803 def runCmdFor( name, cmd, timeout_multiplier=1.0 ):
1804 if_verbose( 3, cmd )
1805 r = 0
1806 if config.os == 'mingw32':
1807 # On MinGW, we will always have timeout
1808 assert config.timeout_prog!=''
1809 timeout = int(ceil(config.timeout * timeout_multiplier))
1810
1811 if config.timeout_prog != '':
1812 if config.check_files_written:
1813 fn = name + ".strace"
1814 r = rawSystemWithTimeout(
1815 ["strace", "-o", fn, "-fF",
1816 "-e", "creat,open,chdir,clone,vfork",
1817 config.timeout_prog, str(timeout), cmd])
1818 addTestFilesWritten(name, fn)
1819 rm_no_fail(fn)
1820 else:
1821 r = rawSystemWithTimeout([config.timeout_prog, str(timeout), cmd])
1822 else:
1823 r = os.system(cmd)
1824 return r << 8
1825
1826 def runCmdExitCode( cmd ):
1827 return (runCmd(cmd) >> 8);
1828
1829
1830 # -----------------------------------------------------------------------------
1831 # checking for files being written to by multiple tests
1832
1833 re_strace_call_end = '(\) += ([0-9]+|-1 E.*)| <unfinished ...>)$'
1834 re_strace_unavailable = re.compile('^\) += \? <unavailable>$')
1835 re_strace_pid = re.compile('^([0-9]+) +(.*)')
1836 re_strace_clone = re.compile('^(clone\(|<... clone resumed> ).*\) = ([0-9]+)$')
1837 re_strace_clone_unfinished = re.compile('^clone\( <unfinished \.\.\.>$')
1838 re_strace_vfork = re.compile('^(vfork\(\)|<\.\.\. vfork resumed> \)) += ([0-9]+)$')
1839 re_strace_vfork_unfinished = re.compile('^vfork\( <unfinished \.\.\.>$')
1840 re_strace_chdir = re.compile('^chdir\("([^"]*)"(\) += 0| <unfinished ...>)$')
1841 re_strace_chdir_resumed = re.compile('^<\.\.\. chdir resumed> \) += 0$')
1842 re_strace_open = re.compile('^open\("([^"]*)", ([A-Z_|]*)(, [0-9]+)?' + re_strace_call_end)
1843 re_strace_open_resumed = re.compile('^<... open resumed> ' + re_strace_call_end)
1844 re_strace_ignore_sigchild = re.compile('^--- SIGCHLD \(Child exited\) @ 0 \(0\) ---$')
1845 re_strace_ignore_sigvtalarm = re.compile('^--- SIGVTALRM \(Virtual timer expired\) @ 0 \(0\) ---$')
1846 re_strace_ignore_sigint = re.compile('^--- SIGINT \(Interrupt\) @ 0 \(0\) ---$')
1847 re_strace_ignore_sigfpe = re.compile('^--- SIGFPE \(Floating point exception\) @ 0 \(0\) ---$')
1848 re_strace_ignore_sigsegv = re.compile('^--- SIGSEGV \(Segmentation fault\) @ 0 \(0\) ---$')
1849 re_strace_ignore_sigpipe = re.compile('^--- SIGPIPE \(Broken pipe\) @ 0 \(0\) ---$')
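# Example (made-up) strace lines that the patterns above are meant to match:
#     1234  open("T9999.hi", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 3
#     1234  chdir("../T9999dir") = 0
# re_strace_pid first splits off the leading pid; the rest of the line is then
# matched against the open/chdir/clone/vfork patterns.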
1850
1851 # Files that are read or written but shouldn't be:
1852 # * ghci_history shouldn't be read or written by tests
1853 # * things under package.conf.d shouldn't be written by tests
1854 bad_file_usages = {}
1855
1856 # Mapping from tests to the list of files that they write
1857 files_written = {}
1858
1859 # Mapping from tests to the list of files that they write but don't clean
1860 files_written_not_removed = {}
1861
1862 def add_bad_file_usage(name, file):
1863 try:
1864 if not file in bad_file_usages[name]:
1865 bad_file_usages[name].append(file)
1866 except:
1867 bad_file_usages[name] = [file]
1868
1869 def mkPath(curdir, path):
1870 # Given the current full directory is 'curdir', what is the full
1871 # path to 'path'?
1872 return os.path.realpath(os.path.join(curdir, path))
1873
1874 def addTestFilesWritten(name, fn):
1875 if config.use_threads:
1876 with t.lockFilesWritten:
1877 addTestFilesWrittenHelper(name, fn)
1878 else:
1879 addTestFilesWrittenHelper(name, fn)
1880
1881 def addTestFilesWrittenHelper(name, fn):
1882 started = False
1883 working_directories = {}
1884
1885 with open(fn, 'r') as f:
1886 for line in f:
1887 m_pid = re_strace_pid.match(line)
1888 if m_pid:
1889 pid = m_pid.group(1)
1890 content = m_pid.group(2)
1891 elif re_strace_unavailable.match(line):
1892 continue
1893 else:
1894 framework_fail(name, 'strace', "Can't find pid in strace line: " + line)
1895
1896 m_open = re_strace_open.match(content)
1897 m_chdir = re_strace_chdir.match(content)
1898 m_clone = re_strace_clone.match(content)
1899 m_vfork = re_strace_vfork.match(content)
1900
1901 if not started:
1902 working_directories[pid] = os.getcwd()
1903 started = True
1904
1905 if m_open:
1906 file = m_open.group(1)
1907 file = mkPath(working_directories[pid], file)
1908 if file.endswith("ghci_history"):
1909 add_bad_file_usage(name, file)
1910 elif not file in ['/dev/tty', '/dev/null'] and not file.startswith("/tmp/ghc"):
1911 flags = m_open.group(2).split('|')
1912 if 'O_WRONLY' in flags or 'O_RDWR' in flags:
1913 if re.search('package\.conf\.d', file):
1914 add_bad_file_usage(name, file)
1915 else:
1916 try:
1917 if not file in files_written[name]:
1918 files_written[name].append(file)
1919 except:
1920 files_written[name] = [file]
1921 elif 'O_RDONLY' in flags:
1922 pass
1923 else:
1924 framework_fail(name, 'strace', "Can't understand flags in open strace line: " + line)
1925 elif m_chdir:
1926 # We optimistically assume that unfinished chdirs are going to succeed
1927 dir = m_chdir.group(1)
1928 working_directories[pid] = mkPath(working_directories[pid], dir)
1929 elif m_clone:
1930 working_directories[m_clone.group(2)] = working_directories[pid]
1931 elif m_vfork:
1932 working_directories[m_vfork.group(2)] = working_directories[pid]
1933 elif re_strace_open_resumed.match(content):
1934 pass
1935 elif re_strace_chdir_resumed.match(content):
1936 pass
1937 elif re_strace_vfork_unfinished.match(content):
1938 pass
1939 elif re_strace_clone_unfinished.match(content):
1940 pass
1941 elif re_strace_ignore_sigchild.match(content):
1942 pass
1943 elif re_strace_ignore_sigvtalarm.match(content):
1944 pass
1945 elif re_strace_ignore_sigint.match(content):
1946 pass
1947 elif re_strace_ignore_sigfpe.match(content):
1948 pass
1949 elif re_strace_ignore_sigsegv.match(content):
1950 pass
1951 elif re_strace_ignore_sigpipe.match(content):
1952 pass
1953 else:
1954 framework_fail(name, 'strace', "Can't understand strace line: " + line)
1955
1956 def checkForFilesWrittenProblems(file):
1957 foundProblem = False
1958
1959 files_written_inverted = {}
1960 for t in files_written.keys():
1961 for f in files_written[t]:
1962 try:
1963 files_written_inverted[f].append(t)
1964 except:
1965 files_written_inverted[f] = [t]
1966
1967 for f in files_written_inverted.keys():
1968 if len(files_written_inverted[f]) > 1:
1969 if not foundProblem:
1970 foundProblem = True
1971 file.write("\n")
1972 file.write("\nSome files are written by multiple tests:\n")
1973 file.write(" " + f + " (" + str(files_written_inverted[f]) + ")\n")
1974 if foundProblem:
1975 file.write("\n")
1976
1977 # -----
1978
1979 if len(files_written_not_removed) > 0:
1980 file.write("\n")
1981 file.write("\nSome files written but not removed:\n")
1982 tests = list(files_written_not_removed.keys())
1983 tests.sort()
1984 for t in tests:
1985 for f in files_written_not_removed[t]:
1986 file.write(" " + t + ": " + f + "\n")
1987 file.write("\n")
1988
1989 # -----
1990
1991 if len(bad_file_usages) > 0:
1992 file.write("\n")
1993 file.write("\nSome bad file usages:\n")
1994 tests = list(bad_file_usages.keys())
1995 tests.sort()
1996 for t in tests:
1997 for f in bad_file_usages[t]:
1998 file.write(" " + t + ": " + f + "\n")
1999 file.write("\n")
2000
2001 # -----------------------------------------------------------------------------
2002 # checking if ghostscript is available for checking the output of hp2ps
2003
2004 def genGSCmd(psfile):
2005 return (config.gs + ' -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE ' + psfile);
2006
2007 def gsNotWorking():
2008 global gs_working
2009 print("GhostScript not available for hp2ps tests")
2010
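# Sanity-check ghostscript before trusting it for hp2ps tests: it must accept
# a known-good .ps file and reject a known-bad one.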
2011 global gs_working
2012 gs_working = 0
2013 if config.have_profiling:
2014 if config.gs != '':
2015 resultGood = runCmdExitCode(genGSCmd(config.confdir + '/good.ps'));
2016 if resultGood == 0:
2017 resultBad = runCmdExitCode(genGSCmd(config.confdir + '/bad.ps'));
2018 if resultBad != 0:
2019 print("GhostScript available for hp2ps tests")
2020 gs_working = 1;
2021 else:
2022 gsNotWorking();
2023 else:
2024 gsNotWorking();
2025 else:
2026 gsNotWorking();
2027
2028 def rm_no_fail( file ):
2029 try:
2030 os.remove( file )
2031 except OSError:
2032 pass
2033
2034 def add_suffix( name, suffix ):
2035 if suffix == '':
2036 return name
2037 else:
2038 return name + '.' + suffix
2039
2040 def add_hs_lhs_suffix(name):
2041 if getTestOpts().c_src:
2042 return add_suffix(name, 'c')
2043 elif getTestOpts().cmm_src:
2044 return add_suffix(name, 'cmm')
2045 elif getTestOpts().objc_src:
2046 return add_suffix(name, 'm')
2047 elif getTestOpts().objcpp_src:
2048 return add_suffix(name, 'mm')
2049 elif getTestOpts().literate:
2050 return add_suffix(name, 'lhs')
2051 else:
2052 return add_suffix(name, 'hs')
2053
2054 def replace_suffix( name, suffix ):
2055 base, suf = os.path.splitext(name)
2056 return base + '.' + suffix
2057
2058 def in_testdir( name ):
2059 return (getTestOpts().testdir + '/' + name)
2060
2061 def qualify( name, suff ):
2062 return in_testdir(add_suffix(name, suff))
2063
2064
2065 # Finding the sample output. The filename is of the form
2066 #
2067 # <test>.stdout[-<compiler>][-<version>][-ws-<wordsize>][-<platform>]
2068 #
2069 # and we pick the most specific version available. The <version> is
2070 # the major version of the compiler (e.g. 6.8.2 would be "6.8"). For
2071 # more fine-grained control use if_compiler_lt().
2072 #
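# For example (hypothetical configuration: ghc-7.7, 64-bit words, platform
# x86_64-unknown-linux), the candidates for T1234.stdout range from the fully
# qualified
#
#   T1234.stdout-ghc-7.7-ws-64-x86_64-unknown-linux
#
# through partially qualified names such as T1234.stdout-ws-64-linux and
# T1234.stdout-ghc-7.7, down to the plain T1234.stdout; the most specific
# file that actually exists in the test directory is used.
#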
2073 def platform_wordsize_qualify( name, suff ):
2074
2075 basepath = qualify(name, suff)
2076
2077 paths = [(platformSpecific, basepath + comp + vers + ws + plat)
2078 for (platformSpecific, plat) in [(1, '-' + config.platform),
2079 (1, '-' + config.os),
2080 (0, '')]
2081 for ws in ['-ws-' + config.wordsize, '']
2082 for comp in ['-' + config.compiler_type, '']
2083 for vers in ['-' + config.compiler_maj_version, '']]
2084
2085 dir = glob.glob(basepath + '*')
2086 dir = [normalise_slashes_(d) for d in dir]
2087
2088 for (platformSpecific, f) in paths:
2089 if f in dir:
2090 return (platformSpecific,f)
2091
2092 return (0, basepath)
2093
2094 # Clean up prior to the test, so that we can't spuriously conclude
2095 # that it passed on the basis of old run outputs.
2096 def pretest_cleanup(name):
2097 if getTestOpts().outputdir != None:
2098 odir = in_testdir(getTestOpts().outputdir)
2099 try:
2100 shutil.rmtree(odir)
2101 except:
2102 pass
2103 os.mkdir(odir)
2104
2105 rm_no_fail(qualify(name,'interp.stderr'))
2106 rm_no_fail(qualify(name,'interp.stdout'))
2107 rm_no_fail(qualify(name,'comp.stderr'))
2108 rm_no_fail(qualify(name,'comp.stdout'))
2109 rm_no_fail(qualify(name,'run.stderr'))
2110 rm_no_fail(qualify(name,'run.stdout'))
2111 rm_no_fail(qualify(name,'tix'))
2112 rm_no_fail(qualify(name,'exe.tix'))
2113 # simple_build zaps the following:
2114 # rm_nofail(qualify("o"))
2115 # rm_nofail(qualify(""))
2116 # not interested in the return code
2117
2118 # -----------------------------------------------------------------------------
2119 # Return a list of all the files ending in '.T' below directories roots.
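#
# For example (hypothetical layout), findTFiles(['tests/typecheck']) might
# return ['tests/typecheck/should_compile/all.T',
#         'tests/typecheck/should_run/all.T'].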
2120
2121 def findTFiles(roots):
2122 # It would be better to use os.walk, but that
2123 # gives backslashes on Windows, which trip the
2124 # testsuite later :-(
2125 return [filename for root in roots for filename in findTFiles_(root)]
2126
2127 def findTFiles_(path):
2128 if os.path.isdir(path):
2129 paths = [path + '/' + x for x in os.listdir(path)]
2130 return findTFiles(paths)
2131 elif path[-2:] == '.T':
2132 return [path]
2133 else:
2134 return []
2135
2136 # -----------------------------------------------------------------------------
2137 # Output a test summary to the specified file object
2138
2139 def summary(t, file):
2140
2141 file.write('\n')
2142 printUnexpectedTests(file, [t.unexpected_passes, t.unexpected_failures])
2143 file.write('OVERALL SUMMARY for test run started at '
2144 + time.strftime("%c %Z", t.start_time) + '\n'
2145 + str(datetime.timedelta(seconds=
2146 round(time.time() - time.mktime(t.start_time)))).rjust(8)
2147 + ' spent to go through\n'
2148 + repr(t.total_tests).rjust(8)
2149 + ' total tests, which gave rise to\n'
2150 + repr(t.total_test_cases).rjust(8)
2151 + ' test cases, of which\n'
2152 + repr(t.n_tests_skipped).rjust(8)
2153 + ' were skipped\n'
2154 + '\n'
2155 + repr(t.n_missing_libs).rjust(8)
2156 + ' had missing libraries\n'
2157 + repr(t.n_expected_passes).rjust(8)
2158 + ' expected passes\n'
2159 + repr(t.n_expected_failures).rjust(8)
2160 + ' expected failures\n'
2161 + '\n'
2162 + repr(t.n_framework_failures).rjust(8)
2163 + ' caused framework failures\n'
2164 + repr(t.n_unexpected_passes).rjust(8)
2165 + ' unexpected passes\n'
2166 + repr(t.n_unexpected_failures).rjust(8)
2167 + ' unexpected failures\n'
2168 + '\n')
2169
2170 if t.n_unexpected_passes > 0:
2171 file.write('Unexpected passes:\n')
2172 printPassingTestInfosSummary(file, t.unexpected_passes)
2173
2174 if t.n_unexpected_failures > 0:
2175 file.write('Unexpected failures:\n')
2176 printFailingTestInfosSummary(file, t.unexpected_failures)
2177
2178 if config.check_files_written:
2179 checkForFilesWrittenProblems(file)
2180
2181 if stopping():
2182 file.write('WARNING: Testsuite run was terminated early\n')
2183
2184 def printUnexpectedTests(file, testInfoss):
2185 unexpected = []
2186 for testInfos in testInfoss:
2187 directories = testInfos.keys()
2188 for directory in directories:
2189 tests = list(testInfos[directory].keys())
2190 unexpected += tests
2191 if unexpected != []:
2192 file.write('Unexpected results from:\n')
2193 file.write('TEST="' + ' '.join(unexpected) + '"\n')
2194 file.write('\n')
2195
2196 def printPassingTestInfosSummary(file, testInfos):
2197 directories = list(testInfos.keys())
2198 directories.sort()
2199 maxDirLen = max(len(x) for x in directories)
2200 for directory in directories:
2201 tests = list(testInfos[directory].keys())
2202 tests.sort()
2203 for test in tests:
2204 file.write(' ' + directory.ljust(maxDirLen + 2) + test + \
2205 ' (' + ','.join(testInfos[directory][test]) + ')\n')
2206 file.write('\n')
2207
2208 def printFailingTestInfosSummary(file, testInfos):
2209 directories = list(testInfos.keys())
2210 directories.sort()
2211 maxDirLen = max(len(d) for d in directories)
2212 for directory in directories:
2213 tests = list(testInfos[directory].keys())
2214 tests.sort()
2215 for test in tests:
2216 reasons = testInfos[directory][test].keys()
2217 for reason in reasons:
2218 file.write(' ' + directory.ljust(maxDirLen + 2) + test + \
2219 ' [' + reason + ']' + \
2220 ' (' + ','.join(testInfos[directory][test][reason]) + ')\n')
2221 file.write('\n')
2222
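# Run a command and return its stdout, raising if it fails or writes to
# stderr.  Example use (sketch; the command is just an illustration):
#     version = getStdout([config.ghc_pkg, '--version'])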
2223 def getStdout(cmd):
2224 if have_subprocess:
2225 p = subprocess.Popen(cmd,
2226 stdout=subprocess.PIPE,
2227 stderr=subprocess.PIPE)
2228 (stdout, stderr) = p.communicate()
2229 r = p.wait()
2230 if r != 0:
2231 raise Exception("Command failed: " + str(cmd))
2232 if stderr:
2233 raise Exception("stderr from command: " + str(cmd))
2234 return stdout
2235 else:
2236 raise Exception("Need subprocess to get stdout, but don't have it")