testlib: Get rid of two_normalisers
[ghc.git] / testsuite / driver / testlib.py
1 #
2 # (c) Simon Marlow 2002
3 #
4
5 from __future__ import print_function
6
7 import shutil
8 import sys
9 import os
10 import errno
11 import string
12 import re
13 import traceback
14 import time
15 import datetime
16 import copy
17 import glob
18 from math import ceil, trunc
19 import collections
20
21 have_subprocess = False
22 try:
23 import subprocess
24 have_subprocess = True
25 except:
26 print("Warning: subprocess not found, will fall back to spawnv")
27
28 from testglobals import *
29 from testutil import *
30
31 if config.use_threads:
32 import threading
33 try:
34 import thread
35 except ImportError: # Python 3
36 import _thread as thread
37
38 global wantToStop
39 wantToStop = False
40 def stopNow():
41 global wantToStop
42 wantToStop = True
43 def stopping():
44 return wantToStop
45
46 # Options valid for the current test only (these get reset to
47 # testdir_testopts after each test).
48
49 global testopts_local
50 if config.use_threads:
51 testopts_local = threading.local()
52 else:
53 class TestOpts_Local:
54 pass
55 testopts_local = TestOpts_Local()
56
57 def getTestOpts():
58 return testopts_local.x
59
60 def setLocalTestOpts(opts):
61 global testopts_local
62 testopts_local.x=opts
63
64 def isStatsTest():
65 opts = getTestOpts()
66 return len(opts.compiler_stats_range_fields) > 0 or len(opts.stats_range_fields) > 0
67
68
69 # This can be called at the top of a file of tests, to set default test options
70 # for the following tests.
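# e.g. (illustrative only) an all.T file might begin with:
#
#   setTestOpts([omit_ways(['ghci']), when(fast(), skip)])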
71 def setTestOpts( f ):
72 global thisdir_settings
73 thisdir_settings = [thisdir_settings, f]
74
75 # -----------------------------------------------------------------------------
76 # Canned setup functions for common cases. eg. for a test you might say
77 #
78 # test('test001', normal, compile, [''])
79 #
80 # to run it without any options, but change it to
81 #
82 # test('test001', expect_fail, compile, [''])
83 #
84 # to expect failure for this test.
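#
# Several setup functions can also be combined in a list (illustrative
# example only):
#
#   test('test001', [expect_broken(1234), omit_ways(['ghci'])], compile, [''])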
85
86 def normal( name, opts ):
87 return;
88
89 def skip( name, opts ):
90 opts.skip = 1
91
92 def expect_fail( name, opts ):
93 opts.expect = 'fail';
94
95 def reqlib( lib ):
96 return lambda name, opts, l=lib: _reqlib (name, opts, l )
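# e.g. (illustrative only) a test that needs the 'process' package:
#
#   test('procTest', reqlib('process'), compile_and_run, [''])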
97
98 # Cache the results of looking to see if we have a library or not.
99 # This makes quite a difference, especially on Windows.
100 have_lib = {}
101
102 def _reqlib( name, opts, lib ):
103 if lib in have_lib:
104 got_it = have_lib[lib]
105 else:
106 if have_subprocess:
107 # By preference we use subprocess, as the alternative uses
108 # /dev/null which mingw doesn't have.
109 p = subprocess.Popen([config.ghc_pkg, '--no-user-package-db', 'describe', lib],
110 stdout=subprocess.PIPE,
111 stderr=subprocess.PIPE)
112 # read from stdout and stderr to avoid blocking due to
113 # buffers filling
114 p.communicate()
115 r = p.wait()
116 else:
117 r = os.system(config.ghc_pkg + ' describe ' + lib
118 + ' > /dev/null 2> /dev/null')
119 got_it = r == 0
120 have_lib[lib] = got_it
121
122 if not got_it:
123 opts.expect = 'missing-lib'
124
125 def req_profiling( name, opts ):
126 if not config.have_profiling:
127 opts.expect = 'fail'
128
129 def req_shared_libs( name, opts ):
130 if not config.have_shared_libs:
131 opts.expect = 'fail'
132
133 def req_interp( name, opts ):
134 if not config.have_interp:
135 opts.expect = 'fail'
136
137 def req_smp( name, opts ):
138 if not config.have_smp:
139 opts.expect = 'fail'
140
141 def ignore_output( name, opts ):
142 opts.ignore_output = 1
143
144 def no_stdin( name, opts ):
145 opts.no_stdin = 1
146
147 def combined_output( name, opts ):
148 opts.combined_output = True
149
150 # -----
151
152 def expect_fail_for( ways ):
153 return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )
154
155 def _expect_fail_for( name, opts, ways ):
156 opts.expect_fail_for = ways
157
158 def expect_broken( bug ):
159 return lambda name, opts, b=bug: _expect_broken (name, opts, b )
160
161 def _expect_broken( name, opts, bug ):
162 record_broken(name, opts, bug)
163 opts.expect = 'fail';
164
165 def expect_broken_for( bug, ways ):
166 return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )
167
168 def _expect_broken_for( name, opts, bug, ways ):
169 record_broken(name, opts, bug)
170 opts.expect_fail_for = ways
171
172 def record_broken(name, opts, bug):
173 global brokens
174 me = (bug, opts.testdir, name)
175 if not me in brokens:
176 brokens.append(me)
177
178 # -----
179
180 def omit_ways( ways ):
181 return lambda name, opts, w=ways: _omit_ways( name, opts, w )
182
183 def _omit_ways( name, opts, ways ):
184 opts.omit_ways = ways
185
186 # -----
187
188 def only_ways( ways ):
189 return lambda name, opts, w=ways: _only_ways( name, opts, w )
190
191 def _only_ways( name, opts, ways ):
192 opts.only_ways = ways
193
194 # -----
195
196 def extra_ways( ways ):
197 return lambda name, opts, w=ways: _extra_ways( name, opts, w )
198
199 def _extra_ways( name, opts, ways ):
200 opts.extra_ways = ways
201
202 # -----
203
204 def omit_compiler_types( compiler_types ):
205 return lambda name, opts, c=compiler_types: _omit_compiler_types(name, opts, c)
206
207 def _omit_compiler_types( name, opts, compiler_types ):
208 if config.compiler_type in compiler_types:
209 opts.skip = 1
210
211 # -----
212
213 def only_compiler_types( compiler_types ):
214 return lambda name, opts, c=compiler_types: _only_compiler_types(name, opts, c)
215
216 def _only_compiler_types( name, opts, compiler_types ):
217 if config.compiler_type not in compiler_types:
218 opts.skip = 1
219
220 # -----
221
222 def set_stdin( file ):
223 return lambda name, opts, f=file: _set_stdin(name, opts, f);
224
225 def _set_stdin( name, opts, f ):
226 opts.stdin = f
227
228 # -----
229
230 def exit_code( val ):
231 return lambda name, opts, v=val: _exit_code(name, opts, v);
232
233 def _exit_code( name, opts, v ):
234 opts.exit_code = v
235
236 def signal_exit_code( val ):
237 if opsys('solaris2'):
238 return exit_code( val );
239 else:
240 # When an application running on Linux receives a fatal error
241 # signal, its exit code is encoded as 128 + the signal number.
242 # See http://www.tldp.org/LDP/abs/html/exitcodes.html
243 # Mac OS X appears to behave in the same way; at least the
244 # behaviour of the Mac OS X builder suggests so.
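# For example (illustrative): signal_exit_code(2) corresponds to an
# expected exit code of 130 (i.e. 128 + SIGINT) on Linux, but to an
# exit code of 2 on Solaris.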
245 return exit_code( val+128 );
246
247 # -----
248
249 def timeout_multiplier( val ):
250 return lambda name, opts, v=val: _timeout_multiplier(name, opts, v)
251
252 def _timeout_multiplier( name, opts, v ):
253 opts.timeout_multiplier = v
254
255 # -----
256
257 def extra_run_opts( val ):
258 return lambda name, opts, v=val: _extra_run_opts(name, opts, v);
259
260 def _extra_run_opts( name, opts, v ):
261 opts.extra_run_opts = v
262
263 # -----
264
265 def extra_hc_opts( val ):
266 return lambda name, opts, v=val: _extra_hc_opts(name, opts, v);
267
268 def _extra_hc_opts( name, opts, v ):
269 opts.extra_hc_opts = v
270
271 # -----
272
273 def extra_clean( files ):
274 return lambda name, opts, v=files: _extra_clean(name, opts, v);
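# e.g. (illustrative only): extra_clean(['Foo.hi', 'Foo.o'])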
275
276 def _extra_clean( name, opts, v ):
277 opts.clean_files = v
278
279 # -----
280
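# stats_num_field(field, expecteds) checks a runtime-stats field against
# an expected value, within a percentage deviation.  'expecteds' is
# either a single (expected, deviation) pair, or a list of
# (condition, expected, deviation) triples, e.g. (illustrative only):
#
#   stats_num_field('max_bytes_used',
#                   [(wordsize(64), 500000, 10),
#                    (wordsize(32), 300000, 10)])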
281 def stats_num_field( field, expecteds ):
282 return lambda name, opts, f=field, e=expecteds: _stats_num_field(name, opts, f, e);
283
284 def _stats_num_field( name, opts, field, expecteds ):
285 if field in opts.stats_range_fields:
286 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
287
288 if type(expecteds) is list:
289 for (b, expected, dev) in expecteds:
290 if b:
291 opts.stats_range_fields[field] = (expected, dev)
292 return
293 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
294
295 else:
296 (expected, dev) = expecteds
297 opts.stats_range_fields[field] = (expected, dev)
298
299 def compiler_stats_num_field( field, expecteds ):
300 return lambda name, opts, f=field, e=expecteds: _compiler_stats_num_field(name, opts, f, e);
301
302 def _compiler_stats_num_field( name, opts, field, expecteds ):
303 if field in opts.compiler_stats_range_fields:
304 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
305
306 # Compiler performance numbers change when debugging is on, making the results
307 # useless and confusing. Therefore, skip if debugging is on.
308 if compiler_debugged():
309 skip(name, opts)
310
311 for (b, expected, dev) in expecteds:
312 if b:
313 opts.compiler_stats_range_fields[field] = (expected, dev)
314 return
315
316 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
317
318 # -----
319
320 def when(b, f):
321 # When list_broken is on, we want to see all expect_broken calls,
322 # so we always do f
323 if b or config.list_broken:
324 return f
325 else:
326 return normal
327
328 def unless(b, f):
329 return when(not b, f)
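# e.g. (illustrative only) skip a test on Windows:
#
#   test('T5678', when(opsys('mingw32'), skip), compile_and_run, [''])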
330
331 def doing_ghci():
332 return 'ghci' in config.run_ways
333
334 def ghci_dynamic( ):
335 return config.ghc_dynamic
336
337 def fast():
338 return config.fast
339
340 def platform( plat ):
341 return config.platform == plat
342
343 def opsys( os ):
344 return config.os == os
345
346 def arch( arch ):
347 return config.arch == arch
348
349 def wordsize( ws ):
350 return config.wordsize == str(ws)
351
352 def msys( ):
353 return config.msys
354
355 def cygwin( ):
356 return config.cygwin
357
358 def have_vanilla( ):
359 return config.have_vanilla
360
361 def have_dynamic( ):
362 return config.have_dynamic
363
364 def have_profiling( ):
365 return config.have_profiling
366
367 def in_tree_compiler( ):
368 return config.in_tree_compiler
369
370 def compiler_type( compiler ):
371 return config.compiler_type == compiler
372
373 def compiler_lt( compiler, version ):
374 return config.compiler_type == compiler and \
375 version_lt(config.compiler_version, version)
376
377 def compiler_le( compiler, version ):
378 return config.compiler_type == compiler and \
379 version_le(config.compiler_version, version)
380
381 def compiler_gt( compiler, version ):
382 return config.compiler_type == compiler and \
383 version_gt(config.compiler_version, version)
384
385 def compiler_ge( compiler, version ):
386 return config.compiler_type == compiler and \
387 version_ge(config.compiler_version, version)
388
389 def unregisterised( ):
390 return config.unregisterised
391
392 def compiler_profiled( ):
393 return config.compiler_profiled
394
395 def compiler_debugged( ):
396 return config.compiler_debugged
397
398 def tag( t ):
399 return t in config.compiler_tags
400
401 # ---
402
403 def namebase( nb ):
404 return lambda opts, nb=nb: _namebase(opts, nb)
405
406 def _namebase( opts, nb ):
407 opts.with_namebase = nb
408
409 # ---
410
411 def high_memory_usage(name, opts):
412 opts.alone = True
413
414 # If a test is for a multi-CPU race, then running the test alone
415 # increases the chance that we'll actually see it.
416 def multi_cpu_race(name, opts):
417 opts.alone = True
418
419 # ---
420 def literate( name, opts ):
421 opts.literate = 1;
422
423 def c_src( name, opts ):
424 opts.c_src = 1;
425
426 def objc_src( name, opts ):
427 opts.objc_src = 1;
428
429 def objcpp_src( name, opts ):
430 opts.objcpp_src = 1;
431
432 def cmm_src( name, opts ):
433 opts.cmm_src = 1;
434
435 def outputdir( odir ):
436 return lambda name, opts, d=odir: _outputdir(name, opts, d)
437
438 def _outputdir( name, opts, odir ):
439 opts.outputdir = odir;
440
441 # ----
442
443 def pre_cmd( cmd ):
444 return lambda name, opts, c=cmd: _pre_cmd(name, opts, c)
445
446 def _pre_cmd( name, opts, cmd ):
447 opts.pre_cmd = cmd
448
449 # ----
450
451 def clean_cmd( cmd ):
452 return lambda name, opts, c=cmd: _clean_cmd(name, opts, c)
453
454 def _clean_cmd( name, opts, cmd ):
455 opts.clean_cmd = cmd
456
457 # ----
458
459 def cmd_prefix( prefix ):
460 return lambda name, opts, p=prefix: _cmd_prefix(name, opts, p)
461
462 def _cmd_prefix( name, opts, prefix ):
463 opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd;
464
465 # ----
466
467 def cmd_wrapper( fun ):
468 return lambda name, opts, f=fun: _cmd_wrapper(name, opts, f)
469
470 def _cmd_wrapper( name, opts, fun ):
471 opts.cmd_wrapper = fun
472
473 # ----
474
475 def compile_cmd_prefix( prefix ):
476 return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, p)
477
478 def _compile_cmd_prefix( name, opts, prefix ):
479 opts.compile_cmd_prefix = prefix
480
481 # ----
482
483 def check_stdout( f ):
484 return lambda name, opts, f=f: _check_stdout(name, opts, f)
485
486 def _check_stdout( name, opts, f ):
487 opts.check_stdout = f
488
489 # ----
490
491 def normalise_slashes( name, opts ):
492 opts.extra_normaliser = normalise_slashes_
493
494 def normalise_exe( name, opts ):
495 opts.extra_normaliser = normalise_exe_
496
497 def normalise_fun( *fs ):
498 return lambda name, opts: _normalise_fun(name, opts, fs)
499
500 def _normalise_fun( name, opts, *fs ):
501 opts.extra_normaliser = join_normalisers(fs)
502
503 def normalise_errmsg_fun( *fs ):
504 return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)
505
506 def _normalise_errmsg_fun( name, opts, *fs ):
507 opts.extra_errmsg_normaliser = join_normalisers(fs)
508
509 def join_normalisers(*a):
510 """
511 Compose functions, flattening sequences.
512
513 join_normalisers(f1,[f2,f3],f4)
514
515 is the same as
516
517 lambda x: f1(f2(f3(f4(x))))
518 """
519
520 def flatten(l):
521 """
522 Taken from http://stackoverflow.com/a/2158532/946226
523 """
524 for el in l:
525 if isinstance(el, collections.Iterable) and not isinstance(el, (str, bytes)):
526 for sub in flatten(el):
527 yield sub
528 else:
529 yield el
530
531 a = flatten(a)
532
533 fn = lambda x:x # identity function
534 for f in a:
535 assert callable(f)
536 fn = lambda x,f=f,fn=fn: fn(f(x))
537 return fn
538
539 # ----
540 # Execute a setup function, or a (possibly nested) list of setup functions, on a test's options
541
542 def executeSetups(fs, name, opts):
543 if type(fs) is list:
544 # If we have a list of setups, then execute each one
545 for f in fs:
546 executeSetups(f, name, opts)
547 else:
548 # fs is a single function, so just apply it
549 fs(name, opts)
550
551 # -----------------------------------------------------------------------------
552 # The current directory of tests
553
554 def newTestDir( dir ):
555 global thisdir_settings
556 # reset the options for this test directory
557 thisdir_settings = lambda name, opts, dir=dir: _newTestDir( name, opts, dir )
558
559 def _newTestDir( name, opts, dir ):
560 opts.testdir = dir
561 opts.compiler_always_flags = config.compiler_always_flags
562
563 # -----------------------------------------------------------------------------
564 # Actually doing tests
565
566 parallelTests = []
567 aloneTests = []
568 allTestNames = set([])
569
570 def runTest (opts, name, func, args):
571 ok = 0
572
573 if config.use_threads:
574 t.thread_pool.acquire()
575 try:
576 while config.threads<(t.running_threads+1):
577 t.thread_pool.wait()
578 t.running_threads = t.running_threads+1
579 ok=1
580 t.thread_pool.release()
581 thread.start_new_thread(test_common_thread, (name, opts, func, args))
582 except:
583 if not ok:
584 t.thread_pool.release()
585 else:
586 test_common_work (name, opts, func, args)
587
588 # name :: String
589 # setup :: TestOpts -> IO ()
590 def test (name, setup, func, args):
591 global aloneTests
592 global parallelTests
593 global allTestNames
594 global thisdir_settings
595 if name in allTestNames:
596 framework_fail(name, 'duplicate', 'There are multiple tests with this name')
597 if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
598 framework_fail(name, 'bad_name', 'This test has an invalid name')
599
600 # Make a deep copy of the default_testopts, as we need our own copy
601 # of any dictionaries etc inside it. Otherwise, if one test modifies
602 # them, all tests will see the modified version!
603 myTestOpts = copy.deepcopy(default_testopts)
604
605 executeSetups([thisdir_settings, setup], name, myTestOpts)
606
607 thisTest = lambda : runTest(myTestOpts, name, func, args)
608 if myTestOpts.alone:
609 aloneTests.append(thisTest)
610 else:
611 parallelTests.append(thisTest)
612 allTestNames.add(name)
613
614 if config.use_threads:
615 def test_common_thread(name, opts, func, args):
616 t.lock.acquire()
617 try:
618 test_common_work(name,opts,func,args)
619 finally:
620 t.lock.release()
621 t.thread_pool.acquire()
622 t.running_threads = t.running_threads - 1
623 t.thread_pool.notify()
624 t.thread_pool.release()
625
626 def get_package_cache_timestamp():
627 if config.package_conf_cache_file == '':
628 return 0.0
629 else:
630 try:
631 return os.stat(config.package_conf_cache_file).st_mtime
632 except:
633 return 0.0
634
635
636 def test_common_work (name, opts, func, args):
637 try:
638 t.total_tests = t.total_tests+1
639 setLocalTestOpts(opts)
640
641 package_conf_cache_file_start_timestamp = get_package_cache_timestamp()
642
643 # All the ways we might run this test
644 if func == compile or func == multimod_compile:
645 all_ways = config.compile_ways
646 elif func == compile_and_run or func == multimod_compile_and_run:
647 all_ways = config.run_ways
648 elif func == ghci_script:
649 if 'ghci' in config.run_ways:
650 all_ways = ['ghci']
651 else:
652 all_ways = []
653 else:
654 all_ways = ['normal']
655
656 # A test itself can request extra ways by setting opts.extra_ways
657 all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]
658
659 t.total_test_cases = t.total_test_cases + len(all_ways)
660
661 ok_way = lambda way: \
662 not getTestOpts().skip \
663 and (config.only == [] or name in config.only) \
664 and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
665 and (config.cmdline_ways == [] or way in config.cmdline_ways) \
666 and (not (config.skip_perf_tests and isStatsTest())) \
667 and way not in getTestOpts().omit_ways
668
669 # The ways in which we will actually run this test
670 do_ways = list(filter (ok_way,all_ways))
671
672 # In fast mode, we skip all but one way
673 if config.fast and len(do_ways) > 0:
674 do_ways = [do_ways[0]]
675
676 if not config.clean_only:
677 # Run the required tests...
678 for way in do_ways:
679 if stopping():
680 break
681 do_test (name, way, func, args)
682
683 for way in all_ways:
684 if way not in do_ways:
685 skiptest (name,way)
686
687 if getTestOpts().cleanup != '' and (config.clean_only or do_ways != []):
688 pretest_cleanup(name)
689 clean([name + suff for suff in [
690 '', '.exe', '.exe.manifest', '.genscript',
691 '.stderr.normalised', '.stdout.normalised',
692 '.run.stderr.normalised', '.run.stdout.normalised',
693 '.comp.stderr.normalised', '.comp.stdout.normalised',
694 '.interp.stderr.normalised', '.interp.stdout.normalised',
695 '.stats', '.comp.stats',
696 '.hi', '.o', '.prof', '.exe.prof', '.hc',
697 '_stub.h', '_stub.c', '_stub.o',
698 '.hp', '.exe.hp', '.ps', '.aux', '.hcr', '.eventlog']])
699
700 if func == multi_compile or func == multi_compile_fail:
701 extra_mods = args[1]
702 clean([replace_suffix(fx[0],'o') for fx in extra_mods])
703 clean([replace_suffix(fx[0], 'hi') for fx in extra_mods])
704
705
706 clean(getTestOpts().clean_files)
707
708 if getTestOpts().outputdir != None:
709 odir = in_testdir(getTestOpts().outputdir)
710 try:
711 shutil.rmtree(odir)
712 except:
713 pass
714
715 try:
716 shutil.rmtree(in_testdir('.hpc.' + name))
717 except:
718 pass
719
720 try:
721 cleanCmd = getTestOpts().clean_cmd
722 if cleanCmd != None:
723 result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + cleanCmd)
724 if result != 0:
725 framework_fail(name, 'cleaning', 'clean-command failed: ' + str(result))
726 except:
727 framework_fail(name, 'cleaning', 'clean-command exception')
728
729 package_conf_cache_file_end_timestamp = get_package_cache_timestamp();
730
731 if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
732 framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))
733
734 try:
735 for f in files_written[name]:
736 if os.path.exists(f):
737 try:
738 if not f in files_written_not_removed[name]:
739 files_written_not_removed[name].append(f)
740 except:
741 files_written_not_removed[name] = [f]
742 except:
743 pass
744 except Exception as e:
745 framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
746
747 def clean(strs):
748 for str in strs:
749 for name in glob.glob(in_testdir(str)):
750 clean_full_path(name)
751
752 def clean_full_path(name):
753 try:
754 # Remove files...
755 os.remove(name)
756 except OSError as e1:
757 try:
758 # ... and empty directories
759 os.rmdir(name)
760 except OSError as e2:
761 # We don't want to fail here, but we do want to know
762 # what went wrong, so print out the exceptions.
763 # ENOENT isn't a problem, though, as we clean files
764 # that don't necessarily exist.
765 if e1.errno != errno.ENOENT:
766 print(e1)
767 if e2.errno != errno.ENOENT:
768 print(e2)
769
770 def do_test(name, way, func, args):
771 full_name = name + '(' + way + ')'
772
773 try:
774 if_verbose(2, "=====> %s %d of %d %s " % \
775 (full_name, t.total_tests, len(allTestNames), \
776 [t.n_unexpected_passes, \
777 t.n_unexpected_failures, \
778 t.n_framework_failures]))
779
780 if config.use_threads:
781 t.lock.release()
782
783 try:
784 preCmd = getTestOpts().pre_cmd
785 if preCmd != None:
786 result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + preCmd)
787 if result != 0:
788 framework_fail(name, way, 'pre-command failed: ' + str(result))
789 except:
790 framework_fail(name, way, 'pre-command exception')
791
792 try:
793 result = func(*[name,way] + args)
794 finally:
795 if config.use_threads:
796 t.lock.acquire()
797
798 if getTestOpts().expect != 'pass' and \
799 getTestOpts().expect != 'fail' and \
800 getTestOpts().expect != 'missing-lib':
801 framework_fail(name, way, 'bad expected ' + getTestOpts().expect)
802
803 try:
804 passFail = result['passFail']
805 except:
806 passFail = 'No passFail found'
807
808 if passFail == 'pass':
809 if getTestOpts().expect == 'pass' \
810 and way not in getTestOpts().expect_fail_for:
811 t.n_expected_passes = t.n_expected_passes + 1
812 if name in t.expected_passes:
813 t.expected_passes[name].append(way)
814 else:
815 t.expected_passes[name] = [way]
816 else:
817 if_verbose(1, '*** unexpected pass for %s' % full_name)
818 t.n_unexpected_passes = t.n_unexpected_passes + 1
819 addPassingTestInfo(t.unexpected_passes, getTestOpts().testdir, name, way)
820 elif passFail == 'fail':
821 if getTestOpts().expect == 'pass' \
822 and way not in getTestOpts().expect_fail_for:
823 if_verbose(1, '*** unexpected failure for %s' % full_name)
824 t.n_unexpected_failures = t.n_unexpected_failures + 1
825 reason = result['reason']
826 addFailingTestInfo(t.unexpected_failures, getTestOpts().testdir, name, reason, way)
827 else:
828 if getTestOpts().expect == 'missing-lib':
829 t.n_missing_libs = t.n_missing_libs + 1
830 if name in t.missing_libs:
831 t.missing_libs[name].append(way)
832 else:
833 t.missing_libs[name] = [way]
834 else:
835 t.n_expected_failures = t.n_expected_failures + 1
836 if name in t.expected_failures:
837 t.expected_failures[name].append(way)
838 else:
839 t.expected_failures[name] = [way]
840 else:
841 framework_fail(name, way, 'bad result ' + passFail)
842 except KeyboardInterrupt:
843 stopNow()
844 except:
845 framework_fail(name, way, 'do_test exception')
846 traceback.print_exc()
847
848 def addPassingTestInfo (testInfos, directory, name, way):
849 directory = re.sub('^\\.[/\\\\]', '', directory)
850
851 if not directory in testInfos:
852 testInfos[directory] = {}
853
854 if not name in testInfos[directory]:
855 testInfos[directory][name] = []
856
857 testInfos[directory][name].append(way)
858
859 def addFailingTestInfo (testInfos, directory, name, reason, way):
860 directory = re.sub('^\\.[/\\\\]', '', directory)
861
862 if not directory in testInfos:
863 testInfos[directory] = {}
864
865 if not name in testInfos[directory]:
866 testInfos[directory][name] = {}
867
868 if not reason in testInfos[directory][name]:
869 testInfos[directory][name][reason] = []
870
871 testInfos[directory][name][reason].append(way)
872
873 def skiptest (name, way):
874 # print 'Skipping test \"', name, '\"'
875 t.n_tests_skipped = t.n_tests_skipped + 1
876 if name in t.tests_skipped:
877 t.tests_skipped[name].append(way)
878 else:
879 t.tests_skipped[name] = [way]
880
881 def framework_fail( name, way, reason ):
882 full_name = name + '(' + way + ')'
883 if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
884 t.n_framework_failures = t.n_framework_failures + 1
885 if name in t.framework_failures:
886 t.framework_failures[name].append(way)
887 else:
888 t.framework_failures[name] = [way]
889
890 def badResult(result):
891 try:
892 if result['passFail'] == 'pass':
893 return False
894 return True
895 except:
896 return True
897
898 def passed():
899 return {'passFail': 'pass'}
900
901 def failBecause(reason):
902 return {'passFail': 'fail', 'reason': reason}
903
904 # -----------------------------------------------------------------------------
905 # Generic command tests
906
907 # A generic command test is expected to run and exit successfully.
908 #
909 # The expected exit code can be changed via exit_code() as normal, and
910 # the expected stdout/stderr are stored in <testname>.stdout and
911 # <testname>.stderr. The output of the command can be ignored
912 # altogether by using the ignore_output setup function on the
913 # test.
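#
# For example (illustrative only):
#
#   test('T1234', normal, run_command, ['echo hello'])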
914
915 def run_command( name, way, cmd ):
916 return simple_run( name, '', cmd, '' )
917
918 # -----------------------------------------------------------------------------
919 # GHCi tests
920
921 def ghci_script_without_flag(flag):
922 def apply(name, way, script):
923 overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
924 return ghci_script_override_default_flags(overrides)(name, way, script)
925
926 return apply
927
928 def ghci_script_override_default_flags(overrides):
929 def apply(name, way, script):
930 return ghci_script(name, way, script, overrides)
931
932 return apply
933
934 def ghci_script( name, way, script, override_flags = None ):
935 # Use overridden default flags when given
936 if override_flags:
937 default_flags = override_flags
938 else:
939 default_flags = getTestOpts().compiler_always_flags
940
941 # filter out -fforce-recomp from compiler_always_flags, because we're
942 # actually testing the recompilation behaviour in the GHCi tests.
943 flags = [f for f in default_flags if f != '-fforce-recomp']
944 flags.append(getTestOpts().extra_hc_opts)
945 if getTestOpts().outputdir != None:
946 flags.extend(["-outputdir", getTestOpts().outputdir])
947
948 # We pass HC and HC_OPTS as environment variables, so that the
949 # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
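# e.g. (illustrative only) a .script file might rebuild a helper module with:
#   :! $HC $HC_OPTS -c OtherModule.hs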
950 cmd = "HC='" + config.compiler + "' " + \
951 "HC_OPTS='" + ' '.join(flags) + "' " + \
952 "'" + config.compiler + "'" + \
953 ' --interactive -v0 -ignore-dot-ghci ' + \
954 ' '.join(flags)
955
956 getTestOpts().stdin = script
957 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
958
959 # -----------------------------------------------------------------------------
960 # Compile-only tests
961
962 def compile_override_default_flags(overrides):
963 def apply(name, way, extra_opts):
964 return do_compile(name, way, 0, '', [], extra_opts, overrides)
965
966 return apply
967
968 def compile_fail_override_default_flags(overrides):
969 def apply(name, way, extra_opts):
970 return do_compile(name, way, 1, '', [], extra_opts, overrides)
971
972 return apply
973
974 def compile_without_flag(flag):
975 def apply(name, way, extra_opts):
976 overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
977 return compile_override_default_flags(overrides)(name, way, extra_opts)
978
979 return apply
980
981 def compile_fail_without_flag(flag):
982 def apply(name, way, extra_opts):
983 overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
984 return compile_fail_override_default_flags(overrides)(name, way, extra_opts)
985
986 return apply
987
988 def compile( name, way, extra_hc_opts ):
989 return do_compile( name, way, 0, '', [], extra_hc_opts )
990
991 def compile_fail( name, way, extra_hc_opts ):
992 return do_compile( name, way, 1, '', [], extra_hc_opts )
993
994 def multimod_compile( name, way, top_mod, extra_hc_opts ):
995 return do_compile( name, way, 0, top_mod, [], extra_hc_opts )
996
997 def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
998 return do_compile( name, way, 1, top_mod, [], extra_hc_opts )
999
1000 def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
1001 return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)
1002
1003 def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
1004 return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
1005
1006 def do_compile( name, way, should_fail, top_mod, extra_mods, extra_hc_opts, override_flags = None ):
1007 # print 'Compile only, extra args = ', extra_hc_opts
1008 pretest_cleanup(name)
1009
1010 result = extras_build( way, extra_mods, extra_hc_opts )
1011 if badResult(result):
1012 return result
1013 extra_hc_opts = result['hc_opts']
1014
1015 force = 0
1016 if extra_mods:
1017 force = 1
1018 result = simple_build( name, way, extra_hc_opts, should_fail, top_mod, 0, 1, force, override_flags )
1019
1020 if badResult(result):
1021 return result
1022
1023 # the actual stderr should always match the expected, regardless
1024 # of whether we expected the compilation to fail or not (successful
1025 # compilations may generate warnings).
1026
1027 if getTestOpts().with_namebase == None:
1028 namebase = name
1029 else:
1030 namebase = getTestOpts().with_namebase
1031
1032 (platform_specific, expected_stderr_file) = platform_wordsize_qualify(namebase, 'stderr')
1033 actual_stderr_file = qualify(name, 'comp.stderr')
1034
1035 if not compare_outputs('stderr',
1036 join_normalisers(getTestOpts().extra_errmsg_normaliser,
1037 normalise_errmsg,
1038 normalise_whitespace),
1039 expected_stderr_file, actual_stderr_file):
1040 return failBecause('stderr mismatch')
1041
1042 # no problems found, this test passed
1043 return passed()
1044
1045 def compile_cmp_asm( name, way, extra_hc_opts ):
1046 print('Compile only, extra args = ', extra_hc_opts)
1047 pretest_cleanup(name)
1048 result = simple_build( name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0, 0)
1049
1050 if badResult(result):
1051 return result
1052
1053 # the actual stderr should always match the expected, regardless
1054 # of whether we expected the compilation to fail or not (successful
1055 # compilations may generate warnings).
1056
1057 if getTestOpts().with_namebase == None:
1058 namebase = name
1059 else:
1060 namebase = getTestOpts().with_namebase
1061
1062 (platform_specific, expected_asm_file) = platform_wordsize_qualify(namebase, 'asm')
1063 actual_asm_file = qualify(name, 's')
1064
1065 if not compare_outputs('asm', join_normalisers(normalise_errmsg, normalise_asm), \
1066 expected_asm_file, actual_asm_file):
1067 return failBecause('asm mismatch')
1068
1069 # no problems found, this test passed
1070 return passed()
1071
1072 # -----------------------------------------------------------------------------
1073 # Compile-and-run tests
1074
1075 def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts ):
1076 # print 'Compile and run, extra args = ', extra_hc_opts
1077 pretest_cleanup(name)
1078
1079 result = extras_build( way, extra_mods, extra_hc_opts )
1080 if badResult(result):
1081 return result
1082 extra_hc_opts = result['hc_opts']
1083
1084 if way == 'ghci': # interpreted...
1085 return interpreter_run( name, way, extra_hc_opts, 0, top_mod )
1086 else: # compiled...
1087 force = 0
1088 if extra_mods:
1089 force = 1
1090
1091 result = simple_build( name, way, extra_hc_opts, 0, top_mod, 1, 1, force)
1092 if badResult(result):
1093 return result
1094
1095 cmd = './' + name;
1096
1097 # we don't check the compiler's stderr for a compile-and-run test
1098 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1099
1100 def compile_and_run( name, way, extra_hc_opts ):
1101 return compile_and_run__( name, way, '', [], extra_hc_opts)
1102
1103 def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
1104 return compile_and_run__( name, way, top_mod, [], extra_hc_opts)
1105
1106 def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
1107 return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
1108
1109 def stats( name, way, stats_file ):
1110 opts = getTestOpts()
1111 return checkStats(name, way, stats_file, opts.stats_range_fields)
1112
1113 # -----------------------------------------------------------------------------
1114 # Check -t stats info
1115
1116 def checkStats(name, way, stats_file, range_fields):
1117 full_name = name + '(' + way + ')'
1118
1119 result = passed()
1120 if len(range_fields) > 0:
1121 f = open(in_testdir(stats_file))
1122 contents = f.read()
1123 f.close()
1124
1125 for (field, (expected, dev)) in range_fields.items():
1126 m = re.search('\("' + field + '", "([0-9]+)"\)', contents)
1127 if m == None:
1128 print('Failed to find field: ', field)
1129 result = failBecause('no such stats field')
continue
1130 val = int(m.group(1))
1131
1132 lowerBound = trunc( expected * ((100 - float(dev))/100))
1133 upperBound = trunc(0.5 + ceil(expected * ((100 + float(dev))/100)))
1134
1135 deviation = round(((float(val) * 100)/ expected) - 100, 1)
1136
1137 if val < lowerBound:
1138 print(field, 'value is too low:')
1139 print('(If this is because you have improved GHC, please')
1140 print('update the test so that GHC doesn\'t regress again)')
1141 result = failBecause('stat too good')
1142 if val > upperBound:
1143 print(field, 'value is too high:')
1144 result = failBecause('stat not good enough')
1145
1146 if val < lowerBound or val > upperBound or config.verbose >= 4:
1147 valStr = str(val)
1148 valLen = len(valStr)
1149 expectedStr = str(expected)
1150 expectedLen = len(expectedStr)
1151 length = max(len(str(x)) for x in [expected, lowerBound, upperBound, val])
1152
1153 def display(descr, val, extra):
1154 print(descr, str(val).rjust(length), extra)
1155
1156 display(' Expected ' + full_name + ' ' + field + ':', expected, '+/-' + str(dev) + '%')
1157 display(' Lower bound ' + full_name + ' ' + field + ':', lowerBound, '')
1158 display(' Upper bound ' + full_name + ' ' + field + ':', upperBound, '')
1159 display(' Actual ' + full_name + ' ' + field + ':', val, '')
1160 if val != expected:
1161 display(' Deviation ' + full_name + ' ' + field + ':', deviation, '%')
1162
1163 return result
1164
1165 # -----------------------------------------------------------------------------
1166 # Build a single-module program
1167
1168 def extras_build( way, extra_mods, extra_hc_opts ):
1169 for modopts in extra_mods:
1170 mod, opts = modopts
1171 result = simple_build( mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0, 0)
1172 if not (mod.endswith('.hs') or mod.endswith('.lhs')):
1173 extra_hc_opts += ' ' + replace_suffix(mod, 'o')
1174 if badResult(result):
1175 return result
1176
1177 return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1178
1179
1180 def simple_build( name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, noforce, override_flags = None ):
1181 opts = getTestOpts()
1182 errname = add_suffix(name, 'comp.stderr')
1183 rm_no_fail( qualify(errname, '') )
1184
1185 if top_mod != '':
1186 srcname = top_mod
1187 rm_no_fail( qualify(name, '') )
1188 base, suf = os.path.splitext(top_mod)
1189 rm_no_fail( qualify(base, '') )
1190 rm_no_fail( qualify(base, 'exe') )
1191 elif addsuf:
1192 srcname = add_hs_lhs_suffix(name)
1193 rm_no_fail( qualify(name, '') )
1194 else:
1195 srcname = name
1196 rm_no_fail( qualify(name, 'o') )
1197
1198 rm_no_fail( qualify(replace_suffix(srcname, "o"), '') )
1199
1200 to_do = ''
1201 if top_mod != '':
1202 to_do = '--make '
1203 if link:
1204 to_do = to_do + '-o ' + name
1205 elif link:
1206 to_do = '-o ' + name
1207 elif opts.compile_to_hc:
1208 to_do = '-C'
1209 else:
1210 to_do = '-c' # just compile
1211
1212 stats_file = name + '.comp.stats'
1213 if len(opts.compiler_stats_range_fields) > 0:
1214 extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1215
1216 # Required by GHC 7.3+, harmless for earlier versions:
1217 if (getTestOpts().c_src or
1218 getTestOpts().objc_src or
1219 getTestOpts().objcpp_src or
1220 getTestOpts().cmm_src):
1221 extra_hc_opts += ' -no-hs-main '
1222
1223 if getTestOpts().compile_cmd_prefix == '':
1224 cmd_prefix = ''
1225 else:
1226 cmd_prefix = getTestOpts().compile_cmd_prefix + ' '
1227
1228 if override_flags:
1229 comp_flags = copy.copy(override_flags)
1230 else:
1231 comp_flags = copy.copy(getTestOpts().compiler_always_flags)
1232
1233 if noforce:
1234 comp_flags = [f for f in comp_flags if f != '-fforce-recomp']
1235 if getTestOpts().outputdir != None:
1236 comp_flags.extend(["-outputdir", getTestOpts().outputdir])
1237
1238 cmd = 'cd ' + getTestOpts().testdir + " && " + cmd_prefix + "'" \
1239 + config.compiler + "' " \
1240 + ' '.join(comp_flags) + ' ' \
1241 + to_do + ' ' + srcname + ' ' \
1242 + ' '.join(config.way_flags(name)[way]) + ' ' \
1243 + extra_hc_opts + ' ' \
1244 + opts.extra_hc_opts + ' ' \
1245 + '>' + errname + ' 2>&1'
1246
1247 result = runCmdFor(name, cmd)
1248
1249 if result != 0 and not should_fail:
1250 actual_stderr = qualify(name, 'comp.stderr')
1251 if_verbose(1,'Compile failed (status ' + repr(result) + ') errors were:')
1252 if_verbose_dump(1,actual_stderr)
1253
1254 # ToDo: if the sub-shell was killed by ^C, then exit
1255
1256 statsResult = checkStats(name, way, stats_file, opts.compiler_stats_range_fields)
1257
1258 if badResult(statsResult):
1259 return statsResult
1260
1261 if should_fail:
1262 if result == 0:
1263 return failBecause('exit code 0')
1264 else:
1265 if result != 0:
1266 return failBecause('exit code non-0')
1267
1268 return passed()
1269
1270 # -----------------------------------------------------------------------------
1271 # Run a program and check its output
1272 #
1273 # If testname.stdin exists, route input from that, else
1274 # from /dev/null. Route output to testname.run.stdout and
1275 # testname.run.stderr. Returns a pass/fail result like the other test functions.
1276
1277 def simple_run( name, way, prog, args ):
1278 opts = getTestOpts()
1279
1280 # figure out what to use for stdin
1281 if opts.stdin != '':
1282 use_stdin = opts.stdin
1283 else:
1284 stdin_file = add_suffix(name, 'stdin')
1285 if os.path.exists(in_testdir(stdin_file)):
1286 use_stdin = stdin_file
1287 else:
1288 use_stdin = '/dev/null'
1289
1290 run_stdout = add_suffix(name,'run.stdout')
1291 run_stderr = add_suffix(name,'run.stderr')
1292
1293 rm_no_fail(qualify(name,'run.stdout'))
1294 rm_no_fail(qualify(name,'run.stderr'))
1295 rm_no_fail(qualify(name, 'hp'))
1296 rm_no_fail(qualify(name,'ps'))
1297 rm_no_fail(qualify(name, 'prof'))
1298
1299 my_rts_flags = rts_flags(way)
1300
1301 stats_file = name + '.stats'
1302 if len(opts.stats_range_fields) > 0:
1303 args += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1304
1305 if opts.no_stdin:
1306 stdin_comes_from = ''
1307 else:
1308 stdin_comes_from = ' <' + use_stdin
1309
1310 if opts.combined_output:
1311 redirection = ' >' + run_stdout \
1312 + ' 2>&1'
1313 else:
1314 redirection = ' >' + run_stdout \
1315 + ' 2>' + run_stderr
1316
1317 cmd = prog + ' ' + args + ' ' \
1318 + my_rts_flags + ' ' \
1319 + stdin_comes_from \
1320 + redirection
1321
1322 if opts.cmd_wrapper != None:
1323 cmd = opts.cmd_wrapper(cmd);
1324
1325 cmd = 'cd ' + opts.testdir + ' && ' + cmd
1326
1327 # run the command
1328 result = runCmdFor(name, cmd, timeout_multiplier=opts.timeout_multiplier)
1329
1330 exit_code = result >> 8
1331 signal = result & 0xff
1332
1333 # check the exit code
1334 if exit_code != opts.exit_code:
1335 print('Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')')
1336 dump_stdout(name)
1337 dump_stderr(name)
1338 return failBecause('bad exit code')
1339
1340 check_hp = my_rts_flags.find("-h") != -1
1341 check_prof = my_rts_flags.find("-p") != -1
1342
1343 if not opts.ignore_output:
1344 bad_stderr = not opts.combined_output and not check_stderr_ok(name)
1345 bad_stdout = not check_stdout_ok(name)
1346 if bad_stderr:
1347 return failBecause('bad stderr')
1348 if bad_stdout:
1349 return failBecause('bad stdout')
1350 # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
1351 if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
1352 return failBecause('bad heap profile')
1353 if check_prof and not check_prof_ok(name):
1354 return failBecause('bad profile')
1355
1356 return checkStats(name, way, stats_file, opts.stats_range_fields)
1357
1358 def rts_flags(way):
1359 if (way == ''):
1360 return ''
1361 else:
1362 args = config.way_rts_flags[way]
1363
1364 if args == []:
1365 return ''
1366 else:
1367 return '+RTS ' + ' '.join(args) + ' -RTS'
1368
1369 # -----------------------------------------------------------------------------
1370 # Run a program in the interpreter and check its output
1371
1372 def interpreter_run( name, way, extra_hc_opts, compile_only, top_mod ):
1373 outname = add_suffix(name, 'interp.stdout')
1374 errname = add_suffix(name, 'interp.stderr')
1375 rm_no_fail(outname)
1376 rm_no_fail(errname)
1377 rm_no_fail(name)
1378
1379 if (top_mod == ''):
1380 srcname = add_hs_lhs_suffix(name)
1381 else:
1382 srcname = top_mod
1383
1384 scriptname = add_suffix(name, 'genscript')
1385 qscriptname = in_testdir(scriptname)
1386 rm_no_fail(qscriptname)
1387
1388 delimiter = '===== program output begins here\n'
1389
1390 script = open(qscriptname, 'w')
1391 if not compile_only:
1392 # set the prog name and command-line args to match the compiled
1393 # environment.
1394 script.write(':set prog ' + name + '\n')
1395 script.write(':set args ' + getTestOpts().extra_run_opts + '\n')
1396 # Add marker lines to the stdout and stderr output files, so we
1397 # can separate GHCi's output from the program's.
1398 script.write(':! echo ' + delimiter)
1399 script.write(':! echo 1>&2 ' + delimiter)
1400 # Set stdout to be line-buffered to match the compiled environment.
1401 script.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
1402 # wrapping in GHC.TopHandler.runIO ensures we get the same output
1403 # in the event of an exception as for the compiled program.
1404 script.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
1405 script.close()
1406
1407 # figure out what to use for stdin
1408 if getTestOpts().stdin != '':
1409 stdin_file = in_testdir(getTestOpts().stdin)
1410 else:
1411 stdin_file = qualify(name, 'stdin')
1412
1413 if os.path.exists(stdin_file):
1414 # append the test's stdin file to the generated script, so that
1415 # GHCi reads it after the commands written above
1416 os.system('cat ' + stdin_file + ' >>' + qscriptname)
1418
1419 flags = copy.copy(getTestOpts().compiler_always_flags)
1420 if getTestOpts().outputdir != None:
1421 flags.extend(["-outputdir", getTestOpts().outputdir])
1422
1423 cmd = "'" + config.compiler + "' " \
1424 + ' '.join(flags) + ' ' \
1425 + srcname + ' ' \
1426 + ' '.join(config.way_flags(name)[way]) + ' ' \
1427 + extra_hc_opts + ' ' \
1428 + getTestOpts().extra_hc_opts + ' ' \
1429 + '<' + scriptname + ' 1>' + outname + ' 2>' + errname
1430
1431 if getTestOpts().cmd_wrapper != None:
1432 cmd = getTestOpts().cmd_wrapper(cmd);
1433
1434 cmd = 'cd ' + getTestOpts().testdir + " && " + cmd
1435
1436 result = runCmdFor(name, cmd, timeout_multiplier=getTestOpts().timeout_multiplier)
1437
1438 exit_code = result >> 8
1439 signal = result & 0xff
1440
1441 # split the stdout into compilation/program output
1442 split_file(in_testdir(outname), delimiter,
1443 qualify(name, 'comp.stdout'),
1444 qualify(name, 'run.stdout'))
1445 split_file(in_testdir(errname), delimiter,
1446 qualify(name, 'comp.stderr'),
1447 qualify(name, 'run.stderr'))
1448
1449 # check the exit code
1450 if exit_code != getTestOpts().exit_code:
1451 print('Wrong exit code (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
1452 dump_stdout(name)
1453 dump_stderr(name)
1454 return failBecause('bad exit code')
1455
1456 # ToDo: if the sub-shell was killed by ^C, then exit
1457
1458 if getTestOpts().ignore_output or (check_stderr_ok(name) and
1459 check_stdout_ok(name)):
1460 return passed()
1461 else:
1462 return failBecause('bad stdout or stderr')
1463
1464
1465 def split_file(in_fn, delimiter, out1_fn, out2_fn):
1466 infile = open(in_fn)
1467 out1 = open(out1_fn, 'w')
1468 out2 = open(out2_fn, 'w')
1469
1470 line = infile.readline()
1471 line = re.sub('\r', '', line) # ignore Windows EOL
1472 while (re.sub('^\s*','',line) != delimiter and line != ''):
1473 out1.write(line)
1474 line = infile.readline()
1475 line = re.sub('\r', '', line)
1476 out1.close()
1477
1478 line = infile.readline()
1479 while (line != ''):
1480 out2.write(line)
1481 line = infile.readline()
1482 out2.close()
1483
1484 # -----------------------------------------------------------------------------
1485 # Utils
1486
1487 def check_stdout_ok( name ):
1488 if getTestOpts().with_namebase == None:
1489 namebase = name
1490 else:
1491 namebase = getTestOpts().with_namebase
1492
1493 actual_stdout_file = qualify(name, 'run.stdout')
1494 (platform_specific, expected_stdout_file) = platform_wordsize_qualify(namebase, 'stdout')
1495
1496 def norm(str):
1497 if platform_specific:
1498 return str
1499 else:
1500 return normalise_output(str)
1501
1502 extra_norm = join_normalisers(norm, getTestOpts().extra_normaliser)
1503
1504 check_stdout = getTestOpts().check_stdout
1505 if check_stdout:
1506 return check_stdout(actual_stdout_file, extra_norm)
1507
1508 return compare_outputs('stdout', \
1509 extra_norm, \
1510 expected_stdout_file, actual_stdout_file)
1511
1512 def dump_stdout( name ):
1513 print('Stdout:')
1514 print(read_no_crs(qualify(name, 'run.stdout')))
1515
1516 def check_stderr_ok( name ):
1517 if getTestOpts().with_namebase == None:
1518 namebase = name
1519 else:
1520 namebase = getTestOpts().with_namebase
1521
1522 actual_stderr_file = qualify(name, 'run.stderr')
1523 (platform_specific, expected_stderr_file) = platform_wordsize_qualify(namebase, 'stderr')
1524
1525 def norm(str):
1526 if platform_specific:
1527 return str
1528 else:
1529 return normalise_errmsg(str)
1530
1531 return compare_outputs('stderr', \
1532 join_normalisers(norm, getTestOpts().extra_errmsg_normaliser), \
1533 expected_stderr_file, actual_stderr_file)
1534
1535 def dump_stderr( name ):
1536 print("Stderr:")
1537 print(read_no_crs(qualify(name, 'run.stderr')))
1538
1539 def read_no_crs(file):
1540 str = ''
1541 try:
1542 h = open(file)
1543 str = h.read()
1544 h.close()
1545 except:
1546 # On Windows, if the program fails very early, it seems the
1547 # files stdout/stderr are redirected to may not get created
1548 pass
1549 return re.sub('\r', '', str)
1550
1551 def write_file(file, str):
1552 h = open(file, 'w')
1553 h.write(str)
1554 h.close()
1555
1556 def check_hp_ok(name):
1557
1558 # do not qualify for hp2ps because we should be in the right directory
1559 hp2psCmd = "cd " + getTestOpts().testdir + " && '" + config.hp2ps + "' " + name
1560
1561 hp2psResult = runCmdExitCode(hp2psCmd)
1562
1563 actual_ps_file = qualify(name, 'ps')
1564
1565 if(hp2psResult == 0):
1566 if (os.path.exists(actual_ps_file)):
1567 if gs_working:
1568 gsResult = runCmdExitCode(genGSCmd(actual_ps_file))
1569 if (gsResult == 0):
1570 return (True)
1571 else:
1572 print("hp2ps output for " + name + "is not valid PostScript")
1573 else: return (True) # assume postscript is valid without ghostscript
1574 else:
1575 print("hp2ps did not generate PostScript for " + name)
1576 return (False)
1577 else:
1578 print("hp2ps error when processing heap profile for " + name)
1579 return(False)
1580
1581 def check_prof_ok(name):
1582
1583 prof_file = qualify(name,'prof')
1584
1585 if not os.path.exists(prof_file):
1586 print(prof_file + " does not exist")
1587 return(False)
1588
1589 if os.path.getsize(qualify(name,'prof')) == 0:
1590 print(prof_file + " is empty")
1591 return(False)
1592
1593 if getTestOpts().with_namebase == None:
1594 namebase = name
1595 else:
1596 namebase = getTestOpts().with_namebase
1597
1598 (platform_specific, expected_prof_file) = \
1599 platform_wordsize_qualify(namebase, 'prof.sample')
1600
1601 # sample prof file is not required
1602 if not os.path.exists(expected_prof_file):
1603 return True
1604 else:
1605 return compare_outputs('prof', \
1606 join_normalisers(normalise_whitespace,normalise_prof), \
1607 expected_prof_file, prof_file)
1608
1609 # Compare expected output to actual output, and optionally accept the
1610 # new output. Returns true if output matched or was accepted, false
1611 # otherwise.
1612 def compare_outputs( kind, normaliser, expected_file, actual_file ):
1613 if os.path.exists(expected_file):
1614 expected_raw = read_no_crs(expected_file)
1615 # print "norm:", normaliser(expected_raw)
1616 expected_str = normaliser(expected_raw)
1617 expected_file_for_diff = expected_file
1618 else:
1619 expected_str = ''
1620 expected_file_for_diff = '/dev/null'
1621
1622 actual_raw = read_no_crs(actual_file)
1623 actual_str = normaliser(actual_raw)
1624
1625 if expected_str == actual_str:
1626 return 1
1627 else:
1628 if_verbose(1, 'Actual ' + kind + ' output differs from expected:')
1629
1630 if expected_file_for_diff == '/dev/null':
1631 expected_normalised_file = '/dev/null'
1632 else:
1633 expected_normalised_file = expected_file + ".normalised"
1634 write_file(expected_normalised_file, expected_str)
1635
1636 actual_normalised_file = actual_file + ".normalised"
1637 write_file(actual_normalised_file, actual_str)
1638
1639 # Ignore whitespace when diffing. We should only get to this
1640 # point if there are non-whitespace differences
1641 #
1642 # Note we are diffing the *actual* output, not the normalised
1643 # output. The normalised output may have whitespace squashed
1644 # (including newlines) so the diff would be hard to read.
1645 # This does mean that the diff might contain changes that
1646 # would be normalised away.
1647 if (config.verbose >= 1):
1648 r = os.system( 'diff -uw ' + expected_file_for_diff + \
1649 ' ' + actual_file )
1650
1651 # If for some reason there were no non-whitespace differences,
1652 # then do a full diff
1653 if r == 0:
1654 r = os.system( 'diff -u ' + expected_file_for_diff + \
1655 ' ' + actual_file )
1656
1657 if config.accept:
1658 if_verbose(1, 'Accepting new output.')
1659 write_file(expected_file, actual_raw)
1660 return 1
1661 else:
1662 return 0
1663
1664
1665 def normalise_whitespace( str ):
1666 # Merge contiguous whitespace characters into a single space.
1667 str = re.sub('[ \t\n]+', ' ', str)
1668 return str
1669
1670 def normalise_errmsg( str ):
1671 # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows);
1672 # the colon is there because it appears in error messages.  This
1673 # hacky solution is used in place of more sophisticated filename
1674 # mangling.
1675 str = re.sub('([^\\s])\\.exe', '\\1', str)
1676 # normalise slashes, minimise Windows/Unix filename differences
1677 str = re.sub('\\\\', '/', str)
1678 # The inplace ghc's are called ghc-stage[123] to avoid filename
1679 # collisions, so we need to normalise that to just "ghc"
1680 str = re.sub('ghc-stage[123]', 'ghc', str)
1681 # Error messages sometimes contain the integer implementation package
1682 str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
1683 return str
1684
1685 # normalise a .prof file, so that we can reasonably compare it against
1686 # a sample. This doesn't compare any of the actual profiling data,
1687 # only the shape of the profile and the number of entries.
1688 def normalise_prof (str):
1689 # strip everything up to the line beginning "COST CENTRE"
1690 str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)
1691
1692 # strip results for CAFs, these tend to change unpredictably
1693 str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)
1694
1695 # XXX Ignore Main.main. Sometimes this appears under CAF, and
1696 # sometimes under MAIN.
1697 str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)
1698
1699 # We have something like this:
1700
1701 # MAIN MAIN 101 0 0.0 0.0 100.0 100.0
1702 # k Main 204 1 0.0 0.0 0.0 0.0
1703 # foo Main 205 1 0.0 0.0 0.0 0.0
1704 # foo.bar Main 207 1 0.0 0.0 0.0 0.0
1705
1706 # then we remove all the specific profiling data, leaving only the
1707 # cost centre name, module, and entries, to end up with this:
1708
1709 # MAIN MAIN 0
1710 # k Main 1
1711 # foo Main 1
1712 # foo.bar Main 1
1713
1714 str = re.sub('\n([ \t]*[^ \t]+)([ \t]+[^ \t]+)([ \t]+\\d+)([ \t]+\\d+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)','\n\\1 \\2 \\4',str)
1715 return str
1716
1717 def normalise_slashes_( str ):
1718 str = re.sub('\\\\', '/', str)
1719 return str
1720
1721 def normalise_exe_( str ):
1722 str = re.sub('\.exe', '', str)
1723 return str
1724
1725 def normalise_output( str ):
1726 # Remove a .exe extension (for Windows)
1727 # This can occur in error messages generated by the program.
1728 str = re.sub('([^\\s])\\.exe', '\\1', str)
1729 return str
1730
1731 def normalise_asm( str ):
1732 lines = str.split('\n')
1733 # Only keep instructions and labels not starting with a dot.
1734 metadata = re.compile('^[ \t]*\\..*$')
1735 out = []
1736 for line in lines:
1737 # Drop metadata directives (e.g. ".type")
1738 if not metadata.match(line):
1739 line = re.sub('@plt', '', line)
1740 instr = line.lstrip().split()
1741 # Drop empty lines.
1742 if not instr:
1743 continue
1744 # Drop operands, except for call instructions.
1745 elif instr[0] == 'call':
1746 out.append(instr[0] + ' ' + instr[1])
1747 else:
1748 out.append(instr[0])
1749 out = '\n'.join(out)
1750 return out
1751
1752 def if_verbose( n, s ):
1753 if config.verbose >= n:
1754 print(s)
1755
1756 def if_verbose_dump( n, f ):
1757 if config.verbose >= n:
1758 try:
1759 print(open(f).read())
1760 except:
1761 print('')
1762
1763 def rawSystem(cmd_and_args):
1764 # We prefer subprocess.call to os.spawnv as the latter
1765 # seems to send its arguments through a shell or something
1766 # with the Windows (non-cygwin) python. An argument "a b c"
1767 # turns into three arguments ["a", "b", "c"].
1768
1769 # However, subprocess is new in python 2.4, so fall back to
1770 # using spawnv if we don't have it
1771
1772 if have_subprocess:
1773 return subprocess.call(cmd_and_args)
1774 else:
1775 return os.spawnv(os.P_WAIT, cmd_and_args[0], cmd_and_args)
1776
1777 # Note that this doesn't handle the timeout itself; it is just used for
1778 # commands that have timeout handling built-in.
1779 def rawSystemWithTimeout(cmd_and_args):
1780 r = rawSystem(cmd_and_args)
1781 if r == 98:
1782 # The python timeout program uses 98 to signal that ^C was pressed
1783 stopNow()
1784 return r
1785
1786 # cmd is a complex command in Bourne-shell syntax
1787 # e.g (cd . && 'c:/users/simonpj/darcs/HEAD/compiler/stage1/ghc-inplace' ...etc)
1788 # Hence it must ultimately be run by a Bourne shell
1789 #
1790 # Mostly it invokes the command wrapped in 'timeout' thus
1791 # timeout 300 'cd . && ...blah blah'
1792 # so it's timeout's job to invoke the Bourne shell
1793 #
1794 # But watch out for the case when there is no timeout program!
1795 # Then, when using the native Python, os.system will invoke the cmd shell
1796
1797 def runCmd( cmd ):
1798 if_verbose( 3, cmd )
1799 r = 0
1800 if config.os == 'mingw32':
1801 # On MinGW, we will always have timeout
1802 assert config.timeout_prog!=''
1803
1804 if config.timeout_prog != '':
1805 r = rawSystemWithTimeout([config.timeout_prog, str(config.timeout), cmd])
1806 else:
1807 r = os.system(cmd)
1808 return r << 8

def runCmdFor( name, cmd, timeout_multiplier=1.0 ):
    if_verbose( 3, cmd )
    r = 0
    if config.os == 'mingw32':
        # On MinGW, we will always have timeout
        assert config.timeout_prog != ''
    timeout = int(ceil(config.timeout * timeout_multiplier))

    if config.timeout_prog != '':
        if config.check_files_written:
            fn = name + ".strace"
            r = rawSystemWithTimeout(
                    ["strace", "-o", fn, "-fF",
                     "-e", "creat,open,chdir,clone,vfork",
                     config.timeout_prog, str(timeout), cmd])
            addTestFilesWritten(name, fn)
            rm_no_fail(fn)
        else:
            r = rawSystemWithTimeout([config.timeout_prog, str(timeout), cmd])
    else:
        r = os.system(cmd)
    return r << 8
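
# A usage sketch for runCmdFor above (hypothetical test name, for illustration
# only):
#     runCmdFor('T1234', 'cd . && ./T1234 >T1234.run.stdout', timeout_multiplier=2.0)
# would run the command with twice the configured timeout.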

def runCmdExitCode( cmd ):
    return (runCmd(cmd) >> 8);


# -----------------------------------------------------------------------------
# checking for files being written to by multiple tests

re_strace_call_end = '(\) += ([0-9]+|-1 E.*)| <unfinished ...>)$'
re_strace_unavailable = re.compile('^\) += \? <unavailable>$')
re_strace_pid = re.compile('^([0-9]+) +(.*)')
re_strace_clone = re.compile('^(clone\(|<... clone resumed> ).*\) = ([0-9]+)$')
re_strace_clone_unfinished = re.compile('^clone\( <unfinished \.\.\.>$')
re_strace_vfork = re.compile('^(vfork\(\)|<\.\.\. vfork resumed> \)) += ([0-9]+)$')
re_strace_vfork_unfinished = re.compile('^vfork\( <unfinished \.\.\.>$')
re_strace_chdir = re.compile('^chdir\("([^"]*)"(\) += 0| <unfinished ...>)$')
re_strace_chdir_resumed = re.compile('^<\.\.\. chdir resumed> \) += 0$')
re_strace_open = re.compile('^open\("([^"]*)", ([A-Z_|]*)(, [0-9]+)?' + re_strace_call_end)
re_strace_open_resumed = re.compile('^<... open resumed> ' + re_strace_call_end)
re_strace_ignore_sigchild = re.compile('^--- SIGCHLD \(Child exited\) @ 0 \(0\) ---$')
re_strace_ignore_sigvtalarm = re.compile('^--- SIGVTALRM \(Virtual timer expired\) @ 0 \(0\) ---$')
re_strace_ignore_sigint = re.compile('^--- SIGINT \(Interrupt\) @ 0 \(0\) ---$')
re_strace_ignore_sigfpe = re.compile('^--- SIGFPE \(Floating point exception\) @ 0 \(0\) ---$')
re_strace_ignore_sigsegv = re.compile('^--- SIGSEGV \(Segmentation fault\) @ 0 \(0\) ---$')
re_strace_ignore_sigpipe = re.compile('^--- SIGPIPE \(Broken pipe\) @ 0 \(0\) ---$')
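
# For illustration (made-up strace output): a log line such as
#     12345 open("T1234.hs", O_RDONLY) = 3
# is split into pid and call by re_strace_pid, and the call part is then
# matched by re_strace_open, yielding the file name and the open flags.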

# Files that are read or written but shouldn't be:
# * ghci_history shouldn't be read or written by tests
# * things under package.conf.d shouldn't be written by tests
bad_file_usages = {}

# Mapping from tests to the list of files that they write
files_written = {}

# Mapping from tests to the list of files that they write but don't clean
files_written_not_removed = {}

def add_bad_file_usage(name, file):
    try:
        if not file in bad_file_usages[name]:
            bad_file_usages[name].append(file)
    except:
        bad_file_usages[name] = [file]

def mkPath(curdir, path):
    # Given the current full directory is 'curdir', what is the full
    # path to 'path'?
    return os.path.realpath(os.path.join(curdir, path))
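
# For example (assuming no symlinks are involved),
#     mkPath('/a/b', '../c')  ==  '/a/c'
# and an absolute 'path' is returned unchanged apart from normalisation.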

def addTestFilesWritten(name, fn):
    if config.use_threads:
        with t.lockFilesWritten:
            addTestFilesWrittenHelper(name, fn)
    else:
        addTestFilesWrittenHelper(name, fn)

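# addTestFilesWrittenHelper parses the strace log 'fn' produced for test
# 'name': it tracks the working directory of each traced pid (following
# chdir/clone/vfork) and records every file the test opens for writing,
# flagging writes to ghci_history and package.conf.d as bad file usages.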
def addTestFilesWrittenHelper(name, fn):
    started = False
    working_directories = {}

    with open(fn, 'r') as f:
        for line in f:
            m_pid = re_strace_pid.match(line)
            if m_pid:
                pid = m_pid.group(1)
                content = m_pid.group(2)
            elif re_strace_unavailable.match(line):
                continue
            else:
                framework_fail(name, 'strace', "Can't find pid in strace line: " + line)

            m_open = re_strace_open.match(content)
            m_chdir = re_strace_chdir.match(content)
            m_clone = re_strace_clone.match(content)
            m_vfork = re_strace_vfork.match(content)

            if not started:
                working_directories[pid] = os.getcwd()
                started = True

            if m_open:
                file = m_open.group(1)
                file = mkPath(working_directories[pid], file)
                if file.endswith("ghci_history"):
                    add_bad_file_usage(name, file)
                elif not file in ['/dev/tty', '/dev/null'] and not file.startswith("/tmp/ghc"):
                    flags = m_open.group(2).split('|')
                    if 'O_WRONLY' in flags or 'O_RDWR' in flags:
                        if re.search('package\.conf\.d', file):
                            add_bad_file_usage(name, file)
                        else:
                            try:
                                if not file in files_written[name]:
                                    files_written[name].append(file)
                            except:
                                files_written[name] = [file]
                    elif 'O_RDONLY' in flags:
                        pass
                    else:
                        framework_fail(name, 'strace', "Can't understand flags in open strace line: " + line)
            elif m_chdir:
                # We optimistically assume that unfinished chdir's are going to succeed
                dir = m_chdir.group(1)
                working_directories[pid] = mkPath(working_directories[pid], dir)
            elif m_clone:
                working_directories[m_clone.group(2)] = working_directories[pid]
            elif m_vfork:
                working_directories[m_vfork.group(2)] = working_directories[pid]
            elif re_strace_open_resumed.match(content):
                pass
            elif re_strace_chdir_resumed.match(content):
                pass
            elif re_strace_vfork_unfinished.match(content):
                pass
            elif re_strace_clone_unfinished.match(content):
                pass
            elif re_strace_ignore_sigchild.match(content):
                pass
            elif re_strace_ignore_sigvtalarm.match(content):
                pass
            elif re_strace_ignore_sigint.match(content):
                pass
            elif re_strace_ignore_sigfpe.match(content):
                pass
            elif re_strace_ignore_sigsegv.match(content):
                pass
            elif re_strace_ignore_sigpipe.match(content):
                pass
            else:
                framework_fail(name, 'strace', "Can't understand strace line: " + line)

def checkForFilesWrittenProblems(file):
    foundProblem = False

    files_written_inverted = {}
    for t in files_written.keys():
        for f in files_written[t]:
            try:
                files_written_inverted[f].append(t)
            except:
                files_written_inverted[f] = [t]

    for f in files_written_inverted.keys():
        if len(files_written_inverted[f]) > 1:
            if not foundProblem:
                foundProblem = True
                file.write("\n")
                file.write("\nSome files are written by multiple tests:\n")
            file.write(" " + f + " (" + str(files_written_inverted[f]) + ")\n")
    if foundProblem:
        file.write("\n")

    # -----

    if len(files_written_not_removed) > 0:
        file.write("\n")
        file.write("\nSome files written but not removed:\n")
        tests = list(files_written_not_removed.keys())
        tests.sort()
        for t in tests:
            for f in files_written_not_removed[t]:
                file.write(" " + t + ": " + f + "\n")
        file.write("\n")

    # -----

    if len(bad_file_usages) > 0:
        file.write("\n")
        file.write("\nSome bad file usages:\n")
        tests = list(bad_file_usages.keys())
        tests.sort()
        for t in tests:
            for f in bad_file_usages[t]:
                file.write(" " + t + ": " + f + "\n")
        file.write("\n")

# -----------------------------------------------------------------------------
# checking if ghostscript is available for checking the output of hp2ps

def genGSCmd(psfile):
    return (config.gs + ' -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE ' + psfile);

def gsNotWorking():
    global gs_working
    print("GhostScript not available for hp2ps tests")

global gs_working
gs_working = 0
if config.have_profiling:
    if config.gs != '':
        resultGood = runCmdExitCode(genGSCmd(config.confdir + '/good.ps'));
        if resultGood == 0:
            resultBad = runCmdExitCode(genGSCmd(config.confdir + '/bad.ps'));
            if resultBad != 0:
                print("GhostScript available for hp2ps tests")
                gs_working = 1;
            else:
                gsNotWorking();
        else:
            gsNotWorking();
    else:
        gsNotWorking();

def rm_no_fail( file ):
    try:
        os.remove( file )
    finally:
        return

def add_suffix( name, suffix ):
    if suffix == '':
        return name
    else:
        return name + '.' + suffix

def add_hs_lhs_suffix(name):
    if getTestOpts().c_src:
        return add_suffix(name, 'c')
    elif getTestOpts().cmm_src:
        return add_suffix(name, 'cmm')
    elif getTestOpts().objc_src:
        return add_suffix(name, 'm')
    elif getTestOpts().objcpp_src:
        return add_suffix(name, 'mm')
    elif getTestOpts().literate:
        return add_suffix(name, 'lhs')
    else:
        return add_suffix(name, 'hs')
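
# For example, for a test named 'T1234' (illustrative name only),
# add_hs_lhs_suffix above picks 'T1234.lhs' when the test is marked as
# literate, 'T1234.c' for c_src tests, and so on, defaulting to 'T1234.hs'.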

def replace_suffix( name, suffix ):
    base, suf = os.path.splitext(name)
    return base + '.' + suffix

def in_testdir( name ):
    return (getTestOpts().testdir + '/' + name)

def qualify( name, suff ):
    return in_testdir(add_suffix(name, suff))


# Finding the sample output. The filename is of the form
#
#     <test>.stdout[-<compiler>][-<version>][-ws-<wordsize>][-<platform>]
#
# and we pick the most specific version available. The <version> is
# the major version of the compiler (e.g. 6.8.2 would be "6.8"). For
# more fine-grained control use if_compiler_lt().
#
def platform_wordsize_qualify( name, suff ):

    basepath = qualify(name, suff)

    paths = [(platformSpecific, basepath + comp + vers + ws + plat)
             for (platformSpecific, plat) in [(1, '-' + config.platform),
                                              (1, '-' + config.os),
                                              (0, '')]
             for ws in ['-ws-' + config.wordsize, '']
             for comp in ['-' + config.compiler_type, '']
             for vers in ['-' + config.compiler_maj_version, '']]

    dir = glob.glob(basepath + '*')
    dir = [normalise_slashes_(d) for d in dir]

    for (platformSpecific, f) in paths:
        if f in dir:
            return (platformSpecific,f)

    return (0, basepath)
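
# For illustration (hypothetical values): with config.platform
# 'x86_64-unknown-linux', config.os 'linux', wordsize '64', compiler type 'ghc'
# and major version '7.6', the candidates platform_wordsize_qualify tries for
# 'T1234.stdout' range from the most specific, e.g.
#     T1234.stdout-ghc-7.6-ws-64-x86_64-unknown-linux
# down to the plain
#     T1234.stdout
# and the first candidate that actually exists in the test directory wins.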

# Clean up prior to the test, so that we can't spuriously conclude
# that it passed on the basis of old run outputs.
def pretest_cleanup(name):
    if getTestOpts().outputdir != None:
        odir = in_testdir(getTestOpts().outputdir)
        try:
            shutil.rmtree(odir)
        except:
            pass
        os.mkdir(odir)

    rm_no_fail(qualify(name,'interp.stderr'))
    rm_no_fail(qualify(name,'interp.stdout'))
    rm_no_fail(qualify(name,'comp.stderr'))
    rm_no_fail(qualify(name,'comp.stdout'))
    rm_no_fail(qualify(name,'run.stderr'))
    rm_no_fail(qualify(name,'run.stdout'))
    rm_no_fail(qualify(name,'tix'))
    rm_no_fail(qualify(name,'exe.tix'))
    # simple_build zaps the following:
    # rm_no_fail(qualify("o"))
    # rm_no_fail(qualify(""))
    # not interested in the return code

# -----------------------------------------------------------------------------
# Return a list of all the files ending in '.T' below directories roots.

def findTFiles(roots):
    # It would be better to use os.walk, but that
    # gives backslashes on Windows, which trip the
    # testsuite later :-(
    return [filename for root in roots for filename in findTFiles_(root)]

def findTFiles_(path):
    if os.path.isdir(path):
        paths = [path + '/' + x for x in os.listdir(path)]
        return findTFiles(paths)
    elif path[-2:] == '.T':
        return [path]
    else:
        return []
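
# For example, findTFiles(['.']) returns every file whose name ends in '.T'
# anywhere below the current directory.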

# -----------------------------------------------------------------------------
# Output a test summary to the specified file object

def summary(t, file):

    file.write('\n')
    printUnexpectedTests(file, [t.unexpected_passes, t.unexpected_failures])
    file.write('OVERALL SUMMARY for test run started at '
               + time.strftime("%c %Z", t.start_time) + '\n'
               + str(datetime.timedelta(seconds=
                     round(time.time() - time.mktime(t.start_time)))).rjust(8)
               + ' spent to go through\n'
               + repr(t.total_tests).rjust(8)
               + ' total tests, which gave rise to\n'
               + repr(t.total_test_cases).rjust(8)
               + ' test cases, of which\n'
               + repr(t.n_tests_skipped).rjust(8)
               + ' were skipped\n'
               + '\n'
               + repr(t.n_missing_libs).rjust(8)
               + ' had missing libraries\n'
               + repr(t.n_expected_passes).rjust(8)
               + ' expected passes\n'
               + repr(t.n_expected_failures).rjust(8)
               + ' expected failures\n'
               + '\n'
               + repr(t.n_framework_failures).rjust(8)
               + ' caused framework failures\n'
               + repr(t.n_unexpected_passes).rjust(8)
               + ' unexpected passes\n'
               + repr(t.n_unexpected_failures).rjust(8)
               + ' unexpected failures\n'
               + '\n')

    if t.n_unexpected_passes > 0:
        file.write('Unexpected passes:\n')
        printPassingTestInfosSummary(file, t.unexpected_passes)

    if t.n_unexpected_failures > 0:
        file.write('Unexpected failures:\n')
        printFailingTestInfosSummary(file, t.unexpected_failures)

    if config.check_files_written:
        checkForFilesWrittenProblems(file)

    if stopping():
        file.write('WARNING: Testsuite run was terminated early\n')

def printUnexpectedTests(file, testInfoss):
    unexpected = []
    for testInfos in testInfoss:
        directories = testInfos.keys()
        for directory in directories:
            tests = list(testInfos[directory].keys())
            unexpected += tests
    if unexpected != []:
        file.write('Unexpected results from:\n')
        file.write('TEST="' + ' '.join(unexpected) + '"\n')
        file.write('\n')

def printPassingTestInfosSummary(file, testInfos):
    directories = list(testInfos.keys())
    directories.sort()
    maxDirLen = max(len(x) for x in directories)
    for directory in directories:
        tests = list(testInfos[directory].keys())
        tests.sort()
        for test in tests:
            file.write(' ' + directory.ljust(maxDirLen + 2) + test + \
                       ' (' + ','.join(testInfos[directory][test]) + ')\n')
    file.write('\n')

def printFailingTestInfosSummary(file, testInfos):
    directories = list(testInfos.keys())
    directories.sort()
    maxDirLen = max(len(d) for d in directories)
    for directory in directories:
        tests = list(testInfos[directory].keys())
        tests.sort()
        for test in tests:
            reasons = testInfos[directory][test].keys()
            for reason in reasons:
                file.write(' ' + directory.ljust(maxDirLen + 2) + test + \
                           ' [' + reason + ']' + \
                           ' (' + ','.join(testInfos[directory][test][reason]) + ')\n')
    file.write('\n')

def getStdout(cmd):
    if have_subprocess:
        p = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        (stdout, stderr) = p.communicate()
        r = p.wait()
        if r != 0:
            raise Exception("Command failed: " + str(cmd))
        if stderr != '':
            raise Exception("stderr from command: " + str(cmd))
        return stdout
    else:
        raise Exception("Need subprocess to get stdout, but don't have it")
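
# A usage sketch (illustrative only): getStdout([config.ghc_pkg, '--version'])
# returns the captured standard output of the command, raising if the command
# fails or writes to stderr. Note that on Python 3, communicate() returns
# bytes here, so callers may need to decode the result.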