The test runner now also works under the msys-native Python.
[ghc.git] / testsuite / driver / testlib.py
1 #
2 # (c) Simon Marlow 2002
3 #
4
5 from __future__ import print_function
6
7 import shutil
8 import sys
9 import os
10 import errno
11 import string
12 import re
13 import traceback
14 import time
15 import datetime
16 import copy
17 import glob
18 from math import ceil, trunc
19 import collections
20
21 have_subprocess = False
22 try:
23 import subprocess
24 have_subprocess = True
25 except ImportError:
26 print("Warning: subprocess not found, will fall back to spawnv")
27
28 from testglobals import *
29 from testutil import *
30
31 if config.use_threads:
32 import threading
33 try:
34 import thread
35 except ImportError: # Python 3
36 import _thread as thread
37
38 global wantToStop
39 wantToStop = False
40 def stopNow():
41 global wantToStop
42 wantToStop = True
43 def stopping():
44 return wantToStop
45
46 # Options valid for the current test only (these get reset to
47 # testdir_testopts after each test).
48
49 global testopts_local
50 if config.use_threads:
51 testopts_local = threading.local()
52 else:
53 class TestOpts_Local:
54 pass
55 testopts_local = TestOpts_Local()
56
57 def getTestOpts():
58 return testopts_local.x
59
60 def setLocalTestOpts(opts):
61 global testopts_local
62 testopts_local.x=opts
63
64 def isStatsTest():
65 opts = getTestOpts()
66 return len(opts.compiler_stats_range_fields) > 0 or len(opts.stats_range_fields) > 0
67
68
69 # This can be called at the top of a file of tests, to set default test options
70 # for the following tests.
71 def setTestOpts( f ):
72 global thisdir_settings
73 thisdir_settings = [thisdir_settings, f]
74
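# For example, a hypothetical all.T could start with (illustrative only;
# the settings themselves are arbitrary):
#
#   setTestOpts(only_compiler_types(['ghc']))
#   setTestOpts(omit_ways(['ghci']))
#
# after which every test() call in that file inherits those defaults.
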
75 # -----------------------------------------------------------------------------
76 # Canned setup functions for common cases. eg. for a test you might say
77 #
78 # test('test001', normal, compile, [''])
79 #
80 # to run it without any options, but change it to
81 #
82 # test('test001', expect_fail, compile, [''])
83 #
84 # to expect failure for this test.
85
86 def normal( name, opts ):
87 return;
88
89 def skip( name, opts ):
90 opts.skip = 1
91
92 def expect_fail( name, opts ):
93 opts.expect = 'fail';
94
95 def reqlib( lib ):
96 return lambda name, opts, l=lib: _reqlib (name, opts, l )
97
98 # Cache the results of looking to see if we have a library or not.
99 # This makes quite a difference, especially on Windows.
100 have_lib = {}
101
102 def _reqlib( name, opts, lib ):
103 if lib in have_lib:
104 got_it = have_lib[lib]
105 else:
106 if have_subprocess:
107 # By preference we use subprocess, as the alternative uses
108 # /dev/null which mingw doesn't have.
109 p = subprocess.Popen([config.ghc_pkg, '--no-user-package-db', 'describe', lib],
110 stdout=subprocess.PIPE,
111 stderr=subprocess.PIPE)
112 # read from stdout and stderr to avoid blocking due to
113 # buffers filling
114 p.communicate()
115 r = p.wait()
116 else:
117 r = os.system(config.ghc_pkg + ' describe ' + lib
118 + ' > /dev/null 2> /dev/null')
119 got_it = r == 0
120 have_lib[lib] = got_it
121
122 if not got_it:
123 opts.expect = 'missing-lib'
124
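# Typical use in a test file (the test and library names are made up,
# for illustration only):
#
#   test('T1234', reqlib('parsec'), compile_and_run, [''])
#
# If the library is not installed, the test is reported as
# 'missing-lib' rather than as an unexpected failure.
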
125 def req_profiling( name, opts ):
126 if not config.have_profiling:
127 opts.expect = 'fail'
128
129 def req_shared_libs( name, opts ):
130 if not config.have_shared_libs:
131 opts.expect = 'fail'
132
133 def req_interp( name, opts ):
134 if not config.have_interp:
135 opts.expect = 'fail'
136
137 def req_smp( name, opts ):
138 if not config.have_smp:
139 opts.expect = 'fail'
140
141 def ignore_output( name, opts ):
142 opts.ignore_output = 1
143
144 def no_stdin( name, opts ):
145 opts.no_stdin = 1
146
147 def combined_output( name, opts ):
148 opts.combined_output = True
149
150 # -----
151
152 def expect_fail_for( ways ):
153 return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )
154
155 def _expect_fail_for( name, opts, ways ):
156 opts.expect_fail_for = ways
157
158 def expect_broken( bug ):
159 return lambda name, opts, b=bug: _expect_broken (name, opts, b )
160
161 def _expect_broken( name, opts, bug ):
162 record_broken(name, opts, bug)
163 opts.expect = 'fail';
164
165 def expect_broken_for( bug, ways ):
166 return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )
167
168 def _expect_broken_for( name, opts, bug, ways ):
169 record_broken(name, opts, bug)
170 opts.expect_fail_for = ways
171
172 def record_broken(name, opts, bug):
173 global brokens
174 me = (bug, opts.testdir, name)
175 if not me in brokens:
176 brokens.append(me)
177
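# Illustrative use (made-up test name and ticket number):
#
#   test('T1234', expect_broken(5678), compile, [''])
#
# marks T1234 as expected to fail because of ticket #5678; the
# (ticket, directory, test) triple is also recorded so the runner can
# report known-broken tests.
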
178 # -----
179
180 def omit_ways( ways ):
181 return lambda name, opts, w=ways: _omit_ways( name, opts, w )
182
183 def _omit_ways( name, opts, ways ):
184 opts.omit_ways = ways
185
186 # -----
187
188 def only_ways( ways ):
189 return lambda name, opts, w=ways: _only_ways( name, opts, w )
190
191 def _only_ways( name, opts, ways ):
192 opts.only_ways = ways
193
194 # -----
195
196 def extra_ways( ways ):
197 return lambda name, opts, w=ways: _extra_ways( name, opts, w )
198
199 def _extra_ways( name, opts, ways ):
200 opts.extra_ways = ways
201
202 # -----
203
204 def omit_compiler_types( compiler_types ):
205 return lambda name, opts, c=compiler_types: _omit_compiler_types(name, opts, c)
206
207 def _omit_compiler_types( name, opts, compiler_types ):
208 if config.compiler_type in compiler_types:
209 opts.skip = 1
210
211 # -----
212
213 def only_compiler_types( compiler_types ):
214 return lambda name, opts, c=compiler_types: _only_compiler_types(name, opts, c)
215
216 def _only_compiler_types( name, opts, compiler_types ):
217 if config.compiler_type not in compiler_types:
218 opts.skip = 1
219
220 # -----
221
222 def set_stdin( file ):
223 return lambda name, opts, f=file: _set_stdin(name, opts, f);
224
225 def _set_stdin( name, opts, f ):
226 opts.stdin = f
227
228 # -----
229
230 def exit_code( val ):
231 return lambda name, opts, v=val: _exit_code(name, opts, v);
232
233 def _exit_code( name, opts, v ):
234 opts.exit_code = v
235
236 def signal_exit_code( val ):
237 if opsys('solaris2'):
238 return exit_code( val );
239 else:
240 # When an application running on Linux receives a fatal signal,
241 # its exit code is encoded as 128 + the signal value. See
242 # http://www.tldp.org/LDP/abs/html/exitcodes.html
243 # I assume that Mac OS X behaves in the same way; at least the
244 # Mac OS X builder's behaviour suggests so.
245 return exit_code( val+128 );
246
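# Worked example, assuming the Linux/Mac encoding described above:
# a program killed by SIGSEGV (signal 11) exits with code 128 + 11 = 139,
# so signal_exit_code(11) behaves like exit_code(139) there, but like
# exit_code(11) on solaris2.
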
247 # -----
248
249 def timeout_multiplier( val ):
250 return lambda name, opts, v=val: _timeout_multiplier(name, opts, v)
251
252 def _timeout_multiplier( name, opts, v ):
253 opts.timeout_multiplier = v
254
255 # -----
256
257 def extra_run_opts( val ):
258 return lambda name, opts, v=val: _extra_run_opts(name, opts, v);
259
260 def _extra_run_opts( name, opts, v ):
261 opts.extra_run_opts = v
262
263 # -----
264
265 def extra_hc_opts( val ):
266 return lambda name, opts, v=val: _extra_hc_opts(name, opts, v);
267
268 def _extra_hc_opts( name, opts, v ):
269 opts.extra_hc_opts = v
270
271 # -----
272
273 def extra_clean( files ):
274 return lambda name, opts, v=files: _extra_clean(name, opts, v);
275
276 def _extra_clean( name, opts, v ):
277 opts.clean_files = v
278
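# Illustrative use (made-up names): a test that leaves extra files
# behind can ask for them to be cleaned up afterwards:
#
#   test('T2468', extra_clean(['T2468.out', 'T2468A.hi']),
#        multimod_compile_and_run, ['T2468', ''])
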
279 # -----
280
281 def stats_num_field( field, expecteds ):
282 return lambda name, opts, f=field, e=expecteds: _stats_num_field(name, opts, f, e);
283
284 def _stats_num_field( name, opts, field, expecteds ):
285 if field in opts.stats_range_fields:
286 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
287
288 if type(expecteds) is list:
289 for (b, expected, dev) in expecteds:
290 if b:
291 opts.stats_range_fields[field] = (expected, dev)
292 return
293 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
294
295 else:
296 (expected, dev) = expecteds
297 opts.stats_range_fields[field] = (expected, dev)
298
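# Illustrative use (the field names come from the RTS's machine-readable
# -t output; the numbers here are invented):
#
#   test('space_leak_001',
#        [stats_num_field('peak_megabytes_allocated', (2, 1)),
#         stats_num_field('max_bytes_used', (440000, 10))],
#        compile_and_run, [''])
#
# i.e. expect roughly 2 peak megabytes allocated (+/- 1%) and roughly
# 440000 max bytes used (+/- 10%).
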
299 def compiler_stats_num_field( field, expecteds ):
300 return lambda name, opts, f=field, e=expecteds: _compiler_stats_num_field(name, opts, f, e);
301
302 def _compiler_stats_num_field( name, opts, field, expecteds ):
303 if field in opts.compiler_stats_range_fields:
304 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
305
306 # Compiler performance numbers change when debugging is on, making the results
307 # useless and confusing. Therefore, skip if debugging is on.
308 if compiler_debugged():
309 skip(name, opts)
310
311 for (b, expected, dev) in expecteds:
312 if b:
313 opts.compiler_stats_range_fields[field] = (expected, dev)
314 return
315
316 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
317
318 # -----
319
320 def when(b, f):
321 # When config.list_broken is on, we want to see all expect_broken calls,
322 # so we always do f
323 if b or config.list_broken:
324 return f
325 else:
326 return normal
327
328 def unless(b, f):
329 return when(not b, f)
330
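# Illustrative uses (made-up test names):
#
#   test('T5678', when(opsys('mingw32'), skip), compile, [''])
#   test('T9012', unless(have_dynamic(), skip), compile, ['-dynamic'])
#
# i.e. apply the given setup function only when (or unless) the
# predicate holds for this platform/configuration.
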
331 def doing_ghci():
332 return 'ghci' in config.run_ways
333
334 def ghci_dynamic( ):
335 return config.ghc_dynamic
336
337 def fast():
338 return config.fast
339
340 def platform( plat ):
341 return config.platform == plat
342
343 def opsys( os ):
344 return config.os == os
345
346 def arch( arch ):
347 return config.arch == arch
348
349 def wordsize( ws ):
350 return config.wordsize == str(ws)
351
352 def msys( ):
353 return config.msys
354
355 def cygwin( ):
356 return config.cygwin
357
358 def have_vanilla( ):
359 return config.have_vanilla
360
361 def have_dynamic( ):
362 return config.have_dynamic
363
364 def have_profiling( ):
365 return config.have_profiling
366
367 def in_tree_compiler( ):
368 return config.in_tree_compiler
369
370 def compiler_type( compiler ):
371 return config.compiler_type == compiler
372
373 def compiler_lt( compiler, version ):
374 return config.compiler_type == compiler and \
375 version_lt(config.compiler_version, version)
376
377 def compiler_le( compiler, version ):
378 return config.compiler_type == compiler and \
379 version_le(config.compiler_version, version)
380
381 def compiler_gt( compiler, version ):
382 return config.compiler_type == compiler and \
383 version_gt(config.compiler_version, version)
384
385 def compiler_ge( compiler, version ):
386 return config.compiler_type == compiler and \
387 version_ge(config.compiler_version, version)
388
389 def unregisterised( ):
390 return config.unregisterised
391
392 def compiler_profiled( ):
393 return config.compiler_profiled
394
395 def compiler_debugged( ):
396 return config.compiler_debugged
397
398 def tag( t ):
399 return t in config.compiler_tags
400
401 # ---
402
403 def namebase( nb ):
404 return lambda opts, nb=nb: _namebase(opts, nb)
405
406 def _namebase( opts, nb ):
407 opts.with_namebase = nb
408
409 # ---
410
411 def high_memory_usage(name, opts):
412 opts.alone = True
413
414 # If a test is for a multi-CPU race, then running the test alone
415 # increases the chance that we'll actually see it.
416 def multi_cpu_race(name, opts):
417 opts.alone = True
418
419 # ---
420 def literate( name, opts ):
421 opts.literate = 1;
422
423 def c_src( name, opts ):
424 opts.c_src = 1;
425
426 def objc_src( name, opts ):
427 opts.objc_src = 1;
428
429 def objcpp_src( name, opts ):
430 opts.objcpp_src = 1;
431
432 def cmm_src( name, opts ):
433 opts.cmm_src = 1;
434
435 def outputdir( odir ):
436 return lambda name, opts, d=odir: _outputdir(name, opts, d)
437
438 def _outputdir( name, opts, odir ):
439 opts.outputdir = odir;
440
441 # ----
442
443 def pre_cmd( cmd ):
444 return lambda name, opts, c=cmd: _pre_cmd(name, opts, cmd)
445
446 def _pre_cmd( name, opts, cmd ):
447 opts.pre_cmd = cmd
448
449 # ----
450
451 def clean_cmd( cmd ):
452 return lambda name, opts, c=cmd: _clean_cmd(name, opts, cmd)
453
454 def _clean_cmd( name, opts, cmd ):
455 opts.clean_cmd = cmd
456
457 # ----
458
459 def cmd_prefix( prefix ):
460 return lambda name, opts, p=prefix: _cmd_prefix(name, opts, prefix)
461
462 def _cmd_prefix( name, opts, prefix ):
463 opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd;
464
465 # ----
466
467 def cmd_wrapper( fun ):
468 return lambda name, opts, f=fun: _cmd_wrapper(name, opts, fun)
469
470 def _cmd_wrapper( name, opts, fun ):
471 opts.cmd_wrapper = fun
472
473 # ----
474
475 def compile_cmd_prefix( prefix ):
476 return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, prefix)
477
478 def _compile_cmd_prefix( name, opts, prefix ):
479 opts.compile_cmd_prefix = prefix
480
481 # ----
482
483 def check_stdout( f ):
484 return lambda name, opts, f=f: _check_stdout(name, opts, f)
485
486 def _check_stdout( name, opts, f ):
487 opts.check_stdout = f
488
489 # ----
490
491 def normalise_slashes( name, opts ):
492 opts.extra_normaliser = normalise_slashes_
493
494 def normalise_exe( name, opts ):
495 opts.extra_normaliser = normalise_exe_
496
497 def normalise_fun( *fs ):
498 return lambda name, opts: _normalise_fun(name, opts, fs)
499
500 def _normalise_fun( name, opts, *fs ):
501 opts.extra_normaliser = join_normalisers(fs)
502
503 def normalise_errmsg_fun( *fs ):
504 return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)
505
506 def _normalise_errmsg_fun( name, opts, *fs ):
507 opts.extra_errmsg_normaliser = join_normalisers(fs)
508
509 def join_normalisers(*a):
510 """
511 Compose functions, flattening sequences.
512
513 join_normalisers(f1,[f2,f3],f4)
514
515 is the same as
516
517 lambda x: f1(f2(f3(f4(x))))
518 """
519
520 def flatten(l):
521 """
522 Taken from http://stackoverflow.com/a/2158532/946226
523 """
524 for el in l:
525 if isinstance(el, collections.Iterable) and not isinstance(el, basestring if sys.version_info < (3,) else str): # 'basestring' exists only in Python 2
526 for sub in flatten(el):
527 yield sub
528 else:
529 yield el
530
531 a = flatten(a)
532
533 fn = lambda x:x # identity function
534 for f in a:
535 assert callable(f)
536 fn = lambda x,f=f,fn=fn: fn(f(x))
537 return fn
538
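# For example (illustrative):
#
#   n = join_normalisers(normalise_whitespace, [normalise_slashes_])
#   n('a\\b   c')  ==  'a/b c'
#
# the innermost function (normalise_slashes_) is applied first, then
# normalise_whitespace squashes the spaces.
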
539 # ----
540 # Function for composing two opt-fns together
541
542 def executeSetups(fs, name, opts):
543 if type(fs) is list:
544 # If we have a list of setups, then execute each one
545 for f in fs:
546 executeSetups(f, name, opts)
547 else:
548 # fs is a single function, so just apply it
549 fs(name, opts)
550
551 # -----------------------------------------------------------------------------
552 # The current directory of tests
553
554 def newTestDir( dir ):
555 global thisdir_settings
556 # reset the options for this test directory
557 thisdir_settings = lambda name, opts, dir=dir: _newTestDir( name, opts, dir )
558
559 def _newTestDir( name, opts, dir ):
560 opts.testdir = dir
561 opts.compiler_always_flags = config.compiler_always_flags
562
563 # -----------------------------------------------------------------------------
564 # Actually doing tests
565
566 parallelTests = []
567 aloneTests = []
568 allTestNames = set([])
569
570 def runTest (opts, name, func, args):
571 ok = 0
572
573 if config.use_threads:
574 t.thread_pool.acquire()
575 try:
576 while config.threads<(t.running_threads+1):
577 t.thread_pool.wait()
578 t.running_threads = t.running_threads+1
579 ok=1
580 t.thread_pool.release()
581 thread.start_new_thread(test_common_thread, (name, opts, func, args))
582 except:
583 if not ok:
584 t.thread_pool.release()
585 else:
586 test_common_work (name, opts, func, args)
587
588 # name :: String
589 # setup :: TestOpts -> IO ()
590 def test (name, setup, func, args):
591 global aloneTests
592 global parallelTests
593 global allTestNames
594 global thisdir_settings
595 if name in allTestNames:
596 framework_fail(name, 'duplicate', 'There are multiple tests with this name')
597 if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
598 framework_fail(name, 'bad_name', 'This test has an invalid name')
599
600 # Make a deep copy of the default_testopts, as we need our own copy
601 # of any dictionaries etc inside it. Otherwise, if one test modifies
602 # them, all tests will see the modified version!
603 myTestOpts = copy.deepcopy(default_testopts)
604
605 executeSetups([thisdir_settings, setup], name, myTestOpts)
606
607 thisTest = lambda : runTest(myTestOpts, name, func, args)
608 if myTestOpts.alone:
609 aloneTests.append(thisTest)
610 else:
611 parallelTests.append(thisTest)
612 allTestNames.add(name)
613
614 if config.use_threads:
615 def test_common_thread(name, opts, func, args):
616 t.lock.acquire()
617 try:
618 test_common_work(name,opts,func,args)
619 finally:
620 t.lock.release()
621 t.thread_pool.acquire()
622 t.running_threads = t.running_threads - 1
623 t.thread_pool.notify()
624 t.thread_pool.release()
625
626 def get_package_cache_timestamp():
627 if config.package_conf_cache_file == '':
628 return 0.0
629 else:
630 try:
631 return os.stat(config.package_conf_cache_file).st_mtime
632 except:
633 return 0.0
634
635
636 def test_common_work (name, opts, func, args):
637 try:
638 t.total_tests = t.total_tests+1
639 setLocalTestOpts(opts)
640
641 package_conf_cache_file_start_timestamp = get_package_cache_timestamp()
642
643 # All the ways we might run this test
644 if func == compile or func == multimod_compile:
645 all_ways = config.compile_ways
646 elif func == compile_and_run or func == multimod_compile_and_run:
647 all_ways = config.run_ways
648 elif func == ghci_script:
649 if 'ghci' in config.run_ways:
650 all_ways = ['ghci']
651 else:
652 all_ways = []
653 else:
654 all_ways = ['normal']
655
656 # A test itself can request extra ways by setting opts.extra_ways
657 all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]
658
659 t.total_test_cases = t.total_test_cases + len(all_ways)
660
661 ok_way = lambda way: \
662 not getTestOpts().skip \
663 and (config.only == [] or name in config.only) \
664 and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
665 and (config.cmdline_ways == [] or way in config.cmdline_ways) \
666 and (not (config.skip_perf_tests and isStatsTest())) \
667 and way not in getTestOpts().omit_ways
668
669 # The ways we are actually going to run this test in
670 do_ways = list(filter (ok_way,all_ways))
671
672 # In fast mode, we skip all but one way
673 if config.fast and len(do_ways) > 0:
674 do_ways = [do_ways[0]]
675
676 if not config.clean_only:
677 # Run the required tests...
678 for way in do_ways:
679 if stopping():
680 break
681 do_test (name, way, func, args)
682
683 for way in all_ways:
684 if way not in do_ways:
685 skiptest (name,way)
686
687 if getTestOpts().cleanup != '' and (config.clean_only or do_ways != []):
688 pretest_cleanup(name)
689 clean([name + suff for suff in [
690 '', '.exe', '.exe.manifest', '.genscript',
691 '.stderr.normalised', '.stdout.normalised',
692 '.run.stderr.normalised', '.run.stdout.normalised',
693 '.comp.stderr.normalised', '.comp.stdout.normalised',
694 '.interp.stderr.normalised', '.interp.stdout.normalised',
695 '.stats', '.comp.stats',
696 '.hi', '.o', '.prof', '.exe.prof', '.hc',
697 '_stub.h', '_stub.c', '_stub.o',
698 '.hp', '.exe.hp', '.ps', '.aux', '.hcr', '.eventlog']])
699
700 if func == multi_compile or func == multi_compile_fail:
701 extra_mods = args[1]
702 clean([replace_suffix(fx[0],'o') for fx in extra_mods])
703 clean([replace_suffix(fx[0], 'hi') for fx in extra_mods])
704
705
706 clean(getTestOpts().clean_files)
707
708 if getTestOpts().outputdir != None:
709 odir = in_testdir(getTestOpts().outputdir)
710 try:
711 shutil.rmtree(odir)
712 except:
713 pass
714
715 try:
716 shutil.rmtree(in_testdir('.hpc.' + name))
717 except:
718 pass
719
720 try:
721 cleanCmd = getTestOpts().clean_cmd
722 if cleanCmd != None:
723 result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + cleanCmd)
724 if result != 0:
725 framework_fail(name, 'cleaning', 'clean-command failed: ' + str(result))
726 except:
727 framework_fail(name, 'cleaning', 'clean-command exception')
728
729 package_conf_cache_file_end_timestamp = get_package_cache_timestamp();
730
731 if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
732 framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))
733
734 try:
735 for f in files_written[name]:
736 if os.path.exists(f):
737 try:
738 if not f in files_written_not_removed[name]:
739 files_written_not_removed[name].append(f)
740 except:
741 files_written_not_removed[name] = [f]
742 except:
743 pass
744 except Exception as e:
745 framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
746
747 def clean(strs):
748 for str in strs:
749 for name in glob.glob(in_testdir(str)):
750 clean_full_path(name)
751
752 def clean_full_path(name):
753 try:
754 # Remove files...
755 os.remove(name)
756 except OSError as e1:
757 try:
758 # ... and empty directories
759 os.rmdir(name)
760 except OSError as e2:
761 # We don't want to fail here, but we do want to know
762 # what went wrong, so print out the exceptions.
763 # ENOENT isn't a problem, though, as we clean files
764 # that don't necessarily exist.
765 if e1.errno != errno.ENOENT:
766 print(e1)
767 if e2.errno != errno.ENOENT:
768 print(e2)
769
770 def do_test(name, way, func, args):
771 full_name = name + '(' + way + ')'
772
773 try:
774 if_verbose(2, "=====> %s %d of %d %s " % \
775 (full_name, t.total_tests, len(allTestNames), \
776 [t.n_unexpected_passes, \
777 t.n_unexpected_failures, \
778 t.n_framework_failures]))
779
780 if config.use_threads:
781 t.lock.release()
782
783 try:
784 preCmd = getTestOpts().pre_cmd
785 if preCmd != None:
786 result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + preCmd)
787 if result != 0:
788 framework_fail(name, way, 'pre-command failed: ' + str(result))
789 except:
790 framework_fail(name, way, 'pre-command exception')
791
792 try:
793 result = func(*[name,way] + args)
794 finally:
795 if config.use_threads:
796 t.lock.acquire()
797
798 if getTestOpts().expect != 'pass' and \
799 getTestOpts().expect != 'fail' and \
800 getTestOpts().expect != 'missing-lib':
801 framework_fail(name, way, 'bad expected ' + getTestOpts().expect)
802
803 try:
804 passFail = result['passFail']
805 except:
806 passFail = 'No passFail found'
807
808 if passFail == 'pass':
809 if getTestOpts().expect == 'pass' \
810 and way not in getTestOpts().expect_fail_for:
811 t.n_expected_passes = t.n_expected_passes + 1
812 if name in t.expected_passes:
813 t.expected_passes[name].append(way)
814 else:
815 t.expected_passes[name] = [way]
816 else:
817 if_verbose(1, '*** unexpected pass for %s' % full_name)
818 t.n_unexpected_passes = t.n_unexpected_passes + 1
819 addPassingTestInfo(t.unexpected_passes, getTestOpts().testdir, name, way)
820 elif passFail == 'fail':
821 if getTestOpts().expect == 'pass' \
822 and way not in getTestOpts().expect_fail_for:
823 reason = result['reason']
824 tag = result.get('tag')
825 if tag == 'stat':
826 if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
827 t.n_unexpected_stat_failures = t.n_unexpected_stat_failures + 1
828 addFailingTestInfo(t.unexpected_stat_failures, getTestOpts().testdir, name, reason, way)
829 else:
830 if_verbose(1, '*** unexpected failure for %s' % full_name)
831 t.n_unexpected_failures = t.n_unexpected_failures + 1
832 addFailingTestInfo(t.unexpected_failures, getTestOpts().testdir, name, reason, way)
833 else:
834 if getTestOpts().expect == 'missing-lib':
835 t.n_missing_libs = t.n_missing_libs + 1
836 if name in t.missing_libs:
837 t.missing_libs[name].append(way)
838 else:
839 t.missing_libs[name] = [way]
840 else:
841 t.n_expected_failures = t.n_expected_failures + 1
842 if name in t.expected_failures:
843 t.expected_failures[name].append(way)
844 else:
845 t.expected_failures[name] = [way]
846 else:
847 framework_fail(name, way, 'bad result ' + passFail)
848 except KeyboardInterrupt:
849 stopNow()
850 except:
851 framework_fail(name, way, 'do_test exception')
852 traceback.print_exc()
853
854 def addPassingTestInfo (testInfos, directory, name, way):
855 directory = re.sub('^\\.[/\\\\]', '', directory)
856
857 if not directory in testInfos:
858 testInfos[directory] = {}
859
860 if not name in testInfos[directory]:
861 testInfos[directory][name] = []
862
863 testInfos[directory][name].append(way)
864
865 def addFailingTestInfo (testInfos, directory, name, reason, way):
866 directory = re.sub('^\\.[/\\\\]', '', directory)
867
868 if not directory in testInfos:
869 testInfos[directory] = {}
870
871 if not name in testInfos[directory]:
872 testInfos[directory][name] = {}
873
874 if not reason in testInfos[directory][name]:
875 testInfos[directory][name][reason] = []
876
877 testInfos[directory][name][reason].append(way)
878
879 def skiptest (name, way):
880 # print 'Skipping test \"', name, '\"'
881 t.n_tests_skipped = t.n_tests_skipped + 1
882 if name in t.tests_skipped:
883 t.tests_skipped[name].append(way)
884 else:
885 t.tests_skipped[name] = [way]
886
887 def framework_fail( name, way, reason ):
888 full_name = name + '(' + way + ')'
889 if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
890 t.n_framework_failures = t.n_framework_failures + 1
891 if name in t.framework_failures:
892 t.framework_failures[name].append(way)
893 else:
894 t.framework_failures[name] = [way]
895
896 def badResult(result):
897 try:
898 if result['passFail'] == 'pass':
899 return False
900 return True
901 except:
902 return True
903
904 def passed():
905 return {'passFail': 'pass'}
906
907 def failBecause(reason, tag=None):
908 return {'passFail': 'fail', 'reason': reason, 'tag': tag}
909
910 # -----------------------------------------------------------------------------
911 # Generic command tests
912
913 # A generic command test is expected to run and exit successfully.
914 #
915 # The expected exit code can be changed via exit_code() as normal, and
916 # the expected stdout/stderr are stored in <testname>.stdout and
917 # <testname>.stderr. The output of the command can be ignored
918 # altogether by adding the ignore_output setup function to the
919 # test's options.
920
921 def run_command( name, way, cmd ):
922 return simple_run( name, '', cmd, '' )
923
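# A hypothetical use in a test file:
#
#   test('T3456', normal, run_command, ['echo hello'])
#
# expects the command to exit with code 0 and its output to match
# T3456.stdout (if such a file exists).
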
924 # -----------------------------------------------------------------------------
925 # GHCi tests
926
927 def ghci_script_without_flag(flag):
928 def apply(name, way, script):
929 overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
930 return ghci_script_override_default_flags(overrides)(name, way, script)
931
932 return apply
933
934 def ghci_script_override_default_flags(overrides):
935 def apply(name, way, script):
936 return ghci_script(name, way, script, overrides)
937
938 return apply
939
940 def ghci_script( name, way, script, override_flags = None ):
941 # Use the overridden default flags when given
942 if override_flags is not None:
943 default_flags = override_flags
944 else:
945 default_flags = getTestOpts().compiler_always_flags
946
947 # filter out -fforce-recomp from compiler_always_flags, because we're
948 # actually testing the recompilation behaviour in the GHCi tests.
949 flags = [f for f in default_flags if f != '-fforce-recomp']
950 flags.append(getTestOpts().extra_hc_opts)
951 if getTestOpts().outputdir != None:
952 flags.extend(["-outputdir", getTestOpts().outputdir])
953
954 # We pass HC and HC_OPTS as environment variables, so that the
955 # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
956 cmd = "HC='" + config.compiler + "' " + \
957 "HC_OPTS='" + ' '.join(flags) + "' " + \
958 "'" + config.compiler + "'" + \
959 ' --interactive -v0 -ignore-dot-ghci ' + \
960 ' '.join(flags)
961
962 getTestOpts().stdin = script
963 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
964
965 # -----------------------------------------------------------------------------
966 # Compile-only tests
967
968 def compile_override_default_flags(overrides):
969 def apply(name, way, extra_opts):
970 return do_compile(name, way, 0, '', [], extra_opts, overrides)
971
972 return apply
973
974 def compile_fail_override_default_flags(overrides):
975 def apply(name, way, extra_opts):
976 return do_compile(name, way, 1, '', [], extra_opts, overrides)
977
978 return apply
979
980 def compile_without_flag(flag):
981 def apply(name, way, extra_opts):
982 overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
983 return compile_override_default_flags(overrides)(name, way, extra_opts)
984
985 return apply
986
987 def compile_fail_without_flag(flag):
988 def apply(name, way, extra_opts):
989 overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
990 return compile_fail_override_default_flags(overrides)(name, way, extra_opts)
991
992 return apply
993
994 def compile( name, way, extra_hc_opts ):
995 return do_compile( name, way, 0, '', [], extra_hc_opts )
996
997 def compile_fail( name, way, extra_hc_opts ):
998 return do_compile( name, way, 1, '', [], extra_hc_opts )
999
1000 def multimod_compile( name, way, top_mod, extra_hc_opts ):
1001 return do_compile( name, way, 0, top_mod, [], extra_hc_opts )
1002
1003 def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
1004 return do_compile( name, way, 1, top_mod, [], extra_hc_opts )
1005
1006 def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
1007 return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)
1008
1009 def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
1010 return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
1011
1012 def do_compile( name, way, should_fail, top_mod, extra_mods, extra_hc_opts, override_flags = None ):
1013 # print 'Compile only, extra args = ', extra_hc_opts
1014 pretest_cleanup(name)
1015
1016 result = extras_build( way, extra_mods, extra_hc_opts )
1017 if badResult(result):
1018 return result
1019 extra_hc_opts = result['hc_opts']
1020
1021 force = 0
1022 if extra_mods:
1023 force = 1
1024 result = simple_build( name, way, extra_hc_opts, should_fail, top_mod, 0, 1, force, override_flags )
1025
1026 if badResult(result):
1027 return result
1028
1029 # the actual stderr should always match the expected, regardless
1030 # of whether we expected the compilation to fail or not (successful
1031 # compilations may generate warnings).
1032
1033 if getTestOpts().with_namebase == None:
1034 namebase = name
1035 else:
1036 namebase = getTestOpts().with_namebase
1037
1038 (platform_specific, expected_stderr_file) = platform_wordsize_qualify(namebase, 'stderr')
1039 actual_stderr_file = qualify(name, 'comp.stderr')
1040
1041 if not compare_outputs('stderr',
1042 join_normalisers(getTestOpts().extra_errmsg_normaliser,
1043 normalise_errmsg,
1044 normalise_whitespace),
1045 expected_stderr_file, actual_stderr_file):
1046 return failBecause('stderr mismatch')
1047
1048 # no problems found, this test passed
1049 return passed()
1050
1051 def compile_cmp_asm( name, way, extra_hc_opts ):
1052 print('Compile only, extra args = ', extra_hc_opts)
1053 pretest_cleanup(name)
1054 result = simple_build( name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0, 0)
1055
1056 if badResult(result):
1057 return result
1058
1059 # the actual stderr should always match the expected, regardless
1060 # of whether we expected the compilation to fail or not (successful
1061 # compilations may generate warnings).
1062
1063 if getTestOpts().with_namebase == None:
1064 namebase = name
1065 else:
1066 namebase = getTestOpts().with_namebase
1067
1068 (platform_specific, expected_asm_file) = platform_wordsize_qualify(namebase, 'asm')
1069 actual_asm_file = qualify(name, 's')
1070
1071 if not compare_outputs('asm', join_normalisers(normalise_errmsg, normalise_asm), \
1072 expected_asm_file, actual_asm_file):
1073 return failBecause('asm mismatch')
1074
1075 # no problems found, this test passed
1076 return passed()
1077
1078 # -----------------------------------------------------------------------------
1079 # Compile-and-run tests
1080
1081 def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts ):
1082 # print 'Compile and run, extra args = ', extra_hc_opts
1083 pretest_cleanup(name)
1084
1085 result = extras_build( way, extra_mods, extra_hc_opts )
1086 if badResult(result):
1087 return result
1088 extra_hc_opts = result['hc_opts']
1089
1090 if way == 'ghci': # interpreted...
1091 return interpreter_run( name, way, extra_hc_opts, 0, top_mod )
1092 else: # compiled...
1093 force = 0
1094 if extra_mods:
1095 force = 1
1096
1097 result = simple_build( name, way, extra_hc_opts, 0, top_mod, 1, 1, force)
1098 if badResult(result):
1099 return result
1100
1101 cmd = './' + name;
1102
1103 # we don't check the compiler's stderr for a compile-and-run test
1104 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1105
1106 def compile_and_run( name, way, extra_hc_opts ):
1107 return compile_and_run__( name, way, '', [], extra_hc_opts)
1108
1109 def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
1110 return compile_and_run__( name, way, top_mod, [], extra_hc_opts)
1111
1112 def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
1113 return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
1114
1115 def stats( name, way, stats_file ):
1116 opts = getTestOpts()
1117 return checkStats(name, way, stats_file, opts.stats_range_fields)
1118
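# Illustrative use (names and numbers invented): a test whose earlier
# step (e.g. a pre_cmd) writes T7890.stats could check it with
#
#   test('T7890',
#        stats_num_field('bytes allocated', (50000000, 5)),
#        stats, ['T7890.stats'])
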
1119 # -----------------------------------------------------------------------------
1120 # Check -t stats info
1121
1122 def checkStats(name, way, stats_file, range_fields):
1123 full_name = name + '(' + way + ')'
1124
1125 result = passed()
1126 if len(range_fields) > 0:
1127 f = open(in_testdir(stats_file))
1128 contents = f.read()
1129 f.close()
1130
1131 for (field, (expected, dev)) in range_fields.items():
1132 m = re.search('\("' + field + '", "([0-9]+)"\)', contents)
1133 if m == None:
1134 print('Failed to find field: ', field)
1135 result = failBecause('no such stats field'); continue # field missing: skip the bounds check
1136 val = int(m.group(1))
1137
1138 lowerBound = trunc( expected * ((100 - float(dev))/100))
1139 upperBound = trunc(0.5 + ceil(expected * ((100 + float(dev))/100)))
1140
1141 deviation = round(((float(val) * 100)/ expected) - 100, 1)
1142
1143 if val < lowerBound:
1144 print(field, 'value is too low:')
1145 print('(If this is because you have improved GHC, please')
1146 print('update the test so that GHC doesn\'t regress again)')
1147 result = failBecause('stat too good', tag='stat')
1148 if val > upperBound:
1149 print(field, 'value is too high:')
1150 result = failBecause('stat not good enough', tag='stat')
1151
1152 if val < lowerBound or val > upperBound or config.verbose >= 4:
1157 length = max(len(str(x)) for x in [expected, lowerBound, upperBound, val])
1158
1159 def display(descr, val, extra):
1160 print(descr, str(val).rjust(length), extra)
1161
1162 display(' Expected ' + full_name + ' ' + field + ':', expected, '+/-' + str(dev) + '%')
1163 display(' Lower bound ' + full_name + ' ' + field + ':', lowerBound, '')
1164 display(' Upper bound ' + full_name + ' ' + field + ':', upperBound, '')
1165 display(' Actual ' + full_name + ' ' + field + ':', val, '')
1166 if val != expected:
1167 display(' Deviation ' + full_name + ' ' + field + ':', deviation, '%')
1168
1169 return result
1170
1171 # -----------------------------------------------------------------------------
1172 # Build a single-module program
1173
1174 def extras_build( way, extra_mods, extra_hc_opts ):
1175 for modopts in extra_mods:
1176 mod, opts = modopts
1177 result = simple_build( mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0, 0)
1178 if not (mod.endswith('.hs') or mod.endswith('.lhs')):
1179 extra_hc_opts += ' ' + replace_suffix(mod, 'o')
1180 if badResult(result):
1181 return result
1182
1183 return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1184
1185
1186 def simple_build( name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, noforce, override_flags = None ):
1187 opts = getTestOpts()
1188 errname = add_suffix(name, 'comp.stderr')
1189 rm_no_fail( qualify(errname, '') )
1190
1191 if top_mod != '':
1192 srcname = top_mod
1193 rm_no_fail( qualify(name, '') )
1194 base, suf = os.path.splitext(top_mod)
1195 rm_no_fail( qualify(base, '') )
1196 rm_no_fail( qualify(base, 'exe') )
1197 elif addsuf:
1198 srcname = add_hs_lhs_suffix(name)
1199 rm_no_fail( qualify(name, '') )
1200 else:
1201 srcname = name
1202 rm_no_fail( qualify(name, 'o') )
1203
1204 rm_no_fail( qualify(replace_suffix(srcname, "o"), '') )
1205
1206 to_do = ''
1207 if top_mod != '':
1208 to_do = '--make '
1209 if link:
1210 to_do = to_do + '-o ' + name
1211 elif link:
1212 to_do = '-o ' + name
1213 elif opts.compile_to_hc:
1214 to_do = '-C'
1215 else:
1216 to_do = '-c' # just compile
1217
1218 stats_file = name + '.comp.stats'
1219 if len(opts.compiler_stats_range_fields) > 0:
1220 extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1221
1222 # Required by GHC 7.3+, harmless for earlier versions:
1223 if (getTestOpts().c_src or
1224 getTestOpts().objc_src or
1225 getTestOpts().objcpp_src or
1226 getTestOpts().cmm_src):
1227 extra_hc_opts += ' -no-hs-main '
1228
1229 if getTestOpts().compile_cmd_prefix == '':
1230 cmd_prefix = ''
1231 else:
1232 cmd_prefix = getTestOpts().compile_cmd_prefix + ' '
1233
1234 if override_flags is not None:
1235 comp_flags = copy.copy(override_flags)
1236 else:
1237 comp_flags = copy.copy(getTestOpts().compiler_always_flags)
1238
1239 if noforce:
1240 comp_flags = [f for f in comp_flags if f != '-fforce-recomp']
1241 if getTestOpts().outputdir != None:
1242 comp_flags.extend(["-outputdir", getTestOpts().outputdir])
1243
1244 cmd = 'cd ' + getTestOpts().testdir + " && " + cmd_prefix + "'" \
1245 + config.compiler + "' " \
1246 + ' '.join(comp_flags) + ' ' \
1247 + to_do + ' ' + srcname + ' ' \
1248 + ' '.join(config.way_flags(name)[way]) + ' ' \
1249 + extra_hc_opts + ' ' \
1250 + opts.extra_hc_opts + ' ' \
1251 + '>' + errname + ' 2>&1'
1252
1253 result = runCmdFor(name, cmd)
1254
1255 if result != 0 and not should_fail:
1256 actual_stderr = qualify(name, 'comp.stderr')
1257 if_verbose(1,'Compile failed (status ' + repr(result) + ') errors were:')
1258 if_verbose_dump(1,actual_stderr)
1259
1260 # ToDo: if the sub-shell was killed by ^C, then exit
1261
1262 statsResult = checkStats(name, way, stats_file, opts.compiler_stats_range_fields)
1263
1264 if badResult(statsResult):
1265 return statsResult
1266
1267 if should_fail:
1268 if result == 0:
1269 return failBecause('exit code 0')
1270 else:
1271 if result != 0:
1272 return failBecause('exit code non-0')
1273
1274 return passed()
1275
1276 # -----------------------------------------------------------------------------
1277 # Run a program and check its output
1278 #
1279 # If testname.stdin exists, route input from that, else
1280 # from /dev/null. Route output to testname.run.stdout and
1281 # testname.run.stderr. Returns the exit code of the run.
1282
1283 def simple_run( name, way, prog, args ):
1284 opts = getTestOpts()
1285
1286 # figure out what to use for stdin
1287 if opts.stdin != '':
1288 use_stdin = opts.stdin
1289 else:
1290 stdin_file = add_suffix(name, 'stdin')
1291 if os.path.exists(in_testdir(stdin_file)):
1292 use_stdin = stdin_file
1293 else:
1294 use_stdin = '/dev/null'
1295
1296 run_stdout = add_suffix(name,'run.stdout')
1297 run_stderr = add_suffix(name,'run.stderr')
1298
1299 rm_no_fail(qualify(name,'run.stdout'))
1300 rm_no_fail(qualify(name,'run.stderr'))
1301 rm_no_fail(qualify(name, 'hp'))
1302 rm_no_fail(qualify(name,'ps'))
1303 rm_no_fail(qualify(name, 'prof'))
1304
1305 my_rts_flags = rts_flags(way)
1306
1307 stats_file = name + '.stats'
1308 if len(opts.stats_range_fields) > 0:
1309 args += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1310
1311 if opts.no_stdin:
1312 stdin_comes_from = ''
1313 else:
1314 stdin_comes_from = ' <' + use_stdin
1315
1316 if opts.combined_output:
1317 redirection = ' >' + run_stdout \
1318 + ' 2>&1'
1319 else:
1320 redirection = ' >' + run_stdout \
1321 + ' 2>' + run_stderr
1322
1323 cmd = prog + ' ' + args + ' ' \
1324 + my_rts_flags + ' ' \
1325 + stdin_comes_from \
1326 + redirection
1327
1328 if opts.cmd_wrapper != None:
1329 cmd = opts.cmd_wrapper(cmd);
1330
1331 cmd = 'cd ' + opts.testdir + ' && ' + cmd
1332
1333 # run the command
1334 result = runCmdFor(name, cmd, timeout_multiplier=opts.timeout_multiplier)
1335
1336 exit_code = result >> 8
1337 signal = result & 0xff
1338
1339 # check the exit code
1340 if exit_code != opts.exit_code:
1341 print('Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')')
1342 dump_stdout(name)
1343 dump_stderr(name)
1344 return failBecause('bad exit code')
1345
1346 check_hp = my_rts_flags.find("-h") != -1
1347 check_prof = my_rts_flags.find("-p") != -1
1348
1349 if not opts.ignore_output:
1350 bad_stderr = not opts.combined_output and not check_stderr_ok(name)
1351 bad_stdout = not check_stdout_ok(name)
1352 if bad_stderr:
1353 return failBecause('bad stderr')
1354 if bad_stdout:
1355 return failBecause('bad stdout')
1356 # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
1357 if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
1358 return failBecause('bad heap profile')
1359 if check_prof and not check_prof_ok(name):
1360 return failBecause('bad profile')
1361
1362 return checkStats(name, way, stats_file, opts.stats_range_fields)
1363
1364 def rts_flags(way):
1365 if (way == ''):
1366 return ''
1367 else:
1368 args = config.way_rts_flags[way]
1369
1370 if args == []:
1371 return ''
1372 else:
1373 return '+RTS ' + ' '.join(args) + ' -RTS'
1374
1375 # -----------------------------------------------------------------------------
1376 # Run a program in the interpreter and check its output
1377
1378 def interpreter_run( name, way, extra_hc_opts, compile_only, top_mod ):
1379 outname = add_suffix(name, 'interp.stdout')
1380 errname = add_suffix(name, 'interp.stderr')
1381 rm_no_fail(outname)
1382 rm_no_fail(errname)
1383 rm_no_fail(name)
1384
1385 if (top_mod == ''):
1386 srcname = add_hs_lhs_suffix(name)
1387 else:
1388 srcname = top_mod
1389
1390 scriptname = add_suffix(name, 'genscript')
1391 qscriptname = in_testdir(scriptname)
1392 rm_no_fail(qscriptname)
1393
1394 delimiter = '===== program output begins here\n'
1395
1396 script = open(qscriptname, 'w')
1397 if not compile_only:
1398 # set the prog name and command-line args to match the compiled
1399 # environment.
1400 script.write(':set prog ' + name + '\n')
1401 script.write(':set args ' + getTestOpts().extra_run_opts + '\n')
1402 # Add marker lines to the stdout and stderr output files, so we
1403 # can separate GHCi's output from the program's.
1404 script.write(':! echo ' + delimiter)
1405 script.write(':! echo 1>&2 ' + delimiter)
1406 # Set stdout to be line-buffered to match the compiled environment.
1407 script.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
1408 # wrapping in GHC.TopHandler.runIO ensures we get the same output
1409 # in the event of an exception as for the compiled program.
1410 script.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
1411 script.close()
1412
1413 # figure out what to use for stdin
1414 if getTestOpts().stdin != '':
1415 stdin_file = in_testdir(getTestOpts().stdin)
1416 else:
1417 stdin_file = qualify(name, 'stdin')
1418
1419 if os.path.exists(stdin_file):
1420 # append the test's stdin to the script, so the program reads it after the GHCi commands
1421 os.system('cat ' + stdin_file + ' >>' + qscriptname)
1422
1423 script.close()
1424
1425 flags = copy.copy(getTestOpts().compiler_always_flags)
1426 if getTestOpts().outputdir != None:
1427 flags.extend(["-outputdir", getTestOpts().outputdir])
1428
1429 cmd = "'" + config.compiler + "' " \
1430 + ' '.join(flags) + ' ' \
1431 + srcname + ' ' \
1432 + ' '.join(config.way_flags(name)[way]) + ' ' \
1433 + extra_hc_opts + ' ' \
1434 + getTestOpts().extra_hc_opts + ' ' \
1435 + '<' + scriptname + ' 1>' + outname + ' 2>' + errname
1436
1437 if getTestOpts().cmd_wrapper != None:
1438 cmd = getTestOpts().cmd_wrapper(cmd);
1439
1440 cmd = 'cd ' + getTestOpts().testdir + " && " + cmd
1441
1442 result = runCmdFor(name, cmd, timeout_multiplier=getTestOpts().timeout_multiplier)
1443
1444 exit_code = result >> 8
1445 signal = result & 0xff
1446
1447 # split the stdout into compilation/program output
1448 split_file(in_testdir(outname), delimiter,
1449 qualify(name, 'comp.stdout'),
1450 qualify(name, 'run.stdout'))
1451 split_file(in_testdir(errname), delimiter,
1452 qualify(name, 'comp.stderr'),
1453 qualify(name, 'run.stderr'))
1454
1455 # check the exit code
1456 if exit_code != getTestOpts().exit_code:
1457 print('Wrong exit code (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
1458 dump_stdout(name)
1459 dump_stderr(name)
1460 return failBecause('bad exit code')
1461
1462 # ToDo: if the sub-shell was killed by ^C, then exit
1463
1464 if getTestOpts().ignore_output or (check_stderr_ok(name) and
1465 check_stdout_ok(name)):
1466 return passed()
1467 else:
1468 return failBecause('bad stdout or stderr')
1469
1470
1471 def split_file(in_fn, delimiter, out1_fn, out2_fn):
1472 infile = open(in_fn)
1473 out1 = open(out1_fn, 'w')
1474 out2 = open(out2_fn, 'w')
1475
1476 line = infile.readline()
1477 line = re.sub('\r', '', line) # ignore Windows EOL
1478 while (re.sub('^\s*','',line) != delimiter and line != ''):
1479 out1.write(line)
1480 line = infile.readline()
1481 line = re.sub('\r', '', line)
1482 out1.close()
1483
1484 line = infile.readline()
1485 while (line != ''):
1486 out2.write(line)
1487 line = infile.readline()
1488 out2.close()
1489
1490 # -----------------------------------------------------------------------------
1491 # Utils
1492
1493 def check_stdout_ok( name ):
1494 if getTestOpts().with_namebase == None:
1495 namebase = name
1496 else:
1497 namebase = getTestOpts().with_namebase
1498
1499 actual_stdout_file = qualify(name, 'run.stdout')
1500 (platform_specific, expected_stdout_file) = platform_wordsize_qualify(namebase, 'stdout')
1501
1502 def norm(str):
1503 if platform_specific:
1504 return str
1505 else:
1506 return normalise_output(str)
1507
1508 extra_norm = join_normalisers(norm, getTestOpts().extra_normaliser)
1509
1510 check_stdout = getTestOpts().check_stdout
1511 if check_stdout:
1512 return check_stdout(actual_stdout_file, extra_norm)
1513
1514 return compare_outputs('stdout', \
1515 extra_norm, \
1516 expected_stdout_file, actual_stdout_file)
1517
1518 def dump_stdout( name ):
1519 print('Stdout:')
1520 print(read_no_crs(qualify(name, 'run.stdout')))
1521
1522 def check_stderr_ok( name ):
1523 if getTestOpts().with_namebase == None:
1524 namebase = name
1525 else:
1526 namebase = getTestOpts().with_namebase
1527
1528 actual_stderr_file = qualify(name, 'run.stderr')
1529 (platform_specific, expected_stderr_file) = platform_wordsize_qualify(namebase, 'stderr')
1530
1531 def norm(str):
1532 if platform_specific:
1533 return str
1534 else:
1535 return normalise_errmsg(str)
1536
1537 return compare_outputs('stderr', \
1538 join_normalisers(norm, getTestOpts().extra_errmsg_normaliser), \
1539 expected_stderr_file, actual_stderr_file)
1540
1541 def dump_stderr( name ):
1542 print("Stderr:")
1543 print(read_no_crs(qualify(name, 'run.stderr')))
1544
1545 def read_no_crs(file):
1546 str = ''
1547 try:
1548 h = open(file)
1549 str = h.read()
1550 h.close()
1551 except:
1552 # On Windows, if the program fails very early, it seems the
1553 # files stdout/stderr are redirected to may not get created
1554 pass
1555 return re.sub('\r', '', str)
1556
1557 def write_file(file, str):
1558 h = open(file, 'w')
1559 h.write(str)
1560 h.close()
1561
1562 def check_hp_ok(name):
1563
1564 # do not qualify for hp2ps because we should be in the right directory
1565 hp2psCmd = "cd " + getTestOpts().testdir + " && '" + config.hp2ps + "' " + name
1566
1567 hp2psResult = runCmdExitCode(hp2psCmd)
1568
1569 actual_ps_file = qualify(name, 'ps')
1570
1571 if(hp2psResult == 0):
1572 if (os.path.exists(actual_ps_file)):
1573 if gs_working:
1574 gsResult = runCmdExitCode(genGSCmd(actual_ps_file))
1575 if (gsResult == 0):
1576 return (True)
1577 else:
1578 print("hp2ps output for " + name + " is not valid PostScript")
1579 else: return (True) # assume postscript is valid without ghostscript
1580 else:
1581 print("hp2ps did not generate PostScript for " + name)
1582 return (False)
1583 else:
1584 print("hp2ps error when processing heap profile for " + name)
1585 return(False)
1586
1587 def check_prof_ok(name):
1588
1589 prof_file = qualify(name,'prof')
1590
1591 if not os.path.exists(prof_file):
1592 print(prof_file + " does not exist")
1593 return(False)
1594
1595 if os.path.getsize(qualify(name,'prof')) == 0:
1596 print(prof_file + " is empty")
1597 return(False)
1598
1599 if getTestOpts().with_namebase == None:
1600 namebase = name
1601 else:
1602 namebase = getTestOpts().with_namebase
1603
1604 (platform_specific, expected_prof_file) = \
1605 platform_wordsize_qualify(namebase, 'prof.sample')
1606
1607 # sample prof file is not required
1608 if not os.path.exists(expected_prof_file):
1609 return True
1610 else:
1611 return compare_outputs('prof', \
1612 join_normalisers(normalise_whitespace,normalise_prof), \
1613 expected_prof_file, prof_file)
1614
1615 # Compare expected output to actual output, and optionally accept the
1616 # new output. Returns true if output matched or was accepted, false
1617 # otherwise.
1618 def compare_outputs( kind, normaliser, expected_file, actual_file ):
1619 if os.path.exists(expected_file):
1620 expected_raw = read_no_crs(expected_file)
1621 # print "norm:", normaliser(expected_raw)
1622 expected_str = normaliser(expected_raw)
1623 expected_file_for_diff = expected_file
1624 else:
1625 expected_str = ''
1626 expected_file_for_diff = '/dev/null'
1627
1628 actual_raw = read_no_crs(actual_file)
1629 actual_str = normaliser(actual_raw)
1630
1631 if expected_str == actual_str:
1632 return 1
1633 else:
1634 if_verbose(1, 'Actual ' + kind + ' output differs from expected:')
1635
1636 if expected_file_for_diff == '/dev/null':
1637 expected_normalised_file = '/dev/null'
1638 else:
1639 expected_normalised_file = expected_file + ".normalised"
1640 write_file(expected_normalised_file, expected_str)
1641
1642 actual_normalised_file = actual_file + ".normalised"
1643 write_file(actual_normalised_file, actual_str)
1644
1645 # Ignore whitespace when diffing. We should only get to this
1646 # point if there are non-whitespace differences
1647 #
1648 # Note we are diffing the *actual* output, not the normalised
1649 # output. The normalised output may have whitespace squashed
1650 # (including newlines) so the diff would be hard to read.
1651 # This does mean that the diff might contain changes that
1652 # would be normalised away.
1653 if (config.verbose >= 1):
1654 r = os.system( 'diff -uw ' + expected_file_for_diff + \
1655 ' ' + actual_file )
1656
1657 # If for some reason there were no non-whitespace differences,
1658 # then do a full diff
1659 if r == 0:
1660 r = os.system( 'diff -u ' + expected_file_for_diff + \
1661 ' ' + actual_file )
1662
1663 if config.accept:
1664 if_verbose(1, 'Accepting new output.')
1665 write_file(expected_file, actual_raw)
1666 return 1
1667 else:
1668 return 0
1669
1670
1671 def normalise_whitespace( str ):
1672 # Merge contiguous whitespace characters into a single space.
1673 str = re.sub('[ \t\n]+', ' ', str)
1674 return str
1675
1676 def normalise_errmsg( str ):
1677 # If a file name ends in ".exe" or ".exe:", zap the ".exe" (for Windows);
1678 # the colon is there because it appears in error messages. This
1679 # hacky solution is used in place of more sophisticated filename
1680 # mangling.
1681 str = re.sub('([^\\s])\\.exe', '\\1', str)
1682 # normalise slashes, minimise Windows/Unix filename differences
1683 str = re.sub('\\\\', '/', str)
1684 # The inplace ghc's are called ghc-stage[123] to avoid filename
1685 # collisions, so we need to normalise that to just "ghc"
1686 str = re.sub('ghc-stage[123]', 'ghc', str)
1687 # Error messages sometimes name the integer implementation package
1688 str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
1689 return str
1690
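# For example (illustrative), normalise_errmsg turns
#
#   C:\foo\ghc-stage2.exe: error in integer-gmp-0.5.1.0
#
# into
#
#   C:/foo/ghc: error in integer-<IMPL>-<VERSION>
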
1691 # normalise a .prof file, so that we can reasonably compare it against
1692 # a sample. This doesn't compare any of the actual profiling data,
1693 # only the shape of the profile and the number of entries.
1694 def normalise_prof (str):
1695 # strip everything up to the line beginning "COST CENTRE"
1696 str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)
1697
1698 # strip results for CAFs, these tend to change unpredictably
1699 str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)
1700
1701 # XXX Ignore Main.main. Sometimes this appears under CAF, and
1702 # sometimes under MAIN.
1703 str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)
1704
1705 # We have something like this:
1706
1707 # MAIN MAIN 101 0 0.0 0.0 100.0 100.0
1708 # k Main 204 1 0.0 0.0 0.0 0.0
1709 # foo Main 205 1 0.0 0.0 0.0 0.0
1710 # foo.bar Main 207 1 0.0 0.0 0.0 0.0
1711
1712 # then we remove all the specific profiling data, leaving only the
1713 # cost centre name, module, and entries, to end up with this:
1714
1715 # MAIN MAIN 0
1716 # k Main 1
1717 # foo Main 1
1718 # foo.bar Main 1
1719
1720 str = re.sub('\n([ \t]*[^ \t]+)([ \t]+[^ \t]+)([ \t]+\\d+)([ \t]+\\d+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)','\n\\1 \\2 \\4',str)
1721 return str
1722
1723 def normalise_slashes_( str ):
1724 str = re.sub('\\\\', '/', str)
1725 return str
1726
1727 def normalise_exe_( str ):
1728 str = re.sub('\.exe', '', str)
1729 return str
1730
1731 def normalise_output( str ):
1732 # Remove a .exe extension (for Windows)
1733 # This can occur in error messages generated by the program.
1734 str = re.sub('([^\\s])\\.exe', '\\1', str)
1735 return str
1736
1737 def normalise_asm( str ):
1738 lines = str.split('\n')
1739 # Only keep instructions and labels not starting with a dot.
1740 metadata = re.compile('^[ \t]*\\..*$')
1741 out = []
1742 for line in lines:
1743 # Drop metadata directives (e.g. ".type")
1744 if not metadata.match(line):
1745 line = re.sub('@plt', '', line)
1746 instr = line.lstrip().split()
1747 # Drop empty lines.
1748 if not instr:
1749 continue
1750 # Drop operands, except for call instructions.
1751 elif instr[0] == 'call':
1752 out.append(instr[0] + ' ' + instr[1])
1753 else:
1754 out.append(instr[0])
1755 out = '\n'.join(out)
1756 return out
1757
1758 def if_verbose( n, s ):
1759 if config.verbose >= n:
1760 print(s)
1761
1762 def if_verbose_dump( n, f ):
1763 if config.verbose >= n:
1764 try:
1765 print(open(f).read())
1766 except:
1767 print('')
1768
1769 def rawSystem(cmd_and_args):
1770 # We prefer subprocess.call to os.spawnv as the latter
1771 # seems to send its arguments through a shell or something
1772 # with the Windows (non-cygwin) python. An argument "a b c"
1773 # turns into three arguments ["a", "b", "c"].
1774
1775 # However, subprocess is new in python 2.4, so fall back to
1776 # using spawnv if we don't have it
1777
1778 if have_subprocess:
1779 return subprocess.call(cmd_and_args)
1780 else:
1781 return os.spawnv(os.P_WAIT, cmd_and_args[0], cmd_and_args)
1782
1783 # When running under native msys Python, any invocations of non-msys binaries,
1784 # including timeout.exe, will have their arguments munged according to some
1785 # heuristics, which leads to malformed command lines (#9626). The easiest way
1786 # to avoid problems is to invoke through /usr/bin/cmd which sidesteps argument
1787 # munging because it is a native msys application.
1788 def passThroughCmd(cmd_and_args):
1789 args = []
1790 # cmd needs a Windows-style path for its first argument.
1791 args.append(cmd_and_args[0].replace('/', '\\'))
1792 # Other arguments need to be quoted to deal with spaces.
1793 args.extend(['"%s"' % arg for arg in cmd_and_args[1:]])
1794 return ["cmd", "/c", " ".join(args)]
1795
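# For example (illustrative), a call such as
#
#   passThroughCmd(['/usr/local/bin/timeout', '300', 'cd . && make'])
#
# returns roughly
#
#   ['cmd', '/c', '\\usr\\local\\bin\\timeout "300" "cd . && make"']
#
# so the arguments reach timeout.exe without being rewritten by the
# argument-munging heuristics.
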
1796 # Note that this doesn't handle the timeout itself; it is just used for
1797 # commands that have timeout handling built-in.
1798 def rawSystemWithTimeout(cmd_and_args):
1799 if config.os == 'mingw32' and sys.executable.startswith('/usr'):
1800 # This is only needed when running under msys python.
1801 cmd_and_args = passThroughCmd(cmd_and_args)
1802 r = rawSystem(cmd_and_args)
1803 if r == 98:
1804 # The python timeout program uses 98 to signal that ^C was pressed
1805 stopNow()
1806 return r
1807
1808 # cmd is a complex command in Bourne-shell syntax
1809 # e.g (cd . && 'c:/users/simonpj/darcs/HEAD/compiler/stage1/ghc-inplace' ...etc)
1810 # Hence it must ultimately be run by a Bourne shell
1811 #
1812 # Mostly it invokes the command wrapped in 'timeout' thus
1813 # timeout 300 'cd . && ...blah blah'
1814 # so it's timeout's job to invoke the Bourne shell
1815 #
1816 # But watch out for the case when there is no timeout program!
1817 # Then, when using the native Python, os.system will invoke the cmd shell
1818
1819 def runCmd( cmd ):
1820 if_verbose( 3, cmd )
1821 r = 0
1822 if config.os == 'mingw32':
1823 # On MinGW, we will always have timeout
1824 assert config.timeout_prog!=''
1825
1826 if config.timeout_prog != '':
1827 r = rawSystemWithTimeout([config.timeout_prog, str(config.timeout), cmd])
1828 else:
1829 r = os.system(cmd)
1830 return r << 8
1831
1832 def runCmdFor( name, cmd, timeout_multiplier=1.0 ):
1833 if_verbose( 3, cmd )
1834 r = 0
1835 if config.os == 'mingw32':
1836 # On MinGW, we will always have timeout
1837 assert config.timeout_prog!=''
1838 timeout = int(ceil(config.timeout * timeout_multiplier))
1839
1840 if config.timeout_prog != '':
1841 if config.check_files_written:
1842 fn = name + ".strace"
1843 r = rawSystemWithTimeout(
1844 ["strace", "-o", fn, "-fF",
1845 "-e", "creat,open,chdir,clone,vfork",
1846 config.timeout_prog, str(timeout), cmd])
1847 addTestFilesWritten(name, fn)
1848 rm_no_fail(fn)
1849 else:
1850 r = rawSystemWithTimeout([config.timeout_prog, str(timeout), cmd])
1851 else:
1852 r = os.system(cmd)
1853 return r << 8
1854
1855 def runCmdExitCode( cmd ):
1856 return (runCmd(cmd) >> 8);
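# Note on the encoding used above: on the timeout-program path the child's
# plain exit code is shifted left by 8 bits by runCmd/runCmdFor (mimicking a
# Unix wait status), and runCmdExitCode shifts it back down.  For example
# (sketch), a command exiting with status 2 gives runCmd(cmd) == 512 and
# runCmdExitCode(cmd) == 2.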
1857
1858
1859 # -----------------------------------------------------------------------------
1860 # checking for files being written to by multiple tests
1861
1862 re_strace_call_end = '(\) += ([0-9]+|-1 E.*)| <unfinished ...>)$'
1863 re_strace_unavailable = re.compile('^\) += \? <unavailable>$')
1864 re_strace_pid = re.compile('^([0-9]+) +(.*)')
1865 re_strace_clone = re.compile('^(clone\(|<... clone resumed> ).*\) = ([0-9]+)$')
1866 re_strace_clone_unfinished = re.compile('^clone\( <unfinished \.\.\.>$')
1867 re_strace_vfork = re.compile('^(vfork\(\)|<\.\.\. vfork resumed> \)) += ([0-9]+)$')
1868 re_strace_vfork_unfinished = re.compile('^vfork\( <unfinished \.\.\.>$')
1869 re_strace_chdir = re.compile('^chdir\("([^"]*)"(\) += 0| <unfinished ...>)$')
1870 re_strace_chdir_resumed = re.compile('^<\.\.\. chdir resumed> \) += 0$')
1871 re_strace_open = re.compile('^open\("([^"]*)", ([A-Z_|]*)(, [0-9]+)?' + re_strace_call_end)
1872 re_strace_open_resumed = re.compile('^<... open resumed> ' + re_strace_call_end)
1873 re_strace_ignore_sigchild = re.compile('^--- SIGCHLD \(Child exited\) @ 0 \(0\) ---$')
1874 re_strace_ignore_sigvtalarm = re.compile('^--- SIGVTALRM \(Virtual timer expired\) @ 0 \(0\) ---$')
1875 re_strace_ignore_sigint = re.compile('^--- SIGINT \(Interrupt\) @ 0 \(0\) ---$')
1876 re_strace_ignore_sigfpe = re.compile('^--- SIGFPE \(Floating point exception\) @ 0 \(0\) ---$')
1877 re_strace_ignore_sigsegv = re.compile('^--- SIGSEGV \(Segmentation fault\) @ 0 \(0\) ---$')
1878 re_strace_ignore_sigpipe = re.compile('^--- SIGPIPE \(Broken pipe\) @ 0 \(0\) ---$')
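# For reference, these regexes target strace lines of roughly the following
# shape (made-up examples of the '-f'-style output parsed below):
#
#   1234  open("T1234.o", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 3
#   1234  chdir("../T1234dir") = 0
#   1234  clone(child_stack=0, flags=...) = 1235
#   1234  --- SIGCHLD (Child exited) @ 0 (0) ---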
1879
1880 # Files that are read or written but shouldn't be:
1881 # * ghci_history shouldn't be read or written by tests
1882 # * things under package.conf.d shouldn't be written by tests
1883 bad_file_usages = {}
1884
1885 # Mapping from tests to the list of files that they write
1886 files_written = {}
1887
1888 # Mapping from tests to the list of files that they write but don't clean
1889 files_written_not_removed = {}
1890
1891 def add_bad_file_usage(name, file):
1892 try:
1893 if not file in bad_file_usages[name]:
1894 bad_file_usages[name].append(file)
1895 except:
1896 bad_file_usages[name] = [file]
1897
1898 def mkPath(curdir, path):
1899 # Given the current full directory is 'curdir', what is the full
1900 # path to 'path'?
1901 return os.path.realpath(os.path.join(curdir, path))
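# For example (a sketch with invented paths, and assuming no symlinks are
# involved):
#
#   mkPath('/builds/testsuite/tests/ffi', '../rts/T1234.o')
#     ==> '/builds/testsuite/tests/rts/T1234.o'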
1902
1903 def addTestFilesWritten(name, fn):
1904 if config.use_threads:
1905 with t.lockFilesWritten:
1906 addTestFilesWrittenHelper(name, fn)
1907 else:
1908 addTestFilesWrittenHelper(name, fn)
1909
1910 def addTestFilesWrittenHelper(name, fn):
1911 started = False
1912 working_directories = {}
1913
1914 with open(fn, 'r') as f:
1915 for line in f:
1916 m_pid = re_strace_pid.match(line)
1917 if m_pid:
1918 pid = m_pid.group(1)
1919 content = m_pid.group(2)
1920 elif re_strace_unavailable.match(line):
1921 continue
1922 else:
1923 framework_fail(name, 'strace', "Can't find pid in strace line: " + line)
1924
1925 m_open = re_strace_open.match(content)
1926 m_chdir = re_strace_chdir.match(content)
1927 m_clone = re_strace_clone.match(content)
1928 m_vfork = re_strace_vfork.match(content)
1929
1930 if not started:
1931 working_directories[pid] = os.getcwd()
1932 started = True
1933
1934 if m_open:
1935 file = m_open.group(1)
1936 file = mkPath(working_directories[pid], file)
1937 if file.endswith("ghci_history"):
1938 add_bad_file_usage(name, file)
1939 elif not file in ['/dev/tty', '/dev/null'] and not file.startswith("/tmp/ghc"):
1940 flags = m_open.group(2).split('|')
1941 if 'O_WRONLY' in flags or 'O_RDWR' in flags:
1942 if re.search('package\.conf\.d', file):
1943 add_bad_file_usage(name, file)
1944 else:
1945 try:
1946 if not file in files_written[name]:
1947 files_written[name].append(file)
1948 except:
1949 files_written[name] = [file]
1950 elif 'O_RDONLY' in flags:
1951 pass
1952 else:
1953 framework_fail(name, 'strace', "Can't understand flags in open strace line: " + line)
1954 elif m_chdir:
1955 # We optimistically assume that unfinished chdir's are going to succeed
1956 dir = m_chdir.group(1)
1957 working_directories[pid] = mkPath(working_directories[pid], dir)
1958 elif m_clone:
1959 working_directories[m_clone.group(2)] = working_directories[pid]
1960 elif m_vfork:
1961 working_directories[m_vfork.group(2)] = working_directories[pid]
1962 elif re_strace_open_resumed.match(content):
1963 pass
1964 elif re_strace_chdir_resumed.match(content):
1965 pass
1966 elif re_strace_vfork_unfinished.match(content):
1967 pass
1968 elif re_strace_clone_unfinished.match(content):
1969 pass
1970 elif re_strace_ignore_sigchild.match(content):
1971 pass
1972 elif re_strace_ignore_sigvtalarm.match(content):
1973 pass
1974 elif re_strace_ignore_sigint.match(content):
1975 pass
1976 elif re_strace_ignore_sigfpe.match(content):
1977 pass
1978 elif re_strace_ignore_sigsegv.match(content):
1979 pass
1980 elif re_strace_ignore_sigpipe.match(content):
1981 pass
1982 else:
1983 framework_fail(name, 'strace', "Can't understand strace line: " + line)
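# In outline, the bookkeeping above works as follows: every strace line is
# attributed to a pid; chdir updates that pid's working directory; clone and
# vfork copy the parent's working directory to the new child pid; and open
# calls with O_WRONLY or O_RDWR record the (absolute) file as written by the
# test, except for a few special-cased paths (/dev/null, /dev/tty, /tmp/ghc*).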
1984
1985 def checkForFilesWrittenProblems(file):
1986 foundProblem = False
1987
1988 files_written_inverted = {}
1989 for t in files_written.keys():
1990 for f in files_written[t]:
1991 try:
1992 files_written_inverted[f].append(t)
1993 except:
1994 files_written_inverted[f] = [t]
1995
1996 for f in files_written_inverted.keys():
1997 if len(files_written_inverted[f]) > 1:
1998 if not foundProblem:
1999 foundProblem = True
2000 file.write("\n")
2001 file.write("\nSome files are written by multiple tests:\n")
2002 file.write(" " + f + " (" + str(files_written_inverted[f]) + ")\n")
2003 if foundProblem:
2004 file.write("\n")
2005
2006 # -----
2007
2008 if len(files_written_not_removed) > 0:
2009 file.write("\n")
2010 file.write("\nSome files written but not removed:\n")
2011 tests = list(files_written_not_removed.keys())
2012 tests.sort()
2013 for t in tests:
2014 for f in files_written_not_removed[t]:
2015 file.write(" " + t + ": " + f + "\n")
2016 file.write("\n")
2017
2018 # -----
2019
2020 if len(bad_file_usages) > 0:
2021 file.write("\n")
2022 file.write("\nSome bad file usages:\n")
2023 tests = list(bad_file_usages.keys())
2024 tests.sort()
2025 for t in tests:
2026 for f in bad_file_usages[t]:
2027 file.write(" " + t + ": " + f + "\n")
2028 file.write("\n")
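# The report written above looks roughly like this (the names are invented):
#
#   Some files are written by multiple tests:
#    /abs/path/T1234.o (['T1234', 'T1234_copy'])
#
#   Some files written but not removed:
#    T5678: /abs/path/T5678.tmp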
2029
2030 # -----------------------------------------------------------------------------
2031 # checking if ghostscript is available for checking the output of hp2ps
2032
2033 def genGSCmd(psfile):
2034 return (config.gs + ' -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE ' + psfile);
2035
2036 def gsNotWorking():
2037 global gs_working
2038 print("GhostScript not available for hp2ps tests")
2039
2040 global gs_working
2041 gs_working = 0
2042 if config.have_profiling:
2043 if config.gs != '':
2044 resultGood = runCmdExitCode(genGSCmd(config.confdir + '/good.ps'));
2045 if resultGood == 0:
2046 resultBad = runCmdExitCode(genGSCmd(config.confdir + '/bad.ps'));
2047 if resultBad != 0:
2048 print("GhostScript available for hp2ps tests")
2049 gs_working = 1;
2050 else:
2051 gsNotWorking();
2052 else:
2053 gsNotWorking();
2054 else:
2055 gsNotWorking();
2056
2057 def rm_no_fail( file ):
2058 try:
2059 os.remove( file )
2060 except:
2061 pass
2062
2063 def add_suffix( name, suffix ):
2064 if suffix == '':
2065 return name
2066 else:
2067 return name + '.' + suffix
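# For example (made-up test name): add_suffix('T1234', 'hs') == 'T1234.hs',
# and an empty suffix leaves the name unchanged: add_suffix('T1234', '') ==
# 'T1234'.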
2068
2069 def add_hs_lhs_suffix(name):
2070 if getTestOpts().c_src:
2071 return add_suffix(name, 'c')
2072 elif getTestOpts().cmm_src:
2073 return add_suffix(name, 'cmm')
2074 elif getTestOpts().objc_src:
2075 return add_suffix(name, 'm')
2076 elif getTestOpts().objcpp_src:
2077 return add_suffix(name, 'mm')
2078 elif getTestOpts().literate:
2079 return add_suffix(name, 'lhs')
2080 else:
2081 return add_suffix(name, 'hs')
2082
2083 def replace_suffix( name, suffix ):
2084 base, suf = os.path.splitext(name)
2085 return base + '.' + suffix
2086
2087 def in_testdir( name ):
2088 return (getTestOpts().testdir + '/' + name)
2089
2090 def qualify( name, suff ):
2091 return in_testdir(add_suffix(name, suff))
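# For example (a sketch, with an invented test directory): if the current
# test directory is './typecheck/should_run' then
#
#   qualify('T1234', 'stdout')      ==> './typecheck/should_run/T1234.stdout'
#   replace_suffix('T1234.hs', 'o') ==> 'T1234.o'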
2092
2093
2094 # Finding the sample output. The filename is of the form
2095 #
2096 # <test>.stdout[-<compiler>][-<version>][-ws-<wordsize>][-<platform>]
2097 #
2098 # and we pick the most specific version available. The <version> is
2099 # the major version of the compiler (e.g. 6.8.2 would be "6.8"). For
2100 # more fine-grained control use if_compiler_lt().
2101 #
2102 def platform_wordsize_qualify( name, suff ):
2103
2104 basepath = qualify(name, suff)
2105
2106 paths = [(platformSpecific, basepath + comp + vers + ws + plat)
2107 for (platformSpecific, plat) in [(1, '-' + config.platform),
2108 (1, '-' + config.os),
2109 (0, '')]
2110 for ws in ['-ws-' + config.wordsize, '']
2111 for comp in ['-' + config.compiler_type, '']
2112 for vers in ['-' + config.compiler_maj_version, '']]
2113
2114 dir = glob.glob(basepath + '*')
2115 dir = [normalise_slashes_(d) for d in dir]
2116
2117 for (platformSpecific, f) in paths:
2118 if f in dir:
2119 return (platformSpecific,f)
2120
2121 return (0, basepath)
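# As a sketch (with invented config values: compiler 'ghc' version '7.8',
# wordsize '32', os 'mingw32', platform 'i386-unknown-mingw32'), the
# candidates tried for ('T1234', 'stdout') include, from most to least
# specific:
#
#   T1234.stdout-ghc-7.8-ws-32-i386-unknown-mingw32
#   T1234.stdout-ghc-7.8-ws-32-mingw32
#   ...
#   T1234.stdout-ws-32
#   T1234.stdout-ghc
#   T1234.stdout
#
# The first candidate that actually exists in the test directory is used;
# if none exists we fall back to the plain T1234.stdout path.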
2122
2123 # Clean up prior to the test, so that we can't spuriously conclude
2124 # that it passed on the basis of old run outputs.
2125 def pretest_cleanup(name):
2126 if getTestOpts().outputdir is not None:
2127 odir = in_testdir(getTestOpts().outputdir)
2128 try:
2129 shutil.rmtree(odir)
2130 except:
2131 pass
2132 os.mkdir(odir)
2133
2134 rm_no_fail(qualify(name,'interp.stderr'))
2135 rm_no_fail(qualify(name,'interp.stdout'))
2136 rm_no_fail(qualify(name,'comp.stderr'))
2137 rm_no_fail(qualify(name,'comp.stdout'))
2138 rm_no_fail(qualify(name,'run.stderr'))
2139 rm_no_fail(qualify(name,'run.stdout'))
2140 rm_no_fail(qualify(name,'tix'))
2141 rm_no_fail(qualify(name,'exe.tix'))
2142 # simple_build zaps the following:
2143 # rm_no_fail(qualify("o"))
2144 # rm_no_fail(qualify(""))
2145 # not interested in the return code
2146
2147 # -----------------------------------------------------------------------------
2148 # Return a list of all the files ending in '.T' below the given root directories.
2149
2150 def findTFiles(roots):
2151 # It would be better to use os.walk, but that
2152 # gives backslashes on Windows, which trip the
2153 # testsuite later :-(
2154 return [filename for root in roots for filename in findTFiles_(root)]
2155
2156 def findTFiles_(path):
2157 if os.path.isdir(path):
2158 paths = [path + '/' + x for x in os.listdir(path)]
2159 return findTFiles(paths)
2160 elif path[-2:] == '.T':
2161 return [path]
2162 else:
2163 return []
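# For example (the directory layout is invented):
#
#   findTFiles(['tests/ghc-regress'])
#     ==> ['tests/ghc-regress/all.T',
#          'tests/ghc-regress/typecheck/should_run/all.T']
#
# using '/' as the separator even on Windows, as explained above.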
2164
2165 # -----------------------------------------------------------------------------
2166 # Output a test summary to the specified file object
2167
2168 def summary(t, file):
2169
2170 file.write('\n')
2171 printUnexpectedTests(file, [t.unexpected_passes, t.unexpected_failures, t.unexpected_stat_failures])
2172 file.write('OVERALL SUMMARY for test run started at '
2173 + time.strftime("%c %Z", t.start_time) + '\n'
2174 + str(datetime.timedelta(seconds=
2175 round(time.time() - time.mktime(t.start_time)))).rjust(8)
2176 + ' spent to go through\n'
2177 + repr(t.total_tests).rjust(8)
2178 + ' total tests, which gave rise to\n'
2179 + repr(t.total_test_cases).rjust(8)
2180 + ' test cases, of which\n'
2181 + repr(t.n_tests_skipped).rjust(8)
2182 + ' were skipped\n'
2183 + '\n'
2184 + repr(t.n_missing_libs).rjust(8)
2185 + ' had missing libraries\n'
2186 + repr(t.n_expected_passes).rjust(8)
2187 + ' expected passes\n'
2188 + repr(t.n_expected_failures).rjust(8)
2189 + ' expected failures\n'
2190 + '\n'
2191 + repr(t.n_framework_failures).rjust(8)
2192 + ' caused framework failures\n'
2193 + repr(t.n_unexpected_passes).rjust(8)
2194 + ' unexpected passes\n'
2195 + repr(t.n_unexpected_failures).rjust(8)
2196 + ' unexpected failures\n'
2197 + repr(t.n_unexpected_stat_failures).rjust(8)
2198 + ' unexpected stat failures\n'
2199 + '\n')
2200
2201 if t.n_unexpected_passes > 0:
2202 file.write('Unexpected passes:\n')
2203 printPassingTestInfosSummary(file, t.unexpected_passes)
2204
2205 if t.n_unexpected_failures > 0:
2206 file.write('Unexpected failures:\n')
2207 printFailingTestInfosSummary(file, t.unexpected_failures)
2208
2209 if t.n_unexpected_stat_failures > 0:
2210 file.write('Unexpected stat failures:\n')
2211 printFailingTestInfosSummary(file, t.unexpected_stat_failures)
2212
2213 if config.check_files_written:
2214 checkForFilesWrittenProblems(file)
2215
2216 if stopping():
2217 file.write('WARNING: Testsuite run was terminated early\n')
2218
2219 def printUnexpectedTests(file, testInfoss):
2220 unexpected = []
2221 for testInfos in testInfoss:
2222 directories = testInfos.keys()
2223 for directory in directories:
2224 tests = list(testInfos[directory].keys())
2225 unexpected += tests
2226 if unexpected != []:
2227 file.write('Unexpected results from:\n')
2228 file.write('TEST="' + ' '.join(unexpected) + '"\n')
2229 file.write('\n')
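# The TEST="..." line above is formatted so that it can be pasted back into
# a testsuite invocation (for instance 'make TEST="T1234 T5678"', with
# made-up test names here) to re-run just the tests that gave unexpected
# results.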
2230
2231 def printPassingTestInfosSummary(file, testInfos):
2232 directories = list(testInfos.keys())
2233 directories.sort()
2234 maxDirLen = max(len(x) for x in directories)
2235 for directory in directories:
2236 tests = list(testInfos[directory].keys())
2237 tests.sort()
2238 for test in tests:
2239 file.write(' ' + directory.ljust(maxDirLen + 2) + test + \
2240 ' (' + ','.join(testInfos[directory][test]) + ')\n')
2241 file.write('\n')
2242
2243 def printFailingTestInfosSummary(file, testInfos):
2244 directories = list(testInfos.keys())
2245 directories.sort()
2246 maxDirLen = max(len(d) for d in directories)
2247 for directory in directories:
2248 tests = list(testInfos[directory].keys())
2249 tests.sort()
2250 for test in tests:
2251 reasons = testInfos[directory][test].keys()
2252 for reason in reasons:
2253 file.write(' ' + directory.ljust(maxDirLen + 2) + test + \
2254 ' [' + reason + ']' + \
2255 ' (' + ','.join(testInfos[directory][test][reason]) + ')\n')
2256 file.write('\n')
2257
2258 def getStdout(cmd):
2259 if have_subprocess:
2260 p = subprocess.Popen(cmd,
2261 stdout=subprocess.PIPE,
2262 stderr=subprocess.PIPE)
2263 (stdout, stderr) = p.communicate()
2264 r = p.wait()
2265 if r != 0:
2266 raise Exception("Command failed: " + str(cmd))
2267 if stderr:
2268 raise Exception("stderr from command: " + str(cmd))
2269 return stdout
2270 else:
2271 raise Exception("Need subprocess to get stdout, but don't have it")
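# A sketch of typical usage (the exact command is just an example):
#
#   version = getStdout([config.ghc_pkg, '--version'])
#
# Note that the output of p.communicate() is not decoded here, so under
# Python 3 the result is a bytes object rather than a str.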