testsuite: format commands using config dict
ghc.git: testsuite/driver/testlib.py
1 #
2 # (c) Simon Marlow 2002
3 #
4
5 from __future__ import print_function
6
7 import shutil
8 import sys
9 import os
10 import errno
11 import string
12 import re
13 import traceback
14 import time
15 import datetime
16 import copy
17 import glob
18 from math import ceil, trunc
19 import collections
20
21 have_subprocess = False
22 try:
23 import subprocess
24 have_subprocess = True
25 except:
26 print("Warning: subprocess not found, will fall back to spawnv")
27
28 from testglobals import *
29 from testutil import *
30
31 if config.use_threads:
32 import threading
33 try:
34 import thread
35 except ImportError: # Python 3
36 import _thread as thread
37
38 global wantToStop
39 wantToStop = False
40 def stopNow():
41 global wantToStop
42 wantToStop = True
43 def stopping():
44 return wantToStop
45
46 # Options valid for the current test only (these get reset to
47 # testdir_testopts after each test).
48
49 global testopts_local
50 if config.use_threads:
51 testopts_local = threading.local()
52 else:
53 class TestOpts_Local:
54 pass
55 testopts_local = TestOpts_Local()
56
57 def getTestOpts():
58 return testopts_local.x
59
60 def setLocalTestOpts(opts):
61 global testopts_local
62 testopts_local.x=opts
63
64 def isStatsTest():
65 opts = getTestOpts()
66 return len(opts.compiler_stats_range_fields) > 0 or len(opts.stats_range_fields) > 0
67
68
69 # This can be called at the top of a file of tests, to set default test options
70 # for the following tests.
71 def setTestOpts( f ):
72 global thisdir_settings
73 thisdir_settings = [thisdir_settings, f]
74
75 # -----------------------------------------------------------------------------
76 # Canned setup functions for common cases. eg. for a test you might say
77 #
78 # test('test001', normal, compile, [''])
79 #
80 # to run it without any options, but change it to
81 #
82 # test('test001', expect_fail, compile, [''])
83 #
84 # to expect failure for this test.
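#
# Setup functions can be combined by passing a list, and the parameterised
# ones below take arguments; a purely hypothetical example:
#
# test('test001', [reqlib('containers'), omit_ways(['ghci'])], compile, [''])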
85
86 def normal( name, opts ):
87 return;
88
89 def skip( name, opts ):
90 opts.skip = 1
91
92 def expect_fail( name, opts ):
93 opts.expect = 'fail';
94
95 def reqlib( lib ):
96 return lambda name, opts, l=lib: _reqlib (name, opts, l )
97
98 # Cache the results of looking to see if we have a library or not.
99 # This makes quite a difference, especially on Windows.
100 have_lib = {}
101
102 def _reqlib( name, opts, lib ):
103 if lib in have_lib:
104 got_it = have_lib[lib]
105 else:
106 if have_subprocess:
107 # By preference we use subprocess, as the alternative uses
108 # /dev/null which mingw doesn't have.
109 p = subprocess.Popen([config.ghc_pkg, '--no-user-package-db', 'describe', lib],
110 stdout=subprocess.PIPE,
111 stderr=subprocess.PIPE)
112 # read from stdout and stderr to avoid blocking due to
113 # buffers filling
114 p.communicate()
115 r = p.wait()
116 else:
117 r = os.system(config.ghc_pkg + ' --no-user-package-db describe '
118 + lib + ' > /dev/null 2> /dev/null')
119 got_it = r == 0
120 have_lib[lib] = got_it
121
122 if not got_it:
123 opts.expect = 'missing-lib'
124
125 def req_profiling( name, opts ):
126 if not config.have_profiling:
127 opts.expect = 'fail'
128
129 def req_shared_libs( name, opts ):
130 if not config.have_shared_libs:
131 opts.expect = 'fail'
132
133 def req_interp( name, opts ):
134 if not config.have_interp:
135 opts.expect = 'fail'
136
137 def req_smp( name, opts ):
138 if not config.have_smp:
139 opts.expect = 'fail'
140
141 def ignore_output( name, opts ):
142 opts.ignore_output = 1
143
144 def no_stdin( name, opts ):
145 opts.no_stdin = 1
146
147 def combined_output( name, opts ):
148 opts.combined_output = True
149
150 # -----
151
152 def expect_fail_for( ways ):
153 return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )
154
155 def _expect_fail_for( name, opts, ways ):
156 opts.expect_fail_for = ways
157
158 def expect_broken( bug ):
159 return lambda name, opts, b=bug: _expect_broken (name, opts, b )
160
161 def _expect_broken( name, opts, bug ):
162 record_broken(name, opts, bug)
163 opts.expect = 'fail';
164
165 def expect_broken_for( bug, ways ):
166 return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )
167
168 def _expect_broken_for( name, opts, bug, ways ):
169 record_broken(name, opts, bug)
170 opts.expect_fail_for = ways
171
172 def record_broken(name, opts, bug):
173 global brokens
174 me = (bug, opts.testdir, name)
175 if not me in brokens:
176 brokens.append(me)
177
178 # -----
179
180 def omit_ways( ways ):
181 return lambda name, opts, w=ways: _omit_ways( name, opts, w )
182
183 def _omit_ways( name, opts, ways ):
184 opts.omit_ways = ways
185
186 # -----
187
188 def only_ways( ways ):
189 return lambda name, opts, w=ways: _only_ways( name, opts, w )
190
191 def _only_ways( name, opts, ways ):
192 opts.only_ways = ways
193
194 # -----
195
196 def extra_ways( ways ):
197 return lambda name, opts, w=ways: _extra_ways( name, opts, w )
198
199 def _extra_ways( name, opts, ways ):
200 opts.extra_ways = ways
201
202 # -----
203
204 def omit_compiler_types( compiler_types ):
205 return lambda name, opts, c=compiler_types: _omit_compiler_types(name, opts, c)
206
207 def _omit_compiler_types( name, opts, compiler_types ):
208 if config.compiler_type in compiler_types:
209 opts.skip = 1
210
211 # -----
212
213 def only_compiler_types( compiler_types ):
214 return lambda name, opts, c=compiler_types: _only_compiler_types(name, opts, c)
215
216 def _only_compiler_types( name, opts, compiler_types ):
217 if config.compiler_type not in compiler_types:
218 opts.skip = 1
219
220 # -----
221
222 def set_stdin( file ):
223 return lambda name, opts, f=file: _set_stdin(name, opts, f);
224
225 def _set_stdin( name, opts, f ):
226 opts.stdin = f
227
228 # -----
229
230 def exit_code( val ):
231 return lambda name, opts, v=val: _exit_code(name, opts, v);
232
233 def _exit_code( name, opts, v ):
234 opts.exit_code = v
235
236 def signal_exit_code( val ):
237 if opsys('solaris2'):
238 return exit_code( val );
239 else:
240 # When an application running on Linux receives a fatal error
241 # signal, its exit code is encoded as 128 + the signal value.
242 # See http://www.tldp.org/LDP/abs/html/exitcodes.html
243 # I assume that Mac OS X behaves in the same way; at least the
244 # Mac OS X builder's behaviour suggests that it does.
245 return exit_code( val+128 );
246
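# For example, signal_exit_code(6) (SIGABRT) expects a plain exit code of 6
# on Solaris, but 128 + 6 = 134 everywhere else.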
247 # -----
248
249 def timeout_multiplier( val ):
250 return lambda name, opts, v=val: _timeout_multiplier(name, opts, v)
251
252 def _timeout_multiplier( name, opts, v ):
253 opts.timeout_multiplier = v
254
255 # -----
256
257 def extra_run_opts( val ):
258 return lambda name, opts, v=val: _extra_run_opts(name, opts, v);
259
260 def _extra_run_opts( name, opts, v ):
261 opts.extra_run_opts = v
262
263 # -----
264
265 def extra_hc_opts( val ):
266 return lambda name, opts, v=val: _extra_hc_opts(name, opts, v);
267
268 def _extra_hc_opts( name, opts, v ):
269 opts.extra_hc_opts = v
270
271 # -----
272
273 def extra_clean( files ):
274 return lambda name, opts, v=files: _extra_clean(name, opts, v);
275
276 def _extra_clean( name, opts, v ):
277 opts.clean_files = v
278
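# For example (hypothetical file names), a multi-module test can clean up the
# interface and object files of its helper module:
#
# test('test002', extra_clean(['Test002A.hi', 'Test002A.o']),
#      multimod_compile, ['Test002', '-v0'])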
279 # -----
280
281 def stats_num_field( field, expecteds ):
282 return lambda name, opts, f=field, e=expecteds: _stats_num_field(name, opts, f, e);
283
284 def _stats_num_field( name, opts, field, expecteds ):
285 if field in opts.stats_range_fields:
286 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
287
288 if type(expecteds) is list:
289 for (b, expected, dev) in expecteds:
290 if b:
291 opts.stats_range_fields[field] = (expected, dev)
292 return
293 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
294
295 else:
296 (expected, dev) = expecteds
297 opts.stats_range_fields[field] = (expected, dev)
298
299 def compiler_stats_num_field( field, expecteds ):
300 return lambda name, opts, f=field, e=expecteds: _compiler_stats_num_field(name, opts, f, e);
301
302 def _compiler_stats_num_field( name, opts, field, expecteds ):
303 if field in opts.compiler_stats_range_fields:
304 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
305
306 # Compiler performance numbers change when debugging is on, making the results
307 # useless and confusing. Therefore, skip if debugging is on.
308 if compiler_debugged():
309 skip(name, opts)
310
311 for (b, expected, dev) in expecteds:
312 if b:
313 opts.compiler_stats_range_fields[field] = (expected, dev)
314 return
315
316 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
317
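# Example usage (the numbers are hypothetical): allow 'bytes allocated' to
# deviate by 5% from a per-wordsize expectation:
#
# stats_num_field('bytes allocated',
#                 [(wordsize(32), 360000000, 5),
#                  (wordsize(64), 720000000, 5)])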
318 # -----
319
320 def when(b, f):
321 # When config.list_broken is set, we want to see all expect_broken calls,
322 # so we always apply f
323 if b or config.list_broken:
324 return f
325 else:
326 return normal
327
328 def unless(b, f):
329 return when(not b, f)
330
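# The predicates below return booleans for use with when/unless, e.g.
# (hypothetical): when(opsys('mingw32'), skip), or
# unless(have_dynamic(), expect_broken(1234)).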
331 def doing_ghci():
332 return 'ghci' in config.run_ways
333
334 def ghci_dynamic( ):
335 return config.ghc_dynamic
336
337 def fast():
338 return config.fast
339
340 def platform( plat ):
341 return config.platform == plat
342
343 def opsys( os ):
344 return config.os == os
345
346 def arch( arch ):
347 return config.arch == arch
348
349 def wordsize( ws ):
350 return config.wordsize == str(ws)
351
352 def msys( ):
353 return config.msys
354
355 def cygwin( ):
356 return config.cygwin
357
358 def have_vanilla( ):
359 return config.have_vanilla
360
361 def have_dynamic( ):
362 return config.have_dynamic
363
364 def have_profiling( ):
365 return config.have_profiling
366
367 def in_tree_compiler( ):
368 return config.in_tree_compiler
369
370 def compiler_type( compiler ):
371 return config.compiler_type == compiler
372
373 def compiler_lt( compiler, version ):
374 return config.compiler_type == compiler and \
375 version_lt(config.compiler_version, version)
376
377 def compiler_le( compiler, version ):
378 return config.compiler_type == compiler and \
379 version_le(config.compiler_version, version)
380
381 def compiler_gt( compiler, version ):
382 return config.compiler_type == compiler and \
383 version_gt(config.compiler_version, version)
384
385 def compiler_ge( compiler, version ):
386 return config.compiler_type == compiler and \
387 version_ge(config.compiler_version, version)
388
389 def unregisterised( ):
390 return config.unregisterised
391
392 def compiler_profiled( ):
393 return config.compiler_profiled
394
395 def compiler_debugged( ):
396 return config.compiler_debugged
397
398 def tag( t ):
399 return t in config.compiler_tags
400
401 # ---
402
403 def namebase( nb ):
404 return lambda opts, nb=nb: _namebase(opts, nb)
405
406 def _namebase( opts, nb ):
407 opts.with_namebase = nb
408
409 # ---
410
411 def high_memory_usage(name, opts):
412 opts.alone = True
413
414 # If a test is for a multi-CPU race, then running the test alone
415 # increases the chance that we'll actually see it.
416 def multi_cpu_race(name, opts):
417 opts.alone = True
418
419 # ---
420 def literate( name, opts ):
421 opts.literate = 1;
422
423 def c_src( name, opts ):
424 opts.c_src = 1;
425
426 def objc_src( name, opts ):
427 opts.objc_src = 1;
428
429 def objcpp_src( name, opts ):
430 opts.objcpp_src = 1;
431
432 def cmm_src( name, opts ):
433 opts.cmm_src = 1;
434
435 def outputdir( odir ):
436 return lambda name, opts, d=odir: _outputdir(name, opts, d)
437
438 def _outputdir( name, opts, odir ):
439 opts.outputdir = odir;
440
441 # ----
442
443 def pre_cmd( cmd ):
444 return lambda name, opts, c=cmd: _pre_cmd(name, opts, cmd)
445
446 def _pre_cmd( name, opts, cmd ):
447 opts.pre_cmd = cmd
448
449 # ----
450
451 def clean_cmd( cmd ):
452 return lambda name, opts, c=cmd: _clean_cmd(name, opts, cmd)
453
454 def _clean_cmd( name, opts, cmd ):
455 opts.clean_cmd = cmd
456
457 # ----
458
459 def cmd_prefix( prefix ):
460 return lambda name, opts, p=prefix: _cmd_prefix(name, opts, prefix)
461
462 def _cmd_prefix( name, opts, prefix ):
463 opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd;
464
465 # ----
466
467 def cmd_wrapper( fun ):
468 return lambda name, opts, f=fun: _cmd_wrapper(name, opts, fun)
469
470 def _cmd_wrapper( name, opts, fun ):
471 opts.cmd_wrapper = fun
472
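# For example, a test could run its compiled program under a wrapper
# (a hypothetical use of valgrind):
#
# test('test003', cmd_wrapper(lambda c: 'valgrind -q ' + c),
#      compile_and_run, [''])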
473 # ----
474
475 def compile_cmd_prefix( prefix ):
476 return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, prefix)
477
478 def _compile_cmd_prefix( name, opts, prefix ):
479 opts.compile_cmd_prefix = prefix
480
481 # ----
482
483 def check_stdout( f ):
484 return lambda name, opts, f=f: _check_stdout(name, opts, f)
485
486 def _check_stdout( name, opts, f ):
487 opts.check_stdout = f
488
489 # ----
490
491 def normalise_slashes( name, opts ):
492 opts.extra_normaliser = normalise_slashes_
493
494 def normalise_exe( name, opts ):
495 opts.extra_normaliser = normalise_exe_
496
497 def normalise_fun( *fs ):
498 return lambda name, opts: _normalise_fun(name, opts, fs)
499
500 def _normalise_fun( name, opts, *fs ):
501 opts.extra_normaliser = join_normalisers(fs)
502
503 def normalise_errmsg_fun( *fs ):
504 return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)
505
506 def _normalise_errmsg_fun( name, opts, *fs ):
507 opts.extra_errmsg_normaliser = join_normalisers(fs)
508
509 def join_normalisers(*a):
510 """
511 Compose functions, flattening sequences.
512
513 join_normalisers(f1,[f2,f3],f4)
514
515 is the same as
516
517 lambda x: f1(f2(f3(f4(x))))
518 """
519
520 def flatten(l):
521 """
522 Taken from http://stackoverflow.com/a/2158532/946226
523 """
524 for el in l:
525 if isinstance(el, collections.Iterable) and not isinstance(el, basestring):
526 for sub in flatten(el):
527 yield sub
528 else:
529 yield el
530
531 a = flatten(a)
532
533 fn = lambda x:x # identity function
534 for f in a:
535 assert callable(f)
536 fn = lambda x,f=f,fn=fn: fn(f(x))
537 return fn
538
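# e.g. join_normalisers(normalise_errmsg, [normalise_whitespace]) yields a
# single function equivalent to lambda x: normalise_errmsg(normalise_whitespace(x)).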
539 # ----
540 # Function for composing two opt-fns together
541
542 def executeSetups(fs, name, opts):
543 if type(fs) is list:
544 # If we have a list of setups, then execute each one
545 for f in fs:
546 executeSetups(f, name, opts)
547 else:
548 # fs is a single function, so just apply it
549 fs(name, opts)
550
551 # -----------------------------------------------------------------------------
552 # The current directory of tests
553
554 def newTestDir( dir ):
555 global thisdir_settings
556 # reset the options for this test directory
557 thisdir_settings = lambda name, opts, dir=dir: _newTestDir( name, opts, dir )
558
559 def _newTestDir( name, opts, dir ):
560 opts.testdir = dir
561 opts.compiler_always_flags = config.compiler_always_flags
562
563 # -----------------------------------------------------------------------------
564 # Actually doing tests
565
566 parallelTests = []
567 aloneTests = []
568 allTestNames = set([])
569
570 def runTest (opts, name, func, args):
571 ok = 0
572
573 if config.use_threads:
574 t.thread_pool.acquire()
575 try:
576 while config.threads<(t.running_threads+1):
577 t.thread_pool.wait()
578 t.running_threads = t.running_threads+1
579 ok=1
580 t.thread_pool.release()
581 thread.start_new_thread(test_common_thread, (name, opts, func, args))
582 except:
583 if not ok:
584 t.thread_pool.release()
585 else:
586 test_common_work (name, opts, func, args)
587
588 # name :: String
589 # setup :: TestOpts -> IO ()
590 def test (name, setup, func, args):
591 global aloneTests
592 global parallelTests
593 global allTestNames
594 global thisdir_settings
595 if name in allTestNames:
596 framework_fail(name, 'duplicate', 'There are multiple tests with this name')
597 if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
598 framework_fail(name, 'bad_name', 'This test has an invalid name')
599
600 # Make a deep copy of the default_testopts, as we need our own copy
601 # of any dictionaries etc inside it. Otherwise, if one test modifies
602 # them, all tests will see the modified version!
603 myTestOpts = copy.deepcopy(default_testopts)
604
605 executeSetups([thisdir_settings, setup], name, myTestOpts)
606
607 thisTest = lambda : runTest(myTestOpts, name, func, args)
608 if myTestOpts.alone:
609 aloneTests.append(thisTest)
610 else:
611 parallelTests.append(thisTest)
612 allTestNames.add(name)
613
614 if config.use_threads:
615 def test_common_thread(name, opts, func, args):
616 t.lock.acquire()
617 try:
618 test_common_work(name,opts,func,args)
619 finally:
620 t.lock.release()
621 t.thread_pool.acquire()
622 t.running_threads = t.running_threads - 1
623 t.thread_pool.notify()
624 t.thread_pool.release()
625
626 def get_package_cache_timestamp():
627 if config.package_conf_cache_file == '':
628 return 0.0
629 else:
630 try:
631 return os.stat(config.package_conf_cache_file).st_mtime
632 except:
633 return 0.0
634
635
636 def test_common_work (name, opts, func, args):
637 try:
638 t.total_tests = t.total_tests+1
639 setLocalTestOpts(opts)
640
641 package_conf_cache_file_start_timestamp = get_package_cache_timestamp()
642
643 # All the ways we might run this test
644 if func == compile or func == multimod_compile:
645 all_ways = config.compile_ways
646 elif func == compile_and_run or func == multimod_compile_and_run:
647 all_ways = config.run_ways
648 elif func == ghci_script:
649 if 'ghci' in config.run_ways:
650 all_ways = ['ghci']
651 else:
652 all_ways = []
653 else:
654 all_ways = ['normal']
655
656 # A test itself can request extra ways by setting opts.extra_ways
657 all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]
658
659 t.total_test_cases = t.total_test_cases + len(all_ways)
660
661 ok_way = lambda way: \
662 not getTestOpts().skip \
663 and (config.only == [] or name in config.only) \
664 and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
665 and (config.cmdline_ways == [] or way in config.cmdline_ways) \
666 and (not (config.skip_perf_tests and isStatsTest())) \
667 and way not in getTestOpts().omit_ways
668
669 # The ways we will actually run this test in (everything else is skipped)
670 do_ways = list(filter (ok_way,all_ways))
671
672 # In fast mode, we skip all but one way
673 if config.fast and len(do_ways) > 0:
674 do_ways = [do_ways[0]]
675
676 if not config.clean_only:
677 # Run the required tests...
678 for way in do_ways:
679 if stopping():
680 break
681 do_test (name, way, func, args)
682
683 for way in all_ways:
684 if way not in do_ways:
685 skiptest (name,way)
686
687 if getTestOpts().cleanup != '' and (config.clean_only or do_ways != []):
688 pretest_cleanup(name)
689 clean([name + suff for suff in [
690 '', '.exe', '.exe.manifest', '.genscript',
691 '.stderr.normalised', '.stdout.normalised',
692 '.run.stderr.normalised', '.run.stdout.normalised',
693 '.comp.stderr.normalised', '.comp.stdout.normalised',
694 '.interp.stderr.normalised', '.interp.stdout.normalised',
695 '.stats', '.comp.stats',
696 '.hi', '.o', '.prof', '.exe.prof', '.hc',
697 '_stub.h', '_stub.c', '_stub.o',
698 '.hp', '.exe.hp', '.ps', '.aux', '.hcr', '.eventlog']])
699
700 if func == multi_compile or func == multi_compile_fail:
701 extra_mods = args[1]
702 clean([replace_suffix(fx[0],'o') for fx in extra_mods])
703 clean([replace_suffix(fx[0], 'hi') for fx in extra_mods])
704
705
706 clean(getTestOpts().clean_files)
707
708 if getTestOpts().outputdir != None:
709 odir = in_testdir(getTestOpts().outputdir)
710 try:
711 shutil.rmtree(odir)
712 except:
713 pass
714
715 try:
716 shutil.rmtree(in_testdir('.hpc.' + name))
717 except:
718 pass
719
720 try:
721 cleanCmd = getTestOpts().clean_cmd
722 if cleanCmd != None:
723 result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + cleanCmd)
724 if result != 0:
725 framework_fail(name, 'cleaning', 'clean-command failed: ' + str(result))
726 except:
727 framework_fail(name, 'cleaning', 'clean-command exception')
728
729 package_conf_cache_file_end_timestamp = get_package_cache_timestamp();
730
731 if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
732 framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))
733
734 try:
735 for f in files_written[name]:
736 if os.path.exists(f):
737 try:
738 if not f in files_written_not_removed[name]:
739 files_written_not_removed[name].append(f)
740 except:
741 files_written_not_removed[name] = [f]
742 except:
743 pass
744 except Exception as e:
745 framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
746
747 def clean(strs):
748 for str in strs:
749 for name in glob.glob(in_testdir(str)):
750 clean_full_path(name)
751
752 def clean_full_path(name):
753 try:
754 # Remove files...
755 os.remove(name)
756 except OSError as e1:
757 try:
758 # ... and empty directories
759 os.rmdir(name)
760 except OSError as e2:
761 # We don't want to fail here, but we do want to know
762 # what went wrong, so print out the exceptions.
763 # ENOENT isn't a problem, though, as we clean files
764 # that don't necessarily exist.
765 if e1.errno != errno.ENOENT:
766 print(e1)
767 if e2.errno != errno.ENOENT:
768 print(e2)
769
770 def do_test(name, way, func, args):
771 full_name = name + '(' + way + ')'
772
773 try:
774 if_verbose(2, "=====> %s %d of %d %s " % \
775 (full_name, t.total_tests, len(allTestNames), \
776 [t.n_unexpected_passes, \
777 t.n_unexpected_failures, \
778 t.n_framework_failures]))
779
780 if config.use_threads:
781 t.lock.release()
782
783 try:
784 preCmd = getTestOpts().pre_cmd
785 if preCmd != None:
786 result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + preCmd)
787 if result != 0:
788 framework_fail(name, way, 'pre-command failed: ' + str(result))
789 except:
790 framework_fail(name, way, 'pre-command exception')
791
792 try:
793 result = func(*[name,way] + args)
794 finally:
795 if config.use_threads:
796 t.lock.acquire()
797
798 if getTestOpts().expect != 'pass' and \
799 getTestOpts().expect != 'fail' and \
800 getTestOpts().expect != 'missing-lib':
801 framework_fail(name, way, 'bad expected ' + getTestOpts().expect)
802
803 try:
804 passFail = result['passFail']
805 except:
806 passFail = 'No passFail found'
807
808 if passFail == 'pass':
809 if getTestOpts().expect == 'pass' \
810 and way not in getTestOpts().expect_fail_for:
811 t.n_expected_passes = t.n_expected_passes + 1
812 if name in t.expected_passes:
813 t.expected_passes[name].append(way)
814 else:
815 t.expected_passes[name] = [way]
816 else:
817 if_verbose(1, '*** unexpected pass for %s' % full_name)
818 t.n_unexpected_passes = t.n_unexpected_passes + 1
819 addPassingTestInfo(t.unexpected_passes, getTestOpts().testdir, name, way)
820 elif passFail == 'fail':
821 if getTestOpts().expect == 'pass' \
822 and way not in getTestOpts().expect_fail_for:
823 reason = result['reason']
824 tag = result.get('tag')
825 if tag == 'stat':
826 if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
827 t.n_unexpected_stat_failures = t.n_unexpected_stat_failures + 1
828 addFailingTestInfo(t.unexpected_stat_failures, getTestOpts().testdir, name, reason, way)
829 else:
830 if_verbose(1, '*** unexpected failure for %s' % full_name)
831 t.n_unexpected_failures = t.n_unexpected_failures + 1
832 addFailingTestInfo(t.unexpected_failures, getTestOpts().testdir, name, reason, way)
833 else:
834 if getTestOpts().expect == 'missing-lib':
835 t.n_missing_libs = t.n_missing_libs + 1
836 if name in t.missing_libs:
837 t.missing_libs[name].append(way)
838 else:
839 t.missing_libs[name] = [way]
840 else:
841 t.n_expected_failures = t.n_expected_failures + 1
842 if name in t.expected_failures:
843 t.expected_failures[name].append(way)
844 else:
845 t.expected_failures[name] = [way]
846 else:
847 framework_fail(name, way, 'bad result ' + passFail)
848 except KeyboardInterrupt:
849 stopNow()
850 except:
851 framework_fail(name, way, 'do_test exception')
852 traceback.print_exc()
853
854 def addPassingTestInfo (testInfos, directory, name, way):
855 directory = re.sub('^\\.[/\\\\]', '', directory)
856
857 if not directory in testInfos:
858 testInfos[directory] = {}
859
860 if not name in testInfos[directory]:
861 testInfos[directory][name] = []
862
863 testInfos[directory][name].append(way)
864
865 def addFailingTestInfo (testInfos, directory, name, reason, way):
866 directory = re.sub('^\\.[/\\\\]', '', directory)
867
868 if not directory in testInfos:
869 testInfos[directory] = {}
870
871 if not name in testInfos[directory]:
872 testInfos[directory][name] = {}
873
874 if not reason in testInfos[directory][name]:
875 testInfos[directory][name][reason] = []
876
877 testInfos[directory][name][reason].append(way)
878
879 def skiptest (name, way):
880 # print 'Skipping test \"', name, '\"'
881 t.n_tests_skipped = t.n_tests_skipped + 1
882 if name in t.tests_skipped:
883 t.tests_skipped[name].append(way)
884 else:
885 t.tests_skipped[name] = [way]
886
887 def framework_fail( name, way, reason ):
888 full_name = name + '(' + way + ')'
889 if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
890 t.n_framework_failures = t.n_framework_failures + 1
891 if name in t.framework_failures:
892 t.framework_failures[name].append(way)
893 else:
894 t.framework_failures[name] = [way]
895
896 def badResult(result):
897 try:
898 if result['passFail'] == 'pass':
899 return False
900 return True
901 except:
902 return True
903
904 def passed():
905 return {'passFail': 'pass'}
906
907 def failBecause(reason, tag=None):
908 return {'passFail': 'fail', 'reason': reason, 'tag': tag}
909
910 # -----------------------------------------------------------------------------
911 # Generic command tests
912
913 # A generic command test is expected to run and exit successfully.
914 #
915 # The expected exit code can be changed via exit_code() as normal, and
916 # the expected stdout/stderr are stored in <testname>.stdout and
917 # <testname>.stderr. The output of the command can be ignored
918 # altogether by applying the ignore_output setup function to the
919 # test.
920
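# Since runCmdFor formats commands using the config dict (see below), a
# command string may refer to configured tools by field name; a hypothetical
# example:
#
# test('test004', normal, run_command, ['{hpc} report test004.tix'])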
921 def run_command( name, way, cmd ):
922 return simple_run( name, '', cmd, '' )
923
924 # -----------------------------------------------------------------------------
925 # GHCi tests
926
927 def ghci_script_without_flag(flag):
928 def apply(name, way, script):
929 overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
930 return ghci_script_override_default_flags(overrides)(name, way, script)
931
932 return apply
933
934 def ghci_script_override_default_flags(overrides):
935 def apply(name, way, script):
936 return ghci_script(name, way, script, overrides)
937
938 return apply
939
940 def ghci_script( name, way, script, override_flags = None ):
941 # Use overridden default flags when given
942 if override_flags is not None:
943 default_flags = override_flags
944 else:
945 default_flags = getTestOpts().compiler_always_flags
946
947 # filter out -fforce-recomp from compiler_always_flags, because we're
948 # actually testing the recompilation behaviour in the GHCi tests.
949 flags = [f for f in default_flags if f != '-fforce-recomp']
950 flags.append(getTestOpts().extra_hc_opts)
951 if getTestOpts().outputdir != None:
952 flags.extend(["-outputdir", getTestOpts().outputdir])
953
954 # We pass HC and HC_OPTS as environment variables, so that the
955 # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
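# (so a .script file can, for instance, rebuild a C stub with the compiler
# under test via ':! $HC $HC_OPTS -c cbits.c'; the file name is hypothetical)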
956 cmd = "HC='" + config.compiler + "' " + \
957 "HC_OPTS='" + ' '.join(flags) + "' " + \
958 "'" + config.compiler + "'" + \
959 ' --interactive -v0 -ignore-dot-ghci ' + \
960 ' '.join(flags)
961
962 getTestOpts().stdin = script
963 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
964
965 # -----------------------------------------------------------------------------
966 # Compile-only tests
967
968 def compile_override_default_flags(overrides):
969 def apply(name, way, extra_opts):
970 return do_compile(name, way, 0, '', [], extra_opts, overrides)
971
972 return apply
973
974 def compile_fail_override_default_flags(overrides):
975 def apply(name, way, extra_opts):
976 return do_compile(name, way, 1, '', [], extra_opts, overrides)
977
978 return apply
979
980 def compile_without_flag(flag):
981 def apply(name, way, extra_opts):
982 overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
983 return compile_override_default_flags(overrides)(name, way, extra_opts)
984
985 return apply
986
987 def compile_fail_without_flag(flag):
988 def apply(name, way, extra_opts):
989 overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
990 return compile_fail_override_default_flags(overrides)(name, way, extra_opts)
991
992 return apply
993
994 def compile( name, way, extra_hc_opts ):
995 return do_compile( name, way, 0, '', [], extra_hc_opts )
996
997 def compile_fail( name, way, extra_hc_opts ):
998 return do_compile( name, way, 1, '', [], extra_hc_opts )
999
1000 def multimod_compile( name, way, top_mod, extra_hc_opts ):
1001 return do_compile( name, way, 0, top_mod, [], extra_hc_opts )
1002
1003 def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
1004 return do_compile( name, way, 1, top_mod, [], extra_hc_opts )
1005
1006 def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
1007 return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)
1008
1009 def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
1010 return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
1011
1012 def do_compile( name, way, should_fail, top_mod, extra_mods, extra_hc_opts, override_flags = None ):
1013 # print 'Compile only, extra args = ', extra_hc_opts
1014 pretest_cleanup(name)
1015
1016 result = extras_build( way, extra_mods, extra_hc_opts )
1017 if badResult(result):
1018 return result
1019 extra_hc_opts = result['hc_opts']
1020
1021 force = 0
1022 if extra_mods:
1023 force = 1
1024 result = simple_build( name, way, extra_hc_opts, should_fail, top_mod, 0, 1, force, override_flags )
1025
1026 if badResult(result):
1027 return result
1028
1029 # the actual stderr should always match the expected, regardless
1030 # of whether we expected the compilation to fail or not (successful
1031 # compilations may generate warnings).
1032
1033 if getTestOpts().with_namebase == None:
1034 namebase = name
1035 else:
1036 namebase = getTestOpts().with_namebase
1037
1038 (platform_specific, expected_stderr_file) = platform_wordsize_qualify(namebase, 'stderr')
1039 actual_stderr_file = qualify(name, 'comp.stderr')
1040
1041 if not compare_outputs('stderr',
1042 join_normalisers(getTestOpts().extra_errmsg_normaliser,
1043 normalise_errmsg,
1044 normalise_whitespace),
1045 expected_stderr_file, actual_stderr_file):
1046 return failBecause('stderr mismatch')
1047
1048 # no problems found, this test passed
1049 return passed()
1050
1051 def compile_cmp_asm( name, way, extra_hc_opts ):
1052 print('Compile only, extra args = ', extra_hc_opts)
1053 pretest_cleanup(name)
1054 result = simple_build( name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0, 0)
1055
1056 if badResult(result):
1057 return result
1058
1059 # the actual stderr should always match the expected, regardless
1060 # of whether we expected the compilation to fail or not (successful
1061 # compilations may generate warnings).
1062
1063 if getTestOpts().with_namebase == None:
1064 namebase = name
1065 else:
1066 namebase = getTestOpts().with_namebase
1067
1068 (platform_specific, expected_asm_file) = platform_wordsize_qualify(namebase, 'asm')
1069 actual_asm_file = qualify(name, 's')
1070
1071 if not compare_outputs('asm', join_normalisers(normalise_errmsg, normalise_asm), \
1072 expected_asm_file, actual_asm_file):
1073 return failBecause('asm mismatch')
1074
1075 # no problems found, this test passed
1076 return passed()
1077
1078 # -----------------------------------------------------------------------------
1079 # Compile-and-run tests
1080
1081 def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts ):
1082 # print 'Compile and run, extra args = ', extra_hc_opts
1083 pretest_cleanup(name)
1084
1085 result = extras_build( way, extra_mods, extra_hc_opts )
1086 if badResult(result):
1087 return result
1088 extra_hc_opts = result['hc_opts']
1089
1090 if way == 'ghci': # interpreted...
1091 return interpreter_run( name, way, extra_hc_opts, 0, top_mod )
1092 else: # compiled...
1093 force = 0
1094 if extra_mods:
1095 force = 1
1096
1097 result = simple_build( name, way, extra_hc_opts, 0, top_mod, 1, 1, force)
1098 if badResult(result):
1099 return result
1100
1101 cmd = './' + name;
1102
1103 # we don't check the compiler's stderr for a compile-and-run test
1104 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1105
1106 def compile_and_run( name, way, extra_hc_opts ):
1107 return compile_and_run__( name, way, '', [], extra_hc_opts)
1108
1109 def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
1110 return compile_and_run__( name, way, top_mod, [], extra_hc_opts)
1111
1112 def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
1113 return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
1114
1115 def stats( name, way, stats_file ):
1116 opts = getTestOpts()
1117 return checkStats(name, way, stats_file, opts.stats_range_fields)
1118
1119 # -----------------------------------------------------------------------------
1120 # Check -t stats info
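# The stats file is written by the RTS when the test is run with
# '+RTS -t<file> --machine-readable' (see simple_build/simple_run); it holds
# (field, value) pairs, roughly like this illustrative fragment:
#
#  [("bytes allocated", "545064")
#  ,("num_GCs", "1")
#  ,("peak_megabytes_allocated", "2")
#  ]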
1121
1122 def checkStats(name, way, stats_file, range_fields):
1123 full_name = name + '(' + way + ')'
1124
1125 result = passed()
1126 if len(range_fields) > 0:
1127 f = open(in_testdir(stats_file))
1128 contents = f.read()
1129 f.close()
1130
1131 for (field, (expected, dev)) in range_fields.items():
1132 m = re.search('\("' + field + '", "([0-9]+)"\)', contents)
1133 if m == None:
1134 print('Failed to find field: ', field)
1135 result = failBecause('no such stats field'); continue # avoid m.group(1) on a missing field
1136 val = int(m.group(1))
1137
1138 lowerBound = trunc( expected * ((100 - float(dev))/100))
1139 upperBound = trunc(0.5 + ceil(expected * ((100 + float(dev))/100)))
1140
1141 deviation = round(((float(val) * 100)/ expected) - 100, 1)
1142
1143 if val < lowerBound:
1144 print(field, 'value is too low:')
1145 print('(If this is because you have improved GHC, please')
1146 print('update the test so that GHC doesn\'t regress again)')
1147 result = failBecause('stat too good', tag='stat')
1148 if val > upperBound:
1149 print(field, 'value is too high:')
1150 result = failBecause('stat not good enough', tag='stat')
1151
1152 if val < lowerBound or val > upperBound or config.verbose >= 4:
1153 valStr = str(val)
1154 valLen = len(valStr)
1155 expectedStr = str(expected)
1156 expectedLen = len(expectedStr)
1157 length = max(len(str(x)) for x in [expected, lowerBound, upperBound, val])
1158
1159 def display(descr, val, extra):
1160 print(descr, str(val).rjust(length), extra)
1161
1162 display(' Expected ' + full_name + ' ' + field + ':', expected, '+/-' + str(dev) + '%')
1163 display(' Lower bound ' + full_name + ' ' + field + ':', lowerBound, '')
1164 display(' Upper bound ' + full_name + ' ' + field + ':', upperBound, '')
1165 display(' Actual ' + full_name + ' ' + field + ':', val, '')
1166 if val != expected:
1167 display(' Deviation ' + full_name + ' ' + field + ':', deviation, '%')
1168
1169 return result
1170
1171 # -----------------------------------------------------------------------------
1172 # Build a single-module program
1173
1174 def extras_build( way, extra_mods, extra_hc_opts ):
1175 for modopts in extra_mods:
1176 mod, opts = modopts
1177 result = simple_build( mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0, 0)
1178 if not (mod.endswith('.hs') or mod.endswith('.lhs')):
1179 extra_hc_opts += ' ' + replace_suffix(mod, 'o')
1180 if badResult(result):
1181 return result
1182
1183 return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1184
1185
1186 def simple_build( name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, noforce, override_flags = None ):
1187 opts = getTestOpts()
1188 errname = add_suffix(name, 'comp.stderr')
1189 rm_no_fail( qualify(errname, '') )
1190
1191 if top_mod != '':
1192 srcname = top_mod
1193 rm_no_fail( qualify(name, '') )
1194 base, suf = os.path.splitext(top_mod)
1195 rm_no_fail( qualify(base, '') )
1196 rm_no_fail( qualify(base, 'exe') )
1197 elif addsuf:
1198 srcname = add_hs_lhs_suffix(name)
1199 rm_no_fail( qualify(name, '') )
1200 else:
1201 srcname = name
1202 rm_no_fail( qualify(name, 'o') )
1203
1204 rm_no_fail( qualify(replace_suffix(srcname, "o"), '') )
1205
1206 to_do = ''
1207 if top_mod != '':
1208 to_do = '--make '
1209 if link:
1210 to_do = to_do + '-o ' + name
1211 elif link:
1212 to_do = '-o ' + name
1213 elif opts.compile_to_hc:
1214 to_do = '-C'
1215 else:
1216 to_do = '-c' # just compile
1217
1218 stats_file = name + '.comp.stats'
1219 if len(opts.compiler_stats_range_fields) > 0:
1220 extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1221
1222 # Required by GHC 7.3+, harmless for earlier versions:
1223 if (getTestOpts().c_src or
1224 getTestOpts().objc_src or
1225 getTestOpts().objcpp_src or
1226 getTestOpts().cmm_src):
1227 extra_hc_opts += ' -no-hs-main '
1228
1229 if getTestOpts().compile_cmd_prefix == '':
1230 cmd_prefix = ''
1231 else:
1232 cmd_prefix = getTestOpts().compile_cmd_prefix + ' '
1233
1234 if override_flags is not None:
1235 comp_flags = copy.copy(override_flags)
1236 else:
1237 comp_flags = copy.copy(getTestOpts().compiler_always_flags)
1238
1239 if noforce:
1240 comp_flags = [f for f in comp_flags if f != '-fforce-recomp']
1241 if getTestOpts().outputdir != None:
1242 comp_flags.extend(["-outputdir", getTestOpts().outputdir])
1243
1244 cmd = 'cd ' + getTestOpts().testdir + " && " + cmd_prefix + "'" \
1245 + config.compiler + "' " \
1246 + ' '.join(comp_flags) + ' ' \
1247 + to_do + ' ' + srcname + ' ' \
1248 + ' '.join(config.way_flags(name)[way]) + ' ' \
1249 + extra_hc_opts + ' ' \
1250 + opts.extra_hc_opts + ' ' \
1251 + '>' + errname + ' 2>&1'
1252
1253 result = runCmdFor(name, cmd)
1254
1255 if result != 0 and not should_fail:
1256 actual_stderr = qualify(name, 'comp.stderr')
1257 if_verbose(1,'Compile failed (status ' + repr(result) + ') errors were:')
1258 if_verbose_dump(1,actual_stderr)
1259
1260 # ToDo: if the sub-shell was killed by ^C, then exit
1261
1262 statsResult = checkStats(name, way, stats_file, opts.compiler_stats_range_fields)
1263
1264 if badResult(statsResult):
1265 return statsResult
1266
1267 if should_fail:
1268 if result == 0:
1269 return failBecause('exit code 0')
1270 else:
1271 if result != 0:
1272 return failBecause('exit code non-0')
1273
1274 return passed()
1275
1276 # -----------------------------------------------------------------------------
1277 # Run a program and check its output
1278 #
1279 # If testname.stdin exists, route input from that, else
1280 # from /dev/null. Route output to testname.run.stdout and
1281 # testname.run.stderr. Returns the exit code of the run.
1282
1283 def simple_run( name, way, prog, args ):
1284 opts = getTestOpts()
1285
1286 # figure out what to use for stdin
1287 if opts.stdin != '':
1288 use_stdin = opts.stdin
1289 else:
1290 stdin_file = add_suffix(name, 'stdin')
1291 if os.path.exists(in_testdir(stdin_file)):
1292 use_stdin = stdin_file
1293 else:
1294 use_stdin = '/dev/null'
1295
1296 run_stdout = add_suffix(name,'run.stdout')
1297 run_stderr = add_suffix(name,'run.stderr')
1298
1299 rm_no_fail(qualify(name,'run.stdout'))
1300 rm_no_fail(qualify(name,'run.stderr'))
1301 rm_no_fail(qualify(name, 'hp'))
1302 rm_no_fail(qualify(name,'ps'))
1303 rm_no_fail(qualify(name, 'prof'))
1304
1305 my_rts_flags = rts_flags(way)
1306
1307 stats_file = name + '.stats'
1308 if len(opts.stats_range_fields) > 0:
1309 args += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1310
1311 if opts.no_stdin:
1312 stdin_comes_from = ''
1313 else:
1314 stdin_comes_from = ' <' + use_stdin
1315
1316 if opts.combined_output:
1317 redirection = ' > {} 2>&1'.format(run_stdout)
1318 redirection_append = ' >> {} 2>&1'.format(run_stdout)
1319 else:
1320 redirection = ' > {} 2> {}'.format(run_stdout, run_stderr)
1321 redirection_append = ' >> {} 2>> {}'.format(run_stdout, run_stderr)
1322
1323 cmd = prog + ' ' + args + ' ' \
1324 + my_rts_flags + ' ' \
1325 + stdin_comes_from \
1326 + redirection
1327
1328 if opts.cmd_wrapper != None:
1329 cmd = opts.cmd_wrapper(cmd) + redirection_append
1330
1331 cmd = 'cd ' + opts.testdir + ' && ' + cmd
1332
1333 # run the command
1334 result = runCmdFor(name, cmd, timeout_multiplier=opts.timeout_multiplier)
1335
1336 exit_code = result >> 8
1337 signal = result & 0xff
1338
1339 # check the exit code
1340 if exit_code != opts.exit_code:
1341 print('Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')')
1342 dump_stdout(name)
1343 dump_stderr(name)
1344 return failBecause('bad exit code')
1345
1346 check_hp = my_rts_flags.find("-h") != -1
1347 check_prof = my_rts_flags.find("-p") != -1
1348
1349 if not opts.ignore_output:
1350 bad_stderr = not opts.combined_output and not check_stderr_ok(name)
1351 bad_stdout = not check_stdout_ok(name)
1352 if bad_stderr:
1353 return failBecause('bad stderr')
1354 if bad_stdout:
1355 return failBecause('bad stdout')
1356 # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
1357 if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
1358 return failBecause('bad heap profile')
1359 if check_prof and not check_prof_ok(name):
1360 return failBecause('bad profile')
1361
1362 return checkStats(name, way, stats_file, opts.stats_range_fields)
1363
1364 def rts_flags(way):
1365 if (way == ''):
1366 return ''
1367 else:
1368 args = config.way_rts_flags[way]
1369
1370 if args == []:
1371 return ''
1372 else:
1373 return '+RTS ' + ' '.join(args) + ' -RTS'
1374
1375 # -----------------------------------------------------------------------------
1376 # Run a program in the interpreter and check its output
1377
1378 def interpreter_run( name, way, extra_hc_opts, compile_only, top_mod ):
1379 outname = add_suffix(name, 'interp.stdout')
1380 errname = add_suffix(name, 'interp.stderr')
1381 rm_no_fail(outname)
1382 rm_no_fail(errname)
1383 rm_no_fail(name)
1384
1385 if (top_mod == ''):
1386 srcname = add_hs_lhs_suffix(name)
1387 else:
1388 srcname = top_mod
1389
1390 scriptname = add_suffix(name, 'genscript')
1391 qscriptname = in_testdir(scriptname)
1392 rm_no_fail(qscriptname)
1393
1394 delimiter = '===== program output begins here\n'
1395
1396 script = open(qscriptname, 'w')
1397 if not compile_only:
1398 # set the prog name and command-line args to match the compiled
1399 # environment.
1400 script.write(':set prog ' + name + '\n')
1401 script.write(':set args ' + getTestOpts().extra_run_opts + '\n')
1402 # Add marker lines to the stdout and stderr output files, so we
1403 # can separate GHCi's output from the program's.
1404 script.write(':! echo ' + delimiter)
1405 script.write(':! echo 1>&2 ' + delimiter)
1406 # Set stdout to be line-buffered to match the compiled environment.
1407 script.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
1408 # wrapping in GHC.TopHandler.runIO ensures we get the same output
1409 # in the event of an exception as for the compiled program.
1410 script.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
1411 script.close()
1412
1413 # figure out what to use for stdin
1414 if getTestOpts().stdin != '':
1415 stdin_file = in_testdir(getTestOpts().stdin)
1416 else:
1417 stdin_file = qualify(name, 'stdin')
1418
1419 if os.path.exists(stdin_file):
1420 # append the test's stdin file to the script, so GHCi reads its contents after the commands
1421 os.system('cat ' + stdin_file + ' >>' + qscriptname)
1422
1423 script.close()
1424
1425 flags = copy.copy(getTestOpts().compiler_always_flags)
1426 if getTestOpts().outputdir != None:
1427 flags.extend(["-outputdir", getTestOpts().outputdir])
1428
1429 if getTestOpts().combined_output:
1430 redirection = ' > {} 2>&1'.format(outname)
1431 redirection_append = ' >> {} 2>&1'.format(outname)
1432 else:
1433 redirection = ' > {} 2> {}'.format(outname, errname)
1434 redirection_append = ' >> {} 2>> {}'.format(outname, errname)
1435
1436 cmd = "'" + config.compiler + "' " \
1437 + ' '.join(flags) + ' ' \
1438 + srcname + ' ' \
1439 + ' '.join(config.way_flags(name)[way]) + ' ' \
1440 + extra_hc_opts + ' ' \
1441 + getTestOpts().extra_hc_opts + ' ' \
1442 + '<' + scriptname + redirection
1443
1444 if getTestOpts().cmd_wrapper != None:
1445 cmd = getTestOpts().cmd_wrapper(cmd) + redirection_append;
1446
1447 cmd = 'cd ' + getTestOpts().testdir + " && " + cmd
1448
1449 result = runCmdFor(name, cmd, timeout_multiplier=getTestOpts().timeout_multiplier)
1450
1451 exit_code = result >> 8
1452 signal = result & 0xff
1453
1454 # split the stdout into compilation/program output
1455 split_file(in_testdir(outname), delimiter,
1456 qualify(name, 'comp.stdout'),
1457 qualify(name, 'run.stdout'))
1458 split_file(in_testdir(errname), delimiter,
1459 qualify(name, 'comp.stderr'),
1460 qualify(name, 'run.stderr'))
1461
1462 # check the exit code
1463 if exit_code != getTestOpts().exit_code:
1464 print('Wrong exit code (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
1465 dump_stdout(name)
1466 dump_stderr(name)
1467 return failBecause('bad exit code')
1468
1469 # ToDo: if the sub-shell was killed by ^C, then exit
1470
1471 if getTestOpts().ignore_output or (check_stderr_ok(name) and
1472 check_stdout_ok(name)):
1473 return passed()
1474 else:
1475 return failBecause('bad stdout or stderr')
1476
1477
1478 def split_file(in_fn, delimiter, out1_fn, out2_fn):
1479 infile = open(in_fn)
1480 out1 = open(out1_fn, 'w')
1481 out2 = open(out2_fn, 'w')
1482
1483 line = infile.readline()
1484 line = re.sub('\r', '', line) # ignore Windows EOL
1485 while (re.sub('^\s*','',line) != delimiter and line != ''):
1486 out1.write(line)
1487 line = infile.readline()
1488 line = re.sub('\r', '', line)
1489 out1.close()
1490
1491 line = infile.readline()
1492 while (line != ''):
1493 out2.write(line)
1494 line = infile.readline()
1495 out2.close()
1496
1497 # -----------------------------------------------------------------------------
1498 # Utils
1499
1500 def check_stdout_ok( name ):
1501 if getTestOpts().with_namebase == None:
1502 namebase = name
1503 else:
1504 namebase = getTestOpts().with_namebase
1505
1506 actual_stdout_file = qualify(name, 'run.stdout')
1507 (platform_specific, expected_stdout_file) = platform_wordsize_qualify(namebase, 'stdout')
1508
1509 def norm(str):
1510 if platform_specific:
1511 return str
1512 else:
1513 return normalise_output(str)
1514
1515 extra_norm = join_normalisers(norm, getTestOpts().extra_normaliser)
1516
1517 check_stdout = getTestOpts().check_stdout
1518 if check_stdout:
1519 return check_stdout(actual_stdout_file, extra_norm)
1520
1521 return compare_outputs('stdout', \
1522 extra_norm, \
1523 expected_stdout_file, actual_stdout_file)
1524
1525 def dump_stdout( name ):
1526 print('Stdout:')
1527 print(read_no_crs(qualify(name, 'run.stdout')))
1528
1529 def check_stderr_ok( name ):
1530 if getTestOpts().with_namebase == None:
1531 namebase = name
1532 else:
1533 namebase = getTestOpts().with_namebase
1534
1535 actual_stderr_file = qualify(name, 'run.stderr')
1536 (platform_specific, expected_stderr_file) = platform_wordsize_qualify(namebase, 'stderr')
1537
1538 def norm(str):
1539 if platform_specific:
1540 return str
1541 else:
1542 return normalise_errmsg(str)
1543
1544 return compare_outputs('stderr', \
1545 join_normalisers(norm, getTestOpts().extra_errmsg_normaliser), \
1546 expected_stderr_file, actual_stderr_file)
1547
1548 def dump_stderr( name ):
1549 print("Stderr:")
1550 print(read_no_crs(qualify(name, 'run.stderr')))
1551
1552 def read_no_crs(file):
1553 str = ''
1554 try:
1555 h = open(file)
1556 str = h.read()
1557 h.close()
1558 except:
1559 # On Windows, if the program fails very early, it seems the files
1560 # that stdout/stderr are redirected to may not get created
1561 pass
1562 return re.sub('\r', '', str)
1563
1564 def write_file(file, str):
1565 h = open(file, 'w')
1566 h.write(str)
1567 h.close()
1568
1569 def check_hp_ok(name):
1570
1571 # do not qualify for hp2ps because we should be in the right directory
1572 hp2psCmd = "cd " + getTestOpts().testdir + " && '" + config.hp2ps + "' " + name
1573
1574 hp2psResult = runCmdExitCode(hp2psCmd)
1575
1576 actual_ps_file = qualify(name, 'ps')
1577
1578 if(hp2psResult == 0):
1579 if (os.path.exists(actual_ps_file)):
1580 if gs_working:
1581 gsResult = runCmdExitCode(genGSCmd(actual_ps_file))
1582 if (gsResult == 0):
1583 return (True)
1584 else:
1585 print("hp2ps output for " + name + "is not valid PostScript")
1586 else: return (True) # assume postscript is valid without ghostscript
1587 else:
1588 print("hp2ps did not generate PostScript for " + name)
1589 return (False)
1590 else:
1591 print("hp2ps error when processing heap profile for " + name)
1592 return(False)
1593
1594 def check_prof_ok(name):
1595
1596 prof_file = qualify(name,'prof')
1597
1598 if not os.path.exists(prof_file):
1599 print(prof_file + " does not exist")
1600 return(False)
1601
1602 if os.path.getsize(qualify(name,'prof')) == 0:
1603 print(prof_file + " is empty")
1604 return(False)
1605
1606 if getTestOpts().with_namebase == None:
1607 namebase = name
1608 else:
1609 namebase = getTestOpts().with_namebase
1610
1611 (platform_specific, expected_prof_file) = \
1612 platform_wordsize_qualify(namebase, 'prof.sample')
1613
1614 # sample prof file is not required
1615 if not os.path.exists(expected_prof_file):
1616 return True
1617 else:
1618 return compare_outputs('prof', \
1619 join_normalisers(normalise_whitespace,normalise_prof), \
1620 expected_prof_file, prof_file)
1621
1622 # Compare expected output to actual output, and optionally accept the
1623 # new output. Returns true if output matched or was accepted, false
1624 # otherwise.
1625 def compare_outputs( kind, normaliser, expected_file, actual_file ):
1626 if os.path.exists(expected_file):
1627 expected_raw = read_no_crs(expected_file)
1628 # print "norm:", normaliser(expected_raw)
1629 expected_str = normaliser(expected_raw)
1630 expected_file_for_diff = expected_file
1631 else:
1632 expected_str = ''
1633 expected_file_for_diff = '/dev/null'
1634
1635 actual_raw = read_no_crs(actual_file)
1636 actual_str = normaliser(actual_raw)
1637
1638 if expected_str == actual_str:
1639 return 1
1640 else:
1641 if_verbose(1, 'Actual ' + kind + ' output differs from expected:')
1642
1643 if expected_file_for_diff == '/dev/null':
1644 expected_normalised_file = '/dev/null'
1645 else:
1646 expected_normalised_file = expected_file + ".normalised"
1647 write_file(expected_normalised_file, expected_str)
1648
1649 actual_normalised_file = actual_file + ".normalised"
1650 write_file(actual_normalised_file, actual_str)
1651
1652 # Ignore whitespace when diffing. We should only get to this
1653 # point if there are non-whitespace differences
1654 #
1655 # Note we are diffing the *actual* output, not the normalised
1656 # output. The normalised output may have whitespace squashed
1657 # (including newlines) so the diff would be hard to read.
1658 # This does mean that the diff might contain changes that
1659 # would be normalised away.
1660 if (config.verbose >= 1):
1661 r = os.system( 'diff -uw ' + expected_file_for_diff + \
1662 ' ' + actual_file )
1663
1664 # If for some reason there were no non-whitespace differences,
1665 # then do a full diff
1666 if r == 0:
1667 r = os.system( 'diff -u ' + expected_file_for_diff + \
1668 ' ' + actual_file )
1669
1670 if config.accept:
1671 if_verbose(1, 'Accepting new output.')
1672 write_file(expected_file, actual_raw)
1673 return 1
1674 else:
1675 return 0
1676
1677
1678 def normalise_whitespace( str ):
1679 # Merge contiguous whitespace characters into a single space.
1680 str = re.sub('[ \t\n]+', ' ', str)
1681 return str
1682
1683 def normalise_errmsg( str ):
1684 # If a file name ends in ".exe" or ".exe:", zap the ".exe" (for Windows);
1685 # the colon is there because it appears in error messages. This
1686 # hacky solution is used in place of more sophisticated filename
1687 # mangling.
1688 str = re.sub('([^\\s])\\.exe', '\\1', str)
1689 # normalise slashes, minimise Windows/Unix filename differences
1690 str = re.sub('\\\\', '/', str)
1691 # The inplace ghc's are called ghc-stage[123] to avoid filename
1692 # collisions, so we need to normalise that to just "ghc"
1693 str = re.sub('ghc-stage[123]', 'ghc', str)
1694 # Error messages sometimes mention the integer implementation package
1695 str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
1696 return str
1697
1698 # normalise a .prof file, so that we can reasonably compare it against
1699 # a sample. This doesn't compare any of the actual profiling data,
1700 # only the shape of the profile and the number of entries.
1701 def normalise_prof (str):
1702 # strip everything up to the line beginning "COST CENTRE"
1703 str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)
1704
1705 # strip results for CAFs, these tend to change unpredictably
1706 str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)
1707
1708 # XXX Ignore Main.main. Sometimes this appears under CAF, and
1709 # sometimes under MAIN.
1710 str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)
1711
1712 # We have something like this:
1713
1714 # MAIN MAIN 101 0 0.0 0.0 100.0 100.0
1715 # k Main 204 1 0.0 0.0 0.0 0.0
1716 # foo Main 205 1 0.0 0.0 0.0 0.0
1717 # foo.bar Main 207 1 0.0 0.0 0.0 0.0
1718
1719 # then we remove all the specific profiling data, leaving only the
1720 # cost centre name, module, and entries, to end up with this:
1721
1722 # MAIN MAIN 0
1723 # k Main 1
1724 # foo Main 1
1725 # foo.bar Main 1
1726
1727 str = re.sub('\n([ \t]*[^ \t]+)([ \t]+[^ \t]+)([ \t]+\\d+)([ \t]+\\d+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)','\n\\1 \\2 \\4',str)
1728 return str
1729
1730 def normalise_slashes_( str ):
1731 str = re.sub('\\\\', '/', str)
1732 return str
1733
1734 def normalise_exe_( str ):
1735 str = re.sub('\.exe', '', str)
1736 return str
1737
1738 def normalise_output( str ):
1739 # Remove a .exe extension (for Windows)
1740 # This can occur in error messages generated by the program.
1741 str = re.sub('([^\\s])\\.exe', '\\1', str)
1742 return str
1743
1744 def normalise_asm( str ):
1745 lines = str.split('\n')
1746 # Only keep instructions and labels not starting with a dot.
1747 metadata = re.compile('^[ \t]*\\..*$')
1748 out = []
1749 for line in lines:
1750 # Drop metadata directives (e.g. ".type")
1751 if not metadata.match(line):
1752 line = re.sub('@plt', '', line)
1753 instr = line.lstrip().split()
1754 # Drop empty lines.
1755 if not instr:
1756 continue
1757 # Drop operands, except for call instructions.
1758 elif instr[0] == 'call':
1759 out.append(instr[0] + ' ' + instr[1])
1760 else:
1761 out.append(instr[0])
1762 out = '\n'.join(out)
1763 return out
1764
1765 def if_verbose( n, s ):
1766 if config.verbose >= n:
1767 print(s)
1768
1769 def if_verbose_dump( n, f ):
1770 if config.verbose >= n:
1771 try:
1772 print(open(f).read())
1773 except:
1774 print('')
1775
1776 def rawSystem(cmd_and_args):
1777 # We prefer subprocess.call to os.spawnv as the latter
1778 # seems to send its arguments through a shell or something
1779 # with the Windows (non-cygwin) python. An argument "a b c"
1780 # turns into three arguments ["a", "b", "c"].
1781
1782 # However, subprocess is new in python 2.4, so fall back to
1783 # using spawnv if we don't have it
1784
1785 if have_subprocess:
1786 return subprocess.call(cmd_and_args)
1787 else:
1788 return os.spawnv(os.P_WAIT, cmd_and_args[0], cmd_and_args)
1789
1790 # When running under native msys Python, any invocations of non-msys binaries,
1791 # including timeout.exe, will have their arguments munged according to some
1792 # heuristics, which leads to malformed command lines (#9626). The easiest way
1793 # to avoid problems is to invoke through /usr/bin/cmd which sidesteps argument
1794 # munging because it is a native msys application.
1795 def passThroughCmd(cmd_and_args):
1796 args = []
1797 # cmd needs a Windows-style path for its first argument.
1798 args.append(cmd_and_args[0].replace('/', '\\'))
1799 # Other arguments need to be quoted to deal with spaces.
1800 args.extend(['"%s"' % arg for arg in cmd_and_args[1:]])
1801 return ["cmd", "/c", " ".join(args)]
1802
1803 # Note that this doesn't handle the timeout itself; it is just used for
1804 # commands that have timeout handling built-in.
1805 def rawSystemWithTimeout(cmd_and_args):
1806 if config.os == 'mingw32' and sys.executable.startswith('/usr'):
1807 # This is only needed when running under msys python.
1808 cmd_and_args = passThroughCmd(cmd_and_args)
1809 r = rawSystem(cmd_and_args)
1810 if r == 98:
1811 # The python timeout program uses 98 to signal that ^C was pressed
1812 stopNow()
1813 return r
1814
1815 # cmd is a complex command in Bourne-shell syntax,
1816 # e.g. (cd . && 'c:/users/simonpj/darcs/HEAD/compiler/stage1/ghc-inplace' ...etc).
1817 # Hence it must ultimately be run by a Bourne shell.
1818 #
1819 # Mostly it invokes the command wrapped in 'timeout', thus:
1820 #     timeout 300 'cd . && ...blah blah'
1821 # so it is timeout's job to invoke the Bourne shell.
1822 #
1823 # But watch out for the case when there is no timeout program!
1824 # Then, when using the native Python, os.system will invoke the cmd shell.
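#
# For illustration only (the timeout value and command here are hypothetical):
#   with a timeout program:    timeout 300 'cd . && $MAKE test'
#   without one:               os.system("cd . && $MAKE test")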
1825
1826 def runCmd( cmd ):
1827 if_verbose( 3, cmd )
1828 r = 0
1829 if config.os == 'mingw32':
1830 # On MinGW, we will always have timeout
1831 assert config.timeout_prog!=''
1832
1833 if config.timeout_prog != '':
1834 r = rawSystemWithTimeout([config.timeout_prog, str(config.timeout), cmd])
1835 else:
1836 r = os.system(cmd)
1837 return r << 8
1838
1839 def runCmdFor( name, cmd, timeout_multiplier=1.0 ):
1840 # Format cmd using config. Example: cmd='{hpc} report A.tix'
1841 cmd = cmd.format(**config.__dict__)
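# For illustration only: if config.hpc were '/usr/bin/hpc' (a hypothetical
# value), the example above would expand to '/usr/bin/hpc report A.tix'.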
1842
1843 if_verbose( 3, cmd )
1844 r = 0
1845 if config.os == 'mingw32':
1846 # On MinGW, we will always have timeout
1847 assert config.timeout_prog!=''
1848 timeout = int(ceil(config.timeout * timeout_multiplier))
1849
1850 if config.timeout_prog != '':
1851 if config.check_files_written:
1852 fn = name + ".strace"
1853 r = rawSystemWithTimeout(
1854 ["strace", "-o", fn, "-fF",
1855 "-e", "creat,open,chdir,clone,vfork",
1856 config.timeout_prog, str(timeout), cmd])
1857 addTestFilesWritten(name, fn)
1858 rm_no_fail(fn)
1859 else:
1860 r = rawSystemWithTimeout([config.timeout_prog, str(timeout), cmd])
1861 else:
1862 r = os.system(cmd)
1863 return r << 8
1864
1865 def runCmdExitCode( cmd ):
1866 return runCmd(cmd) >> 8
1867
1868
1869 # -----------------------------------------------------------------------------
1870 # checking for files being written to by multiple tests
1871
1872 re_strace_call_end = '(\) += ([0-9]+|-1 E.*)| <unfinished ...>)$'
1873 re_strace_unavailable = re.compile('^\) += \? <unavailable>$')
1874 re_strace_pid = re.compile('^([0-9]+) +(.*)')
1875 re_strace_clone = re.compile('^(clone\(|<... clone resumed> ).*\) = ([0-9]+)$')
1876 re_strace_clone_unfinished = re.compile('^clone\( <unfinished \.\.\.>$')
1877 re_strace_vfork = re.compile('^(vfork\(\)|<\.\.\. vfork resumed> \)) += ([0-9]+)$')
1878 re_strace_vfork_unfinished = re.compile('^vfork\( <unfinished \.\.\.>$')
1879 re_strace_chdir = re.compile('^chdir\("([^"]*)"(\) += 0| <unfinished ...>)$')
1880 re_strace_chdir_resumed = re.compile('^<\.\.\. chdir resumed> \) += 0$')
1881 re_strace_open = re.compile('^open\("([^"]*)", ([A-Z_|]*)(, [0-9]+)?' + re_strace_call_end)
1882 re_strace_open_resumed = re.compile('^<... open resumed> ' + re_strace_call_end)
1883 re_strace_ignore_sigchild = re.compile('^--- SIGCHLD \(Child exited\) @ 0 \(0\) ---$')
1884 re_strace_ignore_sigvtalarm = re.compile('^--- SIGVTALRM \(Virtual timer expired\) @ 0 \(0\) ---$')
1885 re_strace_ignore_sigint = re.compile('^--- SIGINT \(Interrupt\) @ 0 \(0\) ---$')
1886 re_strace_ignore_sigfpe = re.compile('^--- SIGFPE \(Floating point exception\) @ 0 \(0\) ---$')
1887 re_strace_ignore_sigsegv = re.compile('^--- SIGSEGV \(Segmentation fault\) @ 0 \(0\) ---$')
1888 re_strace_ignore_sigpipe = re.compile('^--- SIGPIPE \(Broken pipe\) @ 0 \(0\) ---$')
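# For illustration only, a (hypothetical) strace line such as
#
#   1234 open("foo.hi", O_WRONLY|O_CREAT, 0666) = 3
#
# is split by re_strace_pid into the pid '1234' and the call text, which then
# matches re_strace_open with the filename 'foo.hi' and the flags
# 'O_WRONLY|O_CREAT'.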
1889
1890 # Files that are read or written but shouldn't be:
1891 # * ghci_history shouldn't be read or written by tests
1892 # * things under package.conf.d shouldn't be written by tests
1893 bad_file_usages = {}
1894
1895 # Mapping from tests to the list of files that they write
1896 files_written = {}
1897
1898 # Mapping from tests to the list of files that they write but don't clean
1899 files_written_not_removed = {}
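# For illustration only (hypothetical entries), these mappings have the shape:
#   files_written   = {'T1234': ['/abs/path/to/T1234.o', '/abs/path/to/T1234.hi']}
#   bad_file_usages = {'T1234': ['/home/user/.ghc/ghci_history']}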
1900
1901 def add_bad_file_usage(name, file):
1902 try:
1903 if not file in bad_file_usages[name]:
1904 bad_file_usages[name].append(file)
1905 except:
1906 bad_file_usages[name] = [file]
1907
1908 def mkPath(curdir, path):
1909 # Given that the current working directory is 'curdir', return the full
1910 # path to 'path'.
1911 return os.path.realpath(os.path.join(curdir, path))
1912
1913 def addTestFilesWritten(name, fn):
1914 if config.use_threads:
1915 with t.lockFilesWritten:
1916 addTestFilesWrittenHelper(name, fn)
1917 else:
1918 addTestFilesWrittenHelper(name, fn)
1919
1920 def addTestFilesWrittenHelper(name, fn):
1921 started = False
1922 working_directories = {}
1923
1924 with open(fn, 'r') as f:
1925 for line in f:
1926 m_pid = re_strace_pid.match(line)
1927 if m_pid:
1928 pid = m_pid.group(1)
1929 content = m_pid.group(2)
1930 elif re_strace_unavailable.match(line):
1931 continue
1932 else:
1933 framework_fail(name, 'strace', "Can't find pid in strace line: " + line)
1934
1935 m_open = re_strace_open.match(content)
1936 m_chdir = re_strace_chdir.match(content)
1937 m_clone = re_strace_clone.match(content)
1938 m_vfork = re_strace_vfork.match(content)
1939
1940 if not started:
1941 working_directories[pid] = os.getcwd()
1942 started = True
1943
1944 if m_open:
1945 file = m_open.group(1)
1946 file = mkPath(working_directories[pid], file)
1947 if file.endswith("ghci_history"):
1948 add_bad_file_usage(name, file)
1949 elif not file in ['/dev/tty', '/dev/null'] and not file.startswith("/tmp/ghc"):
1950 flags = m_open.group(2).split('|')
1951 if 'O_WRONLY' in flags or 'O_RDWR' in flags:
1952 if re.search('package\.conf\.d', file):
1953 add_bad_file_usage(name, file)
1954 else:
1955 try:
1956 if not file in files_written[name]:
1957 files_written[name].append(file)
1958 except:
1959 files_written[name] = [file]
1960 elif 'O_RDONLY' in flags:
1961 pass
1962 else:
1963 framework_fail(name, 'strace', "Can't understand flags in open strace line: " + line)
1964 elif m_chdir:
1965 # We optimistically assume that unfinished chdir's are going to succeed
1966 dir = m_chdir.group(1)
1967 working_directories[pid] = mkPath(working_directories[pid], dir)
1968 elif m_clone:
1969 working_directories[m_clone.group(2)] = working_directories[pid]
1970 elif m_vfork:
1971 working_directories[m_vfork.group(2)] = working_directories[pid]
1972 elif re_strace_open_resumed.match(content):
1973 pass
1974 elif re_strace_chdir_resumed.match(content):
1975 pass
1976 elif re_strace_vfork_unfinished.match(content):
1977 pass
1978 elif re_strace_clone_unfinished.match(content):
1979 pass
1980 elif re_strace_ignore_sigchild.match(content):
1981 pass
1982 elif re_strace_ignore_sigvtalarm.match(content):
1983 pass
1984 elif re_strace_ignore_sigint.match(content):
1985 pass
1986 elif re_strace_ignore_sigfpe.match(content):
1987 pass
1988 elif re_strace_ignore_sigsegv.match(content):
1989 pass
1990 elif re_strace_ignore_sigpipe.match(content):
1991 pass
1992 else:
1993 framework_fail(name, 'strace', "Can't understand strace line: " + line)
1994
1995 def checkForFilesWrittenProblems(file):
1996 foundProblem = False
1997
1998 files_written_inverted = {}
1999 for t in files_written.keys():
2000 for f in files_written[t]:
2001 try:
2002 files_written_inverted[f].append(t)
2003 except:
2004 files_written_inverted[f] = [t]
2005
2006 for f in files_written_inverted.keys():
2007 if len(files_written_inverted[f]) > 1:
2008 if not foundProblem:
2009 foundProblem = True
2010 file.write("\n")
2011 file.write("\nSome files are written by multiple tests:\n")
2012 file.write(" " + f + " (" + str(files_written_inverted[f]) + ")\n")
2013 if foundProblem:
2014 file.write("\n")
2015
2016 # -----
2017
2018 if len(files_written_not_removed) > 0:
2019 file.write("\n")
2020 file.write("\nSome files written but not removed:\n")
2021 tests = list(files_written_not_removed.keys())
2022 tests.sort()
2023 for t in tests:
2024 for f in files_written_not_removed[t]:
2025 file.write(" " + t + ": " + f + "\n")
2026 file.write("\n")
2027
2028 # -----
2029
2030 if len(bad_file_usages) > 0:
2031 file.write("\n")
2032 file.write("\nSome bad file usages:\n")
2033 tests = list(bad_file_usages.keys())
2034 tests.sort()
2035 for t in tests:
2036 for f in bad_file_usages[t]:
2037 file.write(" " + t + ": " + f + "\n")
2038 file.write("\n")
2039
2040 # -----------------------------------------------------------------------------
2041 # checking if ghostscript is available for checking the output of hp2ps
2042
2043 def genGSCmd(psfile):
2044 return config.gs + ' -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE ' + psfile
2045
2046 def gsNotWorking():
2047 global gs_working
2048 print("GhostScript not available for hp2ps tests")
2049
2050 global gs_working
2051 gs_working = 0
2052 if config.have_profiling:
2053 if config.gs != '':
2054 resultGood = runCmdExitCode(genGSCmd(config.confdir + '/good.ps'))
2055 if resultGood == 0:
2056 resultBad = runCmdExitCode(genGSCmd(config.confdir + '/bad.ps'))
2057 if resultBad != 0:
2058 print("GhostScript available for hp2ps tests")
2059 gs_working = 1
2060 else:
2061 gsNotWorking()
2062 else:
2063 gsNotWorking()
2064 else:
2065 gsNotWorking()
2066
2067 def rm_no_fail( file ):
2068 try:
2069 os.remove( file )
2070 except OSError:
2071 pass
2072
2073 def add_suffix( name, suffix ):
2074 if suffix == '':
2075 return name
2076 else:
2077 return name + '.' + suffix
2078
2079 def add_hs_lhs_suffix(name):
2080 if getTestOpts().c_src:
2081 return add_suffix(name, 'c')
2082 elif getTestOpts().cmm_src:
2083 return add_suffix(name, 'cmm')
2084 elif getTestOpts().objc_src:
2085 return add_suffix(name, 'm')
2086 elif getTestOpts().objcpp_src:
2087 return add_suffix(name, 'mm')
2088 elif getTestOpts().literate:
2089 return add_suffix(name, 'lhs')
2090 else:
2091 return add_suffix(name, 'hs')
2092
2093 def replace_suffix( name, suffix ):
2094 base, suf = os.path.splitext(name)
2095 return base + '.' + suffix
2096
2097 def in_testdir( name ):
2098 return (getTestOpts().testdir + '/' + name)
2099
2100 def qualify( name, suff ):
2101 return in_testdir(add_suffix(name, suff))
2102
2103
2104 # Finding the sample output. The filename is of the form
2105 #
2106 # <test>.stdout[-<compiler>][-<version>][-ws-<wordsize>][-<platform>]
2107 #
2108 # and we pick the most specific version available. The <version> is
2109 # the major version of the compiler (e.g. 6.8.2 would be "6.8"). For
2110 # more fine-grained control use if_compiler_lt().
2111 #
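# For illustration only (hypothetical configuration): with compiler_type
# 'ghc', compiler_maj_version '7.8', wordsize '64' and platform
# 'x86_64-unknown-linux', the candidates for T123.stdout range from the most
# specific
#
#     T123.stdout-ghc-7.8-ws-64-x86_64-unknown-linux
#
# down to plain T123.stdout, and the first (most specific) candidate that
# exists on disk wins.
#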
2112 def platform_wordsize_qualify( name, suff ):
2113
2114 basepath = qualify(name, suff)
2115
2116 paths = [(platformSpecific, basepath + comp + vers + ws + plat)
2117 for (platformSpecific, plat) in [(1, '-' + config.platform),
2118 (1, '-' + config.os),
2119 (0, '')]
2120 for ws in ['-ws-' + config.wordsize, '']
2121 for comp in ['-' + config.compiler_type, '']
2122 for vers in ['-' + config.compiler_maj_version, '']]
2123
2124 dir = glob.glob(basepath + '*')
2125 dir = [normalise_slashes_(d) for d in dir]
2126
2127 for (platformSpecific, f) in paths:
2128 if f in dir:
2129 return (platformSpecific,f)
2130
2131 return (0, basepath)
2132
2133 # Clean up prior to the test, so that we can't spuriously conclude
2134 # that it passed on the basis of old run outputs.
2135 def pretest_cleanup(name):
2136 if getTestOpts().outputdir is not None:
2137 odir = in_testdir(getTestOpts().outputdir)
2138 try:
2139 shutil.rmtree(odir)
2140 except:
2141 pass
2142 os.mkdir(odir)
2143
2144 rm_no_fail(qualify(name,'interp.stderr'))
2145 rm_no_fail(qualify(name,'interp.stdout'))
2146 rm_no_fail(qualify(name,'comp.stderr'))
2147 rm_no_fail(qualify(name,'comp.stdout'))
2148 rm_no_fail(qualify(name,'run.stderr'))
2149 rm_no_fail(qualify(name,'run.stdout'))
2150 rm_no_fail(qualify(name,'tix'))
2151 rm_no_fail(qualify(name,'exe.tix'))
2152 # simple_build zaps the following:
2153 # rm_nofail(qualify("o"))
2154 # rm_nofail(qualify(""))
2155 # not interested in the return code
2156
2157 # -----------------------------------------------------------------------------
2158 # Return a list of all the files ending in '.T' below directories roots.
2159
2160 def findTFiles(roots):
2161 # It would be better to use os.walk, but that
2162 # gives backslashes on Windows, which trip the
2163 # testsuite later :-(
2164 return [filename for root in roots for filename in findTFiles_(root)]
2165
2166 def findTFiles_(path):
2167 if os.path.isdir(path):
2168 paths = [path + '/' + x for x in os.listdir(path)]
2169 return findTFiles(paths)
2170 elif path[-2:] == '.T':
2171 return [path]
2172 else:
2173 return []
2174
2175 # -----------------------------------------------------------------------------
2176 # Output a test summary to the specified file object
2177
2178 def summary(t, file):
2179
2180 file.write('\n')
2181 printUnexpectedTests(file, [t.unexpected_passes, t.unexpected_failures, t.unexpected_stat_failures])
2182 file.write('OVERALL SUMMARY for test run started at '
2183 + time.strftime("%c %Z", t.start_time) + '\n'
2184 + str(datetime.timedelta(seconds=
2185 round(time.time() - time.mktime(t.start_time)))).rjust(8)
2186 + ' spent to go through\n'
2187 + repr(t.total_tests).rjust(8)
2188 + ' total tests, which gave rise to\n'
2189 + repr(t.total_test_cases).rjust(8)
2190 + ' test cases, of which\n'
2191 + repr(t.n_tests_skipped).rjust(8)
2192 + ' were skipped\n'
2193 + '\n'
2194 + repr(t.n_missing_libs).rjust(8)
2195 + ' had missing libraries\n'
2196 + repr(t.n_expected_passes).rjust(8)
2197 + ' expected passes\n'
2198 + repr(t.n_expected_failures).rjust(8)
2199 + ' expected failures\n'
2200 + '\n'
2201 + repr(t.n_framework_failures).rjust(8)
2202 + ' caused framework failures\n'
2203 + repr(t.n_unexpected_passes).rjust(8)
2204 + ' unexpected passes\n'
2205 + repr(t.n_unexpected_failures).rjust(8)
2206 + ' unexpected failures\n'
2207 + repr(t.n_unexpected_stat_failures).rjust(8)
2208 + ' unexpected stat failures\n'
2209 + '\n')
2210
2211 if t.n_unexpected_passes > 0:
2212 file.write('Unexpected passes:\n')
2213 printPassingTestInfosSummary(file, t.unexpected_passes)
2214
2215 if t.n_unexpected_failures > 0:
2216 file.write('Unexpected failures:\n')
2217 printFailingTestInfosSummary(file, t.unexpected_failures)
2218
2219 if t.n_unexpected_stat_failures > 0:
2220 file.write('Unexpected stat failures:\n')
2221 printFailingTestInfosSummary(file, t.unexpected_stat_failures)
2222
2223 if config.check_files_written:
2224 checkForFilesWrittenProblems(file)
2225
2226 if stopping():
2227 file.write('WARNING: Testsuite run was terminated early\n')
2228
2229 def printUnexpectedTests(file, testInfoss):
2230 unexpected = []
2231 for testInfos in testInfoss:
2232 directories = testInfos.keys()
2233 for directory in directories:
2234 tests = list(testInfos[directory].keys())
2235 unexpected += tests
2236 if unexpected != []:
2237 file.write('Unexpected results from:\n')
2238 file.write('TEST="' + ' '.join(unexpected) + '"\n')
2239 file.write('\n')
2240
2241 def printPassingTestInfosSummary(file, testInfos):
2242 directories = list(testInfos.keys())
2243 directories.sort()
2244 maxDirLen = max(len(x) for x in directories)
2245 for directory in directories:
2246 tests = list(testInfos[directory].keys())
2247 tests.sort()
2248 for test in tests:
2249 file.write(' ' + directory.ljust(maxDirLen + 2) + test + \
2250 ' (' + ','.join(testInfos[directory][test]) + ')\n')
2251 file.write('\n')
2252
2253 def printFailingTestInfosSummary(file, testInfos):
2254 directories = list(testInfos.keys())
2255 directories.sort()
2256 maxDirLen = max(len(d) for d in directories)
2257 for directory in directories:
2258 tests = list(testInfos[directory].keys())
2259 tests.sort()
2260 for test in tests:
2261 reasons = testInfos[directory][test].keys()
2262 for reason in reasons:
2263 file.write(' ' + directory.ljust(maxDirLen + 2) + test + \
2264 ' [' + reason + ']' + \
2265 ' (' + ','.join(testInfos[directory][test][reason]) + ')\n')
2266 file.write('\n')
2267
2268 def getStdout(cmd):
2269 if have_subprocess:
2270 p = subprocess.Popen(cmd,
2271 stdout=subprocess.PIPE,
2272 stderr=subprocess.PIPE)
2273 (stdout, stderr) = p.communicate()
2274 r = p.wait()
2275 if r != 0:
2276 raise Exception("Command failed: " + str(cmd))
2277 if stderr:
2278 raise Exception("stderr from command: " + str(cmd))
2279 return stdout
2280 else:
2281 raise Exception("Need subprocess to get stdout, but don't have it")