Testsuite: only print msg when timeout kills process unexpectedly
[ghc.git] / testsuite / driver / testlib.py
1 #
2 # (c) Simon Marlow 2002
3 #
4
5 from __future__ import print_function
6
7 import shutil
8 import sys
9 import os
10 import errno
11 import string
12 import re
13 import traceback
14 import time
15 import datetime
16 import copy
17 import glob
18 from math import ceil, trunc
19 import collections
20 import subprocess
21
22 from testglobals import *
23 from testutil import *
24
25 if config.use_threads:
26 import threading
27 try:
28 import thread
29 except ImportError: # Python 3
30 import _thread as thread
31
32 global wantToStop
33 wantToStop = False
34 def stopNow():
35 global wantToStop
36 wantToStop = True
37 def stopping():
38 return wantToStop
39
40 # Options valid for the current test only (these get reset to
41 # testdir_testopts after each test).
42
43 global testopts_local
44 if config.use_threads:
45 testopts_local = threading.local()
46 else:
47 class TestOpts_Local:
48 pass
49 testopts_local = TestOpts_Local()
50
51 def getTestOpts():
52 return testopts_local.x
53
54 def setLocalTestOpts(opts):
55 global testopts_local
56 testopts_local.x=opts
57
58 def isStatsTest():
59 opts = getTestOpts()
60 return len(opts.compiler_stats_range_fields) > 0 or len(opts.stats_range_fields) > 0
61
62
63 # This can be called at the top of a file of tests, to set default test options
64 # for the following tests.
65 def setTestOpts( f ):
66 global thisdir_settings
67 thisdir_settings = [thisdir_settings, f]
68
69 # -----------------------------------------------------------------------------
70 # Canned setup functions for common cases. eg. for a test you might say
71 #
72 # test('test001', normal, compile, [''])
73 #
74 # to run it without any options, but change it to
75 #
76 # test('test001', expect_fail, compile, [''])
77 #
78 # to expect failure for this test.
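#
# Setup functions can also be combined by passing a list; a sketch
# (hypothetical test name and ticket number, not taken from this
# repository):
#
#   test('test002', [expect_broken(1234), omit_ways(['ghci'])],
#        compile_and_run, [''])
#
# runs the test in every run way except ghci and records it as broken
# by ticket #1234.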
79
80 def normal( name, opts ):
81 return;
82
83 def skip( name, opts ):
84 opts.skip = 1
85
86 def expect_fail( name, opts ):
87 # The compiler, testdriver, OS or platform is missing a certain
88 # feature, and we don't plan to or can't fix it now or in the
89 # future.
90 opts.expect = 'fail';
91
92 def reqlib( lib ):
93 return lambda name, opts, l=lib: _reqlib (name, opts, l )
94
95 # Cache the results of looking to see if we have a library or not.
96 # This makes quite a difference, especially on Windows.
97 have_lib = {}
98
99 def _reqlib( name, opts, lib ):
100 if lib in have_lib:
101 got_it = have_lib[lib]
102 else:
103 cmd = strip_quotes(config.ghc_pkg)
104 p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
105 stdout=subprocess.PIPE,
106 stderr=subprocess.PIPE)
107 # read from stdout and stderr to avoid blocking due to
108 # buffers filling
109 p.communicate()
110 r = p.wait()
111 got_it = r == 0
112 have_lib[lib] = got_it
113
114 if not got_it:
115 opts.expect = 'missing-lib'
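
# A sketch of how reqlib is used in a .T file (hypothetical test and
# library names):
#
#   test('test003', reqlib('random'), compile_and_run, [''])
#
# If 'ghc-pkg describe random' fails, the test is reported as
# 'missing-lib' instead of an unexpected failure.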
116
117 def req_haddock( name, opts ):
118 if not config.haddock:
119 opts.expect = 'missing-lib'
120
121 def req_profiling( name, opts ):
122 if not config.have_profiling:
123 opts.expect = 'fail'
124
125 def req_shared_libs( name, opts ):
126 if not config.have_shared_libs:
127 opts.expect = 'fail'
128
129 def req_interp( name, opts ):
130 if not config.have_interp:
131 opts.expect = 'fail'
132
133 def req_smp( name, opts ):
134 if not config.have_smp:
135 opts.expect = 'fail'
136
137 def ignore_output( name, opts ):
138 opts.ignore_output = 1
139
140 def no_stdin( name, opts ):
141 opts.no_stdin = 1
142
143 def combined_output( name, opts ):
144 opts.combined_output = True
145
146 # -----
147
148 def expect_fail_for( ways ):
149 return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )
150
151 def _expect_fail_for( name, opts, ways ):
152 opts.expect_fail_for = ways
153
154 def expect_broken( bug ):
155 # This test is expected not to work due to the indicated trac bug
156 # number.
157 return lambda name, opts, b=bug: _expect_broken (name, opts, b )
158
159 def _expect_broken( name, opts, bug ):
160 record_broken(name, opts, bug)
161 opts.expect = 'fail';
162
163 def expect_broken_for( bug, ways ):
164 return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )
165
166 def _expect_broken_for( name, opts, bug, ways ):
167 record_broken(name, opts, bug)
168 opts.expect_fail_for = ways
169
170 def record_broken(name, opts, bug):
171 global brokens
172 me = (bug, opts.testdir, name)
173 if not me in brokens:
174 brokens.append(me)
175
176 def _expect_pass(way):
177 # Helper function. Not intended for use in .T files.
178 opts = getTestOpts()
179 return opts.expect == 'pass' and way not in opts.expect_fail_for
180
181 # -----
182
183 def omit_ways( ways ):
184 return lambda name, opts, w=ways: _omit_ways( name, opts, w )
185
186 def _omit_ways( name, opts, ways ):
187 opts.omit_ways = ways
188
189 # -----
190
191 def only_ways( ways ):
192 return lambda name, opts, w=ways: _only_ways( name, opts, w )
193
194 def _only_ways( name, opts, ways ):
195 opts.only_ways = ways
196
197 # -----
198
199 def extra_ways( ways ):
200 return lambda name, opts, w=ways: _extra_ways( name, opts, w )
201
202 def _extra_ways( name, opts, ways ):
203 opts.extra_ways = ways
204
205 # -----
206
207 def only_compiler_types( _compiler_types ):
208 # Don't delete yet. The libraries unix, stm and hpc still call this function.
209 return lambda _name, _opts: None
210
211 # -----
212
213 def set_stdin( file ):
214 return lambda name, opts, f=file: _set_stdin(name, opts, f);
215
216 def _set_stdin( name, opts, f ):
217 opts.stdin = f
218
219 # -----
220
221 def exit_code( val ):
222 return lambda name, opts, v=val: _exit_code(name, opts, v);
223
224 def _exit_code( name, opts, v ):
225 opts.exit_code = v
226
227 def signal_exit_code( val ):
228 if opsys('solaris2'):
229 return exit_code( val );
230 else:
231 # When an application running on Linux receives a fatal error
232 # signal, its exit code is encoded as 128 + the signal
233 # value. See http://www.tldp.org/LDP/abs/html/exitcodes.html
234 # I assume that Mac OS X behaves in the same way; at least the
235 # Mac OS X builder's behaviour suggests this.
236 return exit_code( val+128 );
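
# A sketch of using signal_exit_code (hypothetical test name): a
# program expected to die with SIGABRT (signal 6) would be written as
#
#   test('test004', signal_exit_code(6), compile_and_run, [''])
#
# which expects exit code 134 (128 + 6) on Linux, but a plain exit
# code of 6 on Solaris.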
237
238 # -----
239
240 def compile_timeout_multiplier( val ):
241 return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)
242
243 def _compile_timeout_multiplier( name, opts, v ):
244 opts.compile_timeout_multiplier = v
245
246 def run_timeout_multiplier( val ):
247 return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)
248
249 def _run_timeout_multiplier( name, opts, v ):
250 opts.run_timeout_multiplier = v
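
# A sketch of the timeout multipliers (hypothetical test name):
#
#   test('test005', run_timeout_multiplier(2.0), compile_and_run, [''])
#
# gives the compiled program twice the configured timeout when it is
# run; compile_timeout_multiplier does the same for the compilation
# step.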
251
252 # -----
253
254 def extra_run_opts( val ):
255 return lambda name, opts, v=val: _extra_run_opts(name, opts, v);
256
257 def _extra_run_opts( name, opts, v ):
258 opts.extra_run_opts = v
259
260 # -----
261
262 def extra_hc_opts( val ):
263 return lambda name, opts, v=val: _extra_hc_opts(name, opts, v);
264
265 def _extra_hc_opts( name, opts, v ):
266 opts.extra_hc_opts = v
267
268 # -----
269
270 def extra_clean( files ):
271 assert not isinstance(files, str), files
272 return lambda name, opts, v=files: _extra_clean(name, opts, v);
273
274 def _extra_clean( name, opts, v ):
275 opts.clean_files = v
276
277 # -----
278
279 def stats_num_field( field, expecteds ):
280 return lambda name, opts, f=field, e=expecteds: _stats_num_field(name, opts, f, e);
281
282 def _stats_num_field( name, opts, field, expecteds ):
283 if field in opts.stats_range_fields:
284 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
285
286 if type(expecteds) is list:
287 for (b, expected, dev) in expecteds:
288 if b:
289 opts.stats_range_fields[field] = (expected, dev)
290 return
291 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
292
293 else:
294 (expected, dev) = expecteds
295 opts.stats_range_fields[field] = (expected, dev)
296
297 def compiler_stats_num_field( field, expecteds ):
298 return lambda name, opts, f=field, e=expecteds: _compiler_stats_num_field(name, opts, f, e);
299
300 def _compiler_stats_num_field( name, opts, field, expecteds ):
301 if field in opts.compiler_stats_range_fields:
302 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
303
304 # Compiler performance numbers change when debugging is on, making the results
305 # useless and confusing. Therefore, skip if debugging is on.
306 if compiler_debugged():
307 skip(name, opts)
308
309 for (b, expected, dev) in expecteds:
310 if b:
311 opts.compiler_stats_range_fields[field] = (expected, dev)
312 return
313
314 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
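
# A sketch of the expected-value format (hypothetical test name and
# numbers). Each triple is (guard, expected_value, allowed_deviation_%),
# and the first triple whose guard is True is used:
#
#   test('test006',
#        stats_num_field('bytes allocated',
#                        [(wordsize(64), 1000000, 5),
#                         (wordsize(32),  500000, 5)]),
#        compile_and_run, [''])
#
# compiler_stats_num_field works the same way but checks the
# compiler's own -t stats, and is skipped for a debugged compiler.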
315
316 # -----
317
318 def when(b, f):
319 # When list_broken is on, we want to see all expect_broken calls,
320 # so we always do f
321 if b or config.list_broken:
322 return f
323 else:
324 return normal
325
326 def unless(b, f):
327 return when(not b, f)
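
# A sketch of conditional setups (hypothetical test name):
#
#   test('test007', when(opsys('mingw32'), skip), compile_and_run, [''])
#
# skips the test on Windows and runs it normally everywhere else;
# unless(...) is the same with the condition negated.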
328
329 def doing_ghci():
330 return 'ghci' in config.run_ways
331
332 def ghci_dynamic( ):
333 return config.ghc_dynamic
334
335 def fast():
336 return config.speed == 2
337
338 def platform( plat ):
339 return config.platform == plat
340
341 def opsys( os ):
342 return config.os == os
343
344 def arch( arch ):
345 return config.arch == arch
346
347 def wordsize( ws ):
348 return config.wordsize == str(ws)
349
350 def msys( ):
351 return config.msys
352
353 def cygwin( ):
354 return config.cygwin
355
356 def have_vanilla( ):
357 return config.have_vanilla
358
359 def have_dynamic( ):
360 return config.have_dynamic
361
362 def have_profiling( ):
363 return config.have_profiling
364
365 def in_tree_compiler( ):
366 return config.in_tree_compiler
367
368 def compiler_lt( compiler, version ):
369 assert compiler == 'ghc'
370 return version_lt(config.compiler_version, version)
371
372 def compiler_le( compiler, version ):
373 assert compiler == 'ghc'
374 return version_le(config.compiler_version, version)
375
376 def compiler_gt( compiler, version ):
377 assert compiler == 'ghc'
378 return version_gt(config.compiler_version, version)
379
380 def compiler_ge( compiler, version ):
381 assert compiler == 'ghc'
382 return version_ge(config.compiler_version, version)
383
384 def unregisterised( ):
385 return config.unregisterised
386
387 def compiler_profiled( ):
388 return config.compiler_profiled
389
390 def compiler_debugged( ):
391 return config.compiler_debugged
392
393 def tag( t ):
394 return t in config.compiler_tags
395
396 # ---
397
398 def high_memory_usage(name, opts):
399 opts.alone = True
400
401 # If a test is for a multi-CPU race, then running the test alone
402 # increases the chance that we'll actually see it.
403 def multi_cpu_race(name, opts):
404 opts.alone = True
405
406 # ---
407 def literate( name, opts ):
408 opts.literate = 1;
409
410 def c_src( name, opts ):
411 opts.c_src = 1;
412
413 def objc_src( name, opts ):
414 opts.objc_src = 1;
415
416 def objcpp_src( name, opts ):
417 opts.objcpp_src = 1;
418
419 def cmm_src( name, opts ):
420 opts.cmm_src = 1;
421
422 def outputdir( odir ):
423 return lambda name, opts, d=odir: _outputdir(name, opts, d)
424
425 def _outputdir( name, opts, odir ):
426 opts.outputdir = odir;
427
428 # ----
429
430 def pre_cmd( cmd ):
431 return lambda name, opts, c=cmd: _pre_cmd(name, opts, c)
432
433 def _pre_cmd( name, opts, cmd ):
434 opts.pre_cmd = cmd
435
436 # ----
437
438 def clean_cmd( cmd ):
439 return lambda name, opts, c=cmd: _clean_cmd(name, opts, c)
440
441 def _clean_cmd( name, opts, cmd ):
442 opts.clean_cmd = cmd
443
444 # ----
445
446 def cmd_prefix( prefix ):
447 return lambda name, opts, p=prefix: _cmd_prefix(name, opts, p)
448
449 def _cmd_prefix( name, opts, prefix ):
450 opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd;
451
452 # ----
453
454 def cmd_wrapper( fun ):
455 return lambda name, opts, f=fun: _cmd_wrapper(name, opts, f)
456
457 def _cmd_wrapper( name, opts, fun ):
458 opts.cmd_wrapper = fun
459
460 # ----
461
462 def compile_cmd_prefix( prefix ):
463 return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, p)
464
465 def _compile_cmd_prefix( name, opts, prefix ):
466 opts.compile_cmd_prefix = prefix
467
468 # ----
469
470 def check_stdout( f ):
471 return lambda name, opts, f=f: _check_stdout(name, opts, f)
472
473 def _check_stdout( name, opts, f ):
474 opts.check_stdout = f
475
476 # ----
477
478 def normalise_slashes( name, opts ):
479 _normalise_fun(name, opts, normalise_slashes_)
480
481 def normalise_exe( name, opts ):
482 _normalise_fun(name, opts, normalise_exe_)
483
484 def normalise_fun( *fs ):
485 return lambda name, opts: _normalise_fun(name, opts, fs)
486
487 def _normalise_fun( name, opts, *fs ):
488 opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)
489
490 def normalise_errmsg_fun( *fs ):
491 return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)
492
493 def _normalise_errmsg_fun( name, opts, *fs ):
494 opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)
495
496 def normalise_version_( *pkgs ):
497 def normalise_version__( str ):
498 return re.sub('(' + '|'.join(map(re.escape,pkgs)) + ')-[0-9.]+',
499 '\\1-<VERSION>', str)
500 return normalise_version__
501
502 def normalise_version( *pkgs ):
503 def normalise_version__( name, opts ):
504 _normalise_fun(name, opts, normalise_version_(*pkgs))
505 _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
506 return normalise_version__
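
# A sketch of how the version normalisers are used (hypothetical test
# and package names):
#
#   test('test008', normalise_version('base', 'ghc-prim'), compile, [''])
#
# rewrites e.g. "base-4.8.0.0" to "base-<VERSION>" in both stdout and
# stderr before they are compared with the expected output.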
507
508 def normalise_drive_letter(name, opts):
509 # Windows only. Change D:\\ to C:\\.
510 _normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))
511
512 def join_normalisers(*a):
513 """
514 Compose functions, flattening sequences.
515
516 join_normalisers(f1,[f2,f3],f4)
517
518 is the same as
519
520 lambda x: f1(f2(f3(f4(x))))
521 """
522
523 def flatten(l):
524 """
525 Taken from http://stackoverflow.com/a/2158532/946226
526 """
527 for el in l:
528 if isinstance(el, collections.Iterable) and not isinstance(el, basestring):
529 for sub in flatten(el):
530 yield sub
531 else:
532 yield el
533
534 a = flatten(a)
535
536 fn = lambda x:x # identity function
537 for f in a:
538 assert callable(f)
539 fn = lambda x,f=f,fn=fn: fn(f(x))
540 return fn
541
542 # ----
543 # Function for composing two opt-fns together
544
545 def executeSetups(fs, name, opts):
546 if type(fs) is list:
547 # If we have a list of setups, then execute each one
548 for f in fs:
549 executeSetups(f, name, opts)
550 else:
551 # fs is a single function, so just apply it
552 fs(name, opts)
553
554 # -----------------------------------------------------------------------------
555 # The current directory of tests
556
557 def newTestDir( dir ):
558 global thisdir_settings
559 # reset the options for this test directory
560 thisdir_settings = lambda name, opts, dir=dir: _newTestDir( name, opts, dir )
561
562 def _newTestDir( name, opts, dir ):
563 opts.testdir = dir
564 opts.compiler_always_flags = config.compiler_always_flags
565
566 # -----------------------------------------------------------------------------
567 # Actually doing tests
568
569 parallelTests = []
570 aloneTests = []
571 allTestNames = set([])
572
573 def runTest (opts, name, func, args):
574 ok = 0
575
576 if config.use_threads:
577 t.thread_pool.acquire()
578 try:
579 while config.threads<(t.running_threads+1):
580 t.thread_pool.wait()
581 t.running_threads = t.running_threads+1
582 ok=1
583 t.thread_pool.release()
584 thread.start_new_thread(test_common_thread, (name, opts, func, args))
585 except:
586 if not ok:
587 t.thread_pool.release()
588 else:
589 test_common_work (name, opts, func, args)
590
591 # name :: String
592 # setup :: TestOpts -> IO ()
593 def test (name, setup, func, args):
594 if config.only and name not in config.only:
595 return
596
597 global aloneTests
598 global parallelTests
599 global allTestNames
600 global thisdir_settings
601 if name in allTestNames:
602 framework_fail(name, 'duplicate', 'There are multiple tests with this name')
603 if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
604 framework_fail(name, 'bad_name', 'This test has an invalid name')
605
606 # Make a deep copy of the default_testopts, as we need our own copy
607 # of any dictionaries etc inside it. Otherwise, if one test modifies
608 # them, all tests will see the modified version!
609 myTestOpts = copy.deepcopy(default_testopts)
610
611 executeSetups([thisdir_settings, setup], name, myTestOpts)
612
613 thisTest = lambda : runTest(myTestOpts, name, func, args)
614 if myTestOpts.alone:
615 aloneTests.append(thisTest)
616 else:
617 parallelTests.append(thisTest)
618 allTestNames.add(name)
619
620 if config.use_threads:
621 def test_common_thread(name, opts, func, args):
622 t.lock.acquire()
623 try:
624 test_common_work(name,opts,func,args)
625 finally:
626 t.lock.release()
627 t.thread_pool.acquire()
628 t.running_threads = t.running_threads - 1
629 t.thread_pool.notify()
630 t.thread_pool.release()
631
632 def get_package_cache_timestamp():
633 if config.package_conf_cache_file == '':
634 return 0.0
635 else:
636 try:
637 return os.stat(config.package_conf_cache_file).st_mtime
638 except:
639 return 0.0
640
641
642 def test_common_work (name, opts, func, args):
643 try:
644 t.total_tests = t.total_tests+1
645 setLocalTestOpts(opts)
646
647 package_conf_cache_file_start_timestamp = get_package_cache_timestamp()
648
649 # All the ways we might run this test
650 if func == compile or func == multimod_compile:
651 all_ways = config.compile_ways
652 elif func == compile_and_run or func == multimod_compile_and_run:
653 all_ways = config.run_ways
654 elif func == ghci_script:
655 if 'ghci' in config.run_ways:
656 all_ways = ['ghci']
657 else:
658 all_ways = []
659 else:
660 all_ways = ['normal']
661
662 # A test itself can request extra ways by setting opts.extra_ways
663 all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]
664
665 t.total_test_cases = t.total_test_cases + len(all_ways)
666
667 ok_way = lambda way: \
668 not getTestOpts().skip \
669 and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
670 and (config.cmdline_ways == [] or way in config.cmdline_ways) \
671 and (not (config.skip_perf_tests and isStatsTest())) \
672 and way not in getTestOpts().omit_ways
673
674 # The ways we are asked to run (any other way is skipped below)
675 do_ways = list(filter (ok_way,all_ways))
676
677 # Only run all ways in slow mode.
678 # See Note [validate and testsuite speed] in toplevel Makefile.
679 if config.speed > 0:
680 do_ways = do_ways[:1]
681
682 if not config.clean_only:
683 # Run the required tests...
684 for way in do_ways:
685 if stopping():
686 break
687 do_test (name, way, func, args)
688
689 for way in all_ways:
690 if way not in do_ways:
691 skiptest (name,way)
692
693 if getTestOpts().cleanup != '' and (config.clean_only or do_ways != []):
694 pretest_cleanup(name)
695 clean([name + suff for suff in [
696 '', '.exe', '.exe.manifest', '.genscript',
697 '.stderr.normalised', '.stdout.normalised',
698 '.run.stderr.normalised', '.run.stdout.normalised',
699 '.comp.stderr.normalised', '.comp.stdout.normalised',
700 '.interp.stderr.normalised', '.interp.stdout.normalised',
701 '.stats', '.comp.stats',
702 '.hi', '.o', '.prof', '.exe.prof', '.hc',
703 '_stub.h', '_stub.c', '_stub.o',
704 '.hp', '.exe.hp', '.ps', '.aux', '.hcr', '.eventlog']])
705
706 if func == multi_compile or func == multi_compile_fail:
707 extra_mods = args[1]
708 clean([replace_suffix(fx[0],'o') for fx in extra_mods])
709 clean([replace_suffix(fx[0], 'hi') for fx in extra_mods])
710
711
712 clean(getTestOpts().clean_files)
713
714 if getTestOpts().outputdir != None:
715 odir = in_testdir(getTestOpts().outputdir)
716 try:
717 shutil.rmtree(odir)
718 except:
719 pass
720
721 try:
722 shutil.rmtree(in_testdir('.hpc.' + name))
723 except:
724 pass
725
726 try:
727 cleanCmd = getTestOpts().clean_cmd
728 if cleanCmd != None:
729 result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + cleanCmd)
730 if result != 0:
731 framework_fail(name, 'cleaning', 'clean-command failed: ' + str(result))
732 except:
733 framework_fail(name, 'cleaning', 'clean-command exception')
734
735 package_conf_cache_file_end_timestamp = get_package_cache_timestamp();
736
737 if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
738 framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))
739
740 try:
741 for f in files_written[name]:
742 if os.path.exists(f):
743 try:
744 if not f in files_written_not_removed[name]:
745 files_written_not_removed[name].append(f)
746 except:
747 files_written_not_removed[name] = [f]
748 except:
749 pass
750 except Exception as e:
751 framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
752
753 def clean(strs):
754 for str in strs:
755 if (str.endswith('.package.conf') or
756 str.startswith('package.conf.') and not str.endswith('/*')):
757 # Package confs are directories now.
758 str += '/*'
759
760 for name in glob.glob(in_testdir(str)):
761 clean_full_path(name)
762
763 def clean_full_path(name):
764 try:
765 # Remove files...
766 os.remove(name)
767 except OSError as e1:
768 try:
769 # ... and empty directories
770 os.rmdir(name)
771 except OSError as e2:
772 # We don't want to fail here, but we do want to know
773 # what went wrong, so print out the exceptions.
774 # ENOENT isn't a problem, though, as we clean files
775 # that don't necessarily exist.
776 if e1.errno != errno.ENOENT:
777 print(e1)
778 if e2.errno != errno.ENOENT:
779 print(e2)
780
781 def do_test(name, way, func, args):
782 full_name = name + '(' + way + ')'
783
784 try:
785 if_verbose(2, "=====> %s %d of %d %s " % \
786 (full_name, t.total_tests, len(allTestNames), \
787 [t.n_unexpected_passes, \
788 t.n_unexpected_failures, \
789 t.n_framework_failures]))
790
791 if config.use_threads:
792 t.lock.release()
793
794 try:
795 preCmd = getTestOpts().pre_cmd
796 if preCmd != None:
797 result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + preCmd)
798 if result != 0:
799 framework_fail(name, way, 'pre-command failed: ' + str(result))
800 except:
801 framework_fail(name, way, 'pre-command exception')
802
803 try:
804 result = func(*[name,way] + args)
805 finally:
806 if config.use_threads:
807 t.lock.acquire()
808
809 if getTestOpts().expect != 'pass' and \
810 getTestOpts().expect != 'fail' and \
811 getTestOpts().expect != 'missing-lib':
812 framework_fail(name, way, 'bad expected ' + getTestOpts().expect)
813
814 try:
815 passFail = result['passFail']
816 except:
817 passFail = 'No passFail found'
818
819 if passFail == 'pass':
820 if _expect_pass(way):
821 t.n_expected_passes = t.n_expected_passes + 1
822 if name in t.expected_passes:
823 t.expected_passes[name].append(way)
824 else:
825 t.expected_passes[name] = [way]
826 else:
827 if_verbose(1, '*** unexpected pass for %s' % full_name)
828 t.n_unexpected_passes = t.n_unexpected_passes + 1
829 addPassingTestInfo(t.unexpected_passes, getTestOpts().testdir, name, way)
830 elif passFail == 'fail':
831 if _expect_pass(way):
832 reason = result['reason']
833 tag = result.get('tag')
834 if tag == 'stat':
835 if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
836 t.n_unexpected_stat_failures = t.n_unexpected_stat_failures + 1
837 addFailingTestInfo(t.unexpected_stat_failures, getTestOpts().testdir, name, reason, way)
838 else:
839 if_verbose(1, '*** unexpected failure for %s' % full_name)
840 t.n_unexpected_failures = t.n_unexpected_failures + 1
841 addFailingTestInfo(t.unexpected_failures, getTestOpts().testdir, name, reason, way)
842 else:
843 if getTestOpts().expect == 'missing-lib':
844 t.n_missing_libs = t.n_missing_libs + 1
845 if name in t.missing_libs:
846 t.missing_libs[name].append(way)
847 else:
848 t.missing_libs[name] = [way]
849 else:
850 t.n_expected_failures = t.n_expected_failures + 1
851 if name in t.expected_failures:
852 t.expected_failures[name].append(way)
853 else:
854 t.expected_failures[name] = [way]
855 else:
856 framework_fail(name, way, 'bad result ' + passFail)
857 except KeyboardInterrupt:
858 stopNow()
859 except:
860 framework_fail(name, way, 'do_test exception')
861 traceback.print_exc()
862
863 def addPassingTestInfo (testInfos, directory, name, way):
864 directory = re.sub('^\\.[/\\\\]', '', directory)
865
866 if not directory in testInfos:
867 testInfos[directory] = {}
868
869 if not name in testInfos[directory]:
870 testInfos[directory][name] = []
871
872 testInfos[directory][name].append(way)
873
874 def addFailingTestInfo (testInfos, directory, name, reason, way):
875 directory = re.sub('^\\.[/\\\\]', '', directory)
876
877 if not directory in testInfos:
878 testInfos[directory] = {}
879
880 if not name in testInfos[directory]:
881 testInfos[directory][name] = {}
882
883 if not reason in testInfos[directory][name]:
884 testInfos[directory][name][reason] = []
885
886 testInfos[directory][name][reason].append(way)
887
888 def skiptest (name, way):
889 # print 'Skipping test \"', name, '\"'
890 t.n_tests_skipped = t.n_tests_skipped + 1
891 if name in t.tests_skipped:
892 t.tests_skipped[name].append(way)
893 else:
894 t.tests_skipped[name] = [way]
895
896 def framework_fail( name, way, reason ):
897 full_name = name + '(' + way + ')'
898 if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
899 t.n_framework_failures = t.n_framework_failures + 1
900 if name in t.framework_failures:
901 t.framework_failures[name].append(way)
902 else:
903 t.framework_failures[name] = [way]
904
905 def badResult(result):
906 try:
907 if result['passFail'] == 'pass':
908 return False
909 return True
910 except:
911 return True
912
913 def passed():
914 return {'passFail': 'pass'}
915
916 def failBecause(reason, tag=None):
917 return {'passFail': 'fail', 'reason': reason, 'tag': tag}
918
919 # -----------------------------------------------------------------------------
920 # Generic command tests
921
922 # A generic command test is expected to run and exit successfully.
923 #
924 # The expected exit code can be changed via exit_code() as normal, and
925 # the expected stdout/stderr are stored in <testname>.stdout and
926 # <testname>.stderr. The output of the command can be ignored
927 # altogether by applying the ignore_output setup function to the
928 # test.
929
930 def run_command( name, way, cmd ):
931 return simple_run( name, '', cmd, '' )
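
# A sketch of a generic command test (hypothetical test name and
# Makefile target):
#
#   test('test009', normal, run_command,
#        ['$MAKE -s --no-print-directory test009'])
#
# The command runs in the test directory; its exit code must be 0
# unless changed with exit_code(), and its output is compared against
# test009.stdout / test009.stderr (a missing expected file is treated
# as empty).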
932
933 # -----------------------------------------------------------------------------
934 # GHCi tests
935
936 def ghci_script_without_flag(flag):
937 def apply(name, way, script):
938 overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
939 return ghci_script_override_default_flags(overrides)(name, way, script)
940
941 return apply
942
943 def ghci_script_override_default_flags(overrides):
944 def apply(name, way, script):
945 return ghci_script(name, way, script, overrides)
946
947 return apply
948
949 def ghci_script( name, way, script, override_flags = None ):
950 # filter out -fforce-recomp from compiler_always_flags, because we're
951 # actually testing the recompilation behaviour in the GHCi tests.
952 flags = ' '.join(get_compiler_flags(override_flags, noforce=True))
953
954 way_flags = ' '.join(config.way_flags(name)['ghci'])
955
956 # We pass HC and HC_OPTS as environment variables, so that the
957 # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
958 cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {flags} {way_flags}'
959 ).format(flags=flags, way_flags=way_flags)
960
961 getTestOpts().stdin = script
962 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
963
964 # -----------------------------------------------------------------------------
965 # Compile-only tests
966
967 def compile_override_default_flags(overrides):
968 def apply(name, way, extra_opts):
969 return do_compile(name, way, 0, '', [], extra_opts, overrides)
970
971 return apply
972
973 def compile_fail_override_default_flags(overrides):
974 def apply(name, way, extra_opts):
975 return do_compile(name, way, 1, '', [], extra_opts, overrides)
976
977 return apply
978
979 def compile_without_flag(flag):
980 def apply(name, way, extra_opts):
981 overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
982 return compile_override_default_flags(overrides)(name, way, extra_opts)
983
984 return apply
985
986 def compile_fail_without_flag(flag):
987 def apply(name, way, extra_opts):
988 overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
989 return compile_fail_override_default_flags(overrides)(name, way, extra_opts)
990
991 return apply
992
993 def compile( name, way, extra_hc_opts ):
994 return do_compile( name, way, 0, '', [], extra_hc_opts )
995
996 def compile_fail( name, way, extra_hc_opts ):
997 return do_compile( name, way, 1, '', [], extra_hc_opts )
998
999 def multimod_compile( name, way, top_mod, extra_hc_opts ):
1000 return do_compile( name, way, 0, top_mod, [], extra_hc_opts )
1001
1002 def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
1003 return do_compile( name, way, 1, top_mod, [], extra_hc_opts )
1004
1005 def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
1006 return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)
1007
1008 def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
1009 return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
1010
1011 def do_compile( name, way, should_fail, top_mod, extra_mods, extra_hc_opts, override_flags = None ):
1012 # print 'Compile only, extra args = ', extra_hc_opts
1013 pretest_cleanup(name)
1014
1015 result = extras_build( way, extra_mods, extra_hc_opts )
1016 if badResult(result):
1017 return result
1018 extra_hc_opts = result['hc_opts']
1019
1020 force = 0
1021 if extra_mods:
1022 force = 1
1023 result = simple_build( name, way, extra_hc_opts, should_fail, top_mod, 0, 1, force, override_flags )
1024
1025 if badResult(result):
1026 return result
1027
1028 # the actual stderr should always match the expected, regardless
1029 # of whether we expected the compilation to fail or not (successful
1030 # compilations may generate warnings).
1031
1032 (_, expected_stderr_file) = find_expected_file(name, 'stderr')
1033 actual_stderr_file = add_suffix(name, 'comp.stderr')
1034
1035 if not compare_outputs(way, 'stderr',
1036 join_normalisers(getTestOpts().extra_errmsg_normaliser,
1037 normalise_errmsg),
1038 expected_stderr_file, actual_stderr_file,
1039 whitespace_normaliser=normalise_whitespace):
1040 return failBecause('stderr mismatch')
1041
1042 # no problems found, this test passed
1043 return passed()
1044
1045 def compile_cmp_asm( name, way, extra_hc_opts ):
1046 print('Compile only, extra args = ', extra_hc_opts)
1047 pretest_cleanup(name)
1048 result = simple_build( name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0, 0)
1049
1050 if badResult(result):
1051 return result
1052
1053 # The actual assembly output (the .s file kept by -keep-s-files)
1054 # should always match the expected assembly, modulo the
1055 # normalisations applied below.
1056
1057 (_, expected_asm_file) = find_expected_file(name, 'asm')
1058 actual_asm_file = add_suffix(name, 's')
1059
1060 if not compare_outputs(way, 'asm',
1061 join_normalisers(normalise_errmsg, normalise_asm),
1062 expected_asm_file, actual_asm_file):
1063 return failBecause('asm mismatch')
1064
1065 # no problems found, this test passed
1066 return passed()
1067
1068 # -----------------------------------------------------------------------------
1069 # Compile-and-run tests
1070
1071 def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts ):
1072 # print 'Compile and run, extra args = ', extra_hc_opts
1073 pretest_cleanup(name)
1074
1075 result = extras_build( way, extra_mods, extra_hc_opts )
1076 if badResult(result):
1077 return result
1078 extra_hc_opts = result['hc_opts']
1079
1080 if way == 'ghci': # interpreted...
1081 return interpreter_run( name, way, extra_hc_opts, 0, top_mod )
1082 else: # compiled...
1083 force = 0
1084 if extra_mods:
1085 force = 1
1086
1087 result = simple_build( name, way, extra_hc_opts, 0, top_mod, 1, 1, force)
1088 if badResult(result):
1089 return result
1090
1091 cmd = './' + name;
1092
1093 # we don't check the compiler's stderr for a compile-and-run test
1094 return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1095
1096 def compile_and_run( name, way, extra_hc_opts ):
1097 return compile_and_run__( name, way, '', [], extra_hc_opts)
1098
1099 def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
1100 return compile_and_run__( name, way, top_mod, [], extra_hc_opts)
1101
1102 def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
1103 return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
1104
1105 def stats( name, way, stats_file ):
1106 opts = getTestOpts()
1107 return checkStats(name, way, stats_file, opts.stats_range_fields)
1108
1109 # -----------------------------------------------------------------------------
1110 # Check -t stats info
1111
1112 def checkStats(name, way, stats_file, range_fields):
1113 full_name = name + '(' + way + ')'
1114
1115 result = passed()
1116 if len(range_fields) > 0:
1117 try:
1118 f = open(in_testdir(stats_file))
1119 except IOError as e:
1120 return failBecause(str(e))
1121 contents = f.read()
1122 f.close()
1123
1124 for (field, (expected, dev)) in range_fields.items():
1125 m = re.search('\("' + field + '", "([0-9]+)"\)', contents)
1126 if m == None:
1127 print('Failed to find field: ', field)
1128 result = failBecause('no such stats field')
continue # don't try to read a value from a field that was not found
1129 val = int(m.group(1))
1130
1131 lowerBound = trunc( expected * ((100 - float(dev))/100))
1132 upperBound = trunc(0.5 + ceil(expected * ((100 + float(dev))/100)))
1133
1134 deviation = round(((float(val) * 100)/ expected) - 100, 1)
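# A worked example with made-up numbers: expected = 1000, dev = 5
# gives lowerBound = trunc(1000 * 0.95) = 950 and
# upperBound = trunc(0.5 + ceil(1000 * 1.05)) = 1050; an actual
# value of 980 is in range, with deviation = -2.0%.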
1135
1136 if val < lowerBound:
1137 print(field, 'value is too low:')
1138 print('(If this is because you have improved GHC, please')
1139 print('update the test so that GHC doesn\'t regress again)')
1140 result = failBecause('stat too good', tag='stat')
1141 if val > upperBound:
1142 print(field, 'value is too high:')
1143 result = failBecause('stat not good enough', tag='stat')
1144
1145 if val < lowerBound or val > upperBound or config.verbose >= 4:
1146 valStr = str(val)
1147 valLen = len(valStr)
1148 expectedStr = str(expected)
1149 expectedLen = len(expectedStr)
1150 length = max(len(str(x)) for x in [expected, lowerBound, upperBound, val])
1151
1152 def display(descr, val, extra):
1153 print(descr, str(val).rjust(length), extra)
1154
1155 display(' Expected ' + full_name + ' ' + field + ':', expected, '+/-' + str(dev) + '%')
1156 display(' Lower bound ' + full_name + ' ' + field + ':', lowerBound, '')
1157 display(' Upper bound ' + full_name + ' ' + field + ':', upperBound, '')
1158 display(' Actual ' + full_name + ' ' + field + ':', val, '')
1159 if val != expected:
1160 display(' Deviation ' + full_name + ' ' + field + ':', deviation, '%')
1161
1162 return result
1163
1164 # -----------------------------------------------------------------------------
1165 # Build a single-module program
1166
1167 def extras_build( way, extra_mods, extra_hc_opts ):
1168 for modopts in extra_mods:
1169 mod, opts = modopts
1170 result = simple_build( mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0, 0)
1171 if not (mod.endswith('.hs') or mod.endswith('.lhs')):
1172 extra_hc_opts += ' ' + replace_suffix(mod, 'o')
1173 if badResult(result):
1174 return result
1175
1176 return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1177
1178
1179 def simple_build( name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, noforce, override_flags = None ):
1180 opts = getTestOpts()
1181 errname = add_suffix(name, 'comp.stderr')
1182 rm_no_fail( qualify(errname, '') )
1183
1184 if top_mod != '':
1185 srcname = top_mod
1186 rm_no_fail( qualify(name, '') )
1187 base, suf = os.path.splitext(top_mod)
1188 rm_no_fail( qualify(base, '') )
1189 rm_no_fail( qualify(base, 'exe') )
1190 elif addsuf:
1191 srcname = add_hs_lhs_suffix(name)
1192 rm_no_fail( qualify(name, '') )
1193 else:
1194 srcname = name
1195 rm_no_fail( qualify(name, 'o') )
1196
1197 rm_no_fail( qualify(replace_suffix(srcname, "o"), '') )
1198
1199 to_do = ''
1200 if top_mod != '':
1201 to_do = '--make '
1202 if link:
1203 to_do = to_do + '-o ' + name
1204 elif link:
1205 to_do = '-o ' + name
1206 elif opts.compile_to_hc:
1207 to_do = '-C'
1208 else:
1209 to_do = '-c' # just compile
1210
1211 stats_file = name + '.comp.stats'
1212 if len(opts.compiler_stats_range_fields) > 0:
1213 extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1214
1215 # Required by GHC 7.3+, harmless for earlier versions:
1216 if (getTestOpts().c_src or
1217 getTestOpts().objc_src or
1218 getTestOpts().objcpp_src or
1219 getTestOpts().cmm_src):
1220 extra_hc_opts += ' -no-hs-main '
1221
1222 if getTestOpts().compile_cmd_prefix == '':
1223 cmd_prefix = ''
1224 else:
1225 cmd_prefix = getTestOpts().compile_cmd_prefix + ' '
1226
1227 flags = ' '.join(get_compiler_flags(override_flags, noforce) +
1228 config.way_flags(name)[way])
1229
1230 cmd = ('cd {opts.testdir} && {cmd_prefix} '
1231 '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts} '
1232 '> {errname} 2>&1'
1233 ).format(**locals())
1234
1235 result = runCmdFor(name, cmd, timeout_multiplier=opts.compile_timeout_multiplier)
1236
1237 if result != 0 and not should_fail:
1238 if config.verbose >= 1 and _expect_pass(way):
1239 print('Compile failed (status ' + repr(result) + ') errors were:')
1240 actual_stderr_path = in_testdir(name, 'comp.stderr')
1241 if_verbose_dump(1, actual_stderr_path)
1242
1243 # ToDo: if the sub-shell was killed by ^C, then exit
1244
1245 statsResult = checkStats(name, way, stats_file, opts.compiler_stats_range_fields)
1246
1247 if badResult(statsResult):
1248 return statsResult
1249
1250 if should_fail:
1251 if result == 0:
1252 return failBecause('exit code 0')
1253 else:
1254 if result != 0:
1255 return failBecause('exit code non-0')
1256
1257 return passed()
1258
1259 # -----------------------------------------------------------------------------
1260 # Run a program and check its output
1261 #
1262 # If testname.stdin exists, route input from that, else
1263 # from /dev/null. Route output to testname.run.stdout and
1264 # testname.run.stderr. Returns the exit code of the run.
1265
1266 def simple_run(name, way, prog, extra_run_opts):
1267 opts = getTestOpts()
1268
1269 # figure out what to use for stdin
1270 if opts.stdin != '':
1271 use_stdin = opts.stdin
1272 else:
1273 stdin_file = add_suffix(name, 'stdin')
1274 if os.path.exists(in_testdir(stdin_file)):
1275 use_stdin = stdin_file
1276 else:
1277 use_stdin = '/dev/null'
1278
1279 run_stdout = add_suffix(name,'run.stdout')
1280 run_stderr = add_suffix(name,'run.stderr')
1281
1282 rm_no_fail(qualify(name,'run.stdout'))
1283 rm_no_fail(qualify(name,'run.stderr'))
1284 rm_no_fail(qualify(name, 'hp'))
1285 rm_no_fail(qualify(name,'ps'))
1286 rm_no_fail(qualify(name, 'prof'))
1287
1288 my_rts_flags = rts_flags(way)
1289
1290 stats_file = name + '.stats'
1291 if len(opts.stats_range_fields) > 0:
1292 stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
1293 else:
1294 stats_args = ''
1295
1296 if opts.no_stdin:
1297 stdin_comes_from = ''
1298 else:
1299 stdin_comes_from = ' <' + use_stdin
1300
1301 if opts.combined_output:
1302 redirection = ' > {0} 2>&1'.format(run_stdout)
1303 redirection_append = ' >> {0} 2>&1'.format(run_stdout)
1304 else:
1305 redirection = ' > {0} 2> {1}'.format(run_stdout, run_stderr)
1306 redirection_append = ' >> {0} 2>> {1}'.format(run_stdout, run_stderr)
1307
1308 # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
1309 cmd = prog + stats_args + ' ' \
1310 + my_rts_flags + ' ' \
1311 + extra_run_opts + ' ' \
1312 + stdin_comes_from \
1313 + redirection
1314
1315 if opts.cmd_wrapper != None:
1316 cmd = opts.cmd_wrapper(cmd) + redirection_append
1317
1318 cmd = 'cd ' + opts.testdir + ' && ' + cmd
1319
1320 # run the command
1321 result = runCmdFor(name, cmd, timeout_multiplier=opts.run_timeout_multiplier)
1322
1323 exit_code = result >> 8
1324 signal = result & 0xff
1325
1326 # check the exit code
1327 if exit_code != opts.exit_code:
1328 if config.verbose >= 1 and _expect_pass(way):
1329 print('Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')')
1330 dump_stdout(name)
1331 dump_stderr(name)
1332 return failBecause('bad exit code')
1333
1334 check_hp = my_rts_flags.find("-h") != -1
1335 check_prof = my_rts_flags.find("-p") != -1
1336
1337 if not opts.ignore_output:
1338 bad_stderr = not opts.combined_output and not check_stderr_ok(name, way)
1339 bad_stdout = not check_stdout_ok(name, way)
1340 if bad_stderr:
1341 return failBecause('bad stderr')
1342 if bad_stdout:
1343 return failBecause('bad stdout')
1344 # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
1345 if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
1346 return failBecause('bad heap profile')
1347 if check_prof and not check_prof_ok(name, way):
1348 return failBecause('bad profile')
1349
1350 return checkStats(name, way, stats_file, opts.stats_range_fields)
1351
1352 def rts_flags(way):
1353 if (way == ''):
1354 return ''
1355 else:
1356 args = config.way_rts_flags[way]
1357
1358 if args == []:
1359 return ''
1360 else:
1361 return '+RTS ' + ' '.join(args) + ' -RTS'
1362
1363 # -----------------------------------------------------------------------------
1364 # Run a program in the interpreter and check its output
1365
1366 def interpreter_run( name, way, extra_hc_opts, compile_only, top_mod ):
1367 opts = getTestOpts()
1368
1369 outname = add_suffix(name, 'interp.stdout')
1370 errname = add_suffix(name, 'interp.stderr')
1371 rm_no_fail(outname)
1372 rm_no_fail(errname)
1373 rm_no_fail(name)
1374
1375 if (top_mod == ''):
1376 srcname = add_hs_lhs_suffix(name)
1377 else:
1378 srcname = top_mod
1379
1380 scriptname = add_suffix(name, 'genscript')
1381 qscriptname = in_testdir(scriptname)
1382 rm_no_fail(qscriptname)
1383
1384 delimiter = '===== program output begins here\n'
1385
1386 script = open(qscriptname, 'w')
1387 if not compile_only:
1388 # set the prog name and command-line args to match the compiled
1389 # environment.
1390 script.write(':set prog ' + name + '\n')
1391 script.write(':set args ' + getTestOpts().extra_run_opts + '\n')
1392 # Add marker lines to the stdout and stderr output files, so we
1393 # can separate GHCi's output from the program's.
1394 script.write(':! echo ' + delimiter)
1395 script.write(':! echo 1>&2 ' + delimiter)
1396 # Set stdout to be line-buffered to match the compiled environment.
1397 script.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
1398 # wrapping in GHC.TopHandler.runIO ensures we get the same output
1399 # in the event of an exception as for the compiled program.
1400 script.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
1401 script.close()
1402
1403 # figure out what to use for stdin
1404 if getTestOpts().stdin != '':
1405 stdin_file = in_testdir(getTestOpts().stdin)
1406 else:
1407 stdin_file = qualify(name, 'stdin')
1408
1409 if os.path.exists(stdin_file):
1410 os.system('cat ' + stdin_file + ' >>' + qscriptname)
1411
1412 flags = ' '.join(get_compiler_flags(override_flags=None, noforce=False) +
1413 config.way_flags(name)[way])
1414
1415 if getTestOpts().combined_output:
1416 redirection = ' > {0} 2>&1'.format(outname)
1417 redirection_append = ' >> {0} 2>&1'.format(outname)
1418 else:
1419 redirection = ' > {0} 2> {1}'.format(outname, errname)
1420 redirection_append = ' >> {0} 2>> {1}'.format(outname, errname)
1421
1422 cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts} '
1423 '< {scriptname} {redirection}'
1424 ).format(**locals())
1425
1426 if getTestOpts().cmd_wrapper != None:
1427 cmd = getTestOpts().cmd_wrapper(cmd) + redirection_append;
1428
1429 cmd = 'cd ' + getTestOpts().testdir + " && " + cmd
1430
1431 result = runCmdFor(name, cmd, timeout_multiplier=opts.run_timeout_multiplier)
1432
1433 exit_code = result >> 8
1434 signal = result & 0xff
1435
1436 # split the stdout into compilation/program output
1437 split_file(in_testdir(outname), delimiter,
1438 in_testdir(name, 'comp.stdout'),
1439 in_testdir(name, 'run.stdout'))
1440 split_file(in_testdir(errname), delimiter,
1441 in_testdir(name, 'comp.stderr'),
1442 in_testdir(name, 'run.stderr'))
1443
1444 # check the exit code
1445 if exit_code != getTestOpts().exit_code:
1446 print('Wrong exit code (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
1447 dump_stdout(name)
1448 dump_stderr(name)
1449 return failBecause('bad exit code')
1450
1451 # ToDo: if the sub-shell was killed by ^C, then exit
1452
1453 if getTestOpts().ignore_output or (check_stderr_ok(name, way) and
1454 check_stdout_ok(name, way)):
1455 return passed()
1456 else:
1457 return failBecause('bad stdout or stderr')
1458
1459
1460 def split_file(in_fn, delimiter, out1_fn, out2_fn):
1461 infile = open(in_fn)
1462 out1 = open(out1_fn, 'w')
1463 out2 = open(out2_fn, 'w')
1464
1465 line = infile.readline()
1466 line = re.sub('\r', '', line) # ignore Windows EOL
1467 while (re.sub('^\s*','',line) != delimiter and line != ''):
1468 out1.write(line)
1469 line = infile.readline()
1470 line = re.sub('\r', '', line)
1471 out1.close()
1472
1473 line = infile.readline()
1474 while (line != ''):
1475 out2.write(line)
1476 line = infile.readline()
1477 out2.close()
1478
1479 # -----------------------------------------------------------------------------
1480 # Utils
1481 def get_compiler_flags(override_flags, noforce):
1482 opts = getTestOpts()
1483
1484 if override_flags is not None:
1485 flags = copy.copy(override_flags)
1486 else:
1487 flags = copy.copy(opts.compiler_always_flags)
1488
1489 if noforce:
1490 flags = [f for f in flags if f != '-fforce-recomp']
1491
1492 flags.append(opts.extra_hc_opts)
1493
1494 if opts.outputdir != None:
1495 flags.extend(["-outputdir", opts.outputdir])
1496
1497 return flags
1498
1499 def check_stdout_ok(name, way):
1500 actual_stdout_file = add_suffix(name, 'run.stdout')
1501 (platform_specific, expected_stdout_file) = find_expected_file(name, 'stdout')
1502
1503 def norm(str):
1504 if platform_specific:
1505 return str
1506 else:
1507 return normalise_output(str)
1508
1509 extra_norm = join_normalisers(norm, getTestOpts().extra_normaliser)
1510
1511 check_stdout = getTestOpts().check_stdout
1512 if check_stdout:
1513 actual_stdout_path = in_testdir(actual_stdout_file)
1514 return check_stdout(actual_stdout_path, extra_norm)
1515
1516 return compare_outputs(way, 'stdout', extra_norm,
1517 expected_stdout_file, actual_stdout_file)
1518
1519 def dump_stdout( name ):
1520 print('Stdout:')
1521 print(read_no_crs(in_testdir(name, 'run.stdout')))
1522
1523 def check_stderr_ok(name, way):
1524 actual_stderr_file = add_suffix(name, 'run.stderr')
1525 (platform_specific, expected_stderr_file) = find_expected_file(name, 'stderr')
1526
1527 def norm(str):
1528 if platform_specific:
1529 return str
1530 else:
1531 return normalise_errmsg(str)
1532
1533 return compare_outputs(way, 'stderr',
1534 join_normalisers(norm, getTestOpts().extra_errmsg_normaliser), \
1535 expected_stderr_file, actual_stderr_file)
1536
1537 def dump_stderr( name ):
1538 print("Stderr:")
1539 print(read_no_crs(in_testdir(name, 'run.stderr')))
1540
1541 def read_no_crs(file):
1542 str = ''
1543 try:
1544 h = open(file)
1545 str = h.read()
1546 h.close()
1547 except:
1548 # On Windows, if the program fails very early, it seems the
1549 # files that stdout/stderr are redirected to may not get created
1550 pass
1551 return re.sub('\r', '', str)
1552
1553 def write_file(file, str):
1554 h = open(file, 'w')
1555 h.write(str)
1556 h.close()
1557
1558 def check_hp_ok(name):
1559
1560 # do not qualify for hp2ps because we should be in the right directory
1561 hp2psCmd = "cd " + getTestOpts().testdir + " && {hp2ps} " + name
1562
1563 hp2psResult = runCmdExitCode(hp2psCmd)
1564
1565 actual_ps_path = in_testdir(name, 'ps')
1566
1567 if(hp2psResult == 0):
1568 if (os.path.exists(actual_ps_path)):
1569 if gs_working:
1570 gsResult = runCmdExitCode(genGSCmd(actual_ps_path))
1571 if (gsResult == 0):
1572 return (True)
1573 else:
1574 print("hp2ps output for " + name + "is not valid PostScript")
1575 else: return (True) # assume postscript is valid without ghostscript
1576 else:
1577 print("hp2ps did not generate PostScript for " + name)
1578 return (False)
1579 else:
1580 print("hp2ps error when processing heap profile for " + name)
1581 return(False)
1582
1583 def check_prof_ok(name, way):
1584 actual_prof_file = add_suffix(name, 'prof')
1585 actual_prof_path = in_testdir(actual_prof_file)
1586
1587 if not os.path.exists(actual_prof_path):
1588 print(actual_prof_path + " does not exist")
1589 return(False)
1590
1591 if os.path.getsize(actual_prof_path) == 0:
1592 print(actual_prof_path + " is empty")
1593 return(False)
1594
1595 (_, expected_prof_file) = find_expected_file(name, 'prof.sample')
1596 expected_prof_path = in_testdir(expected_prof_file)
1597
1598 # sample prof file is not required
1599 if not os.path.exists(expected_prof_path):
1600 return True
1601 else:
1602 return compare_outputs(way, 'prof', normalise_prof,
1603 expected_prof_file, actual_prof_file,
1604 whitespace_normaliser=normalise_whitespace)
1605
1606 # Compare expected output to actual output, and optionally accept the
1607 # new output. Returns true if output matched or was accepted, false
1608 # otherwise. See Note [Output comparison] for the meaning of the
1609 # normaliser and whitespace_normaliser parameters.
1610 def compare_outputs(way, kind, normaliser, expected_file, actual_file,
1611 whitespace_normaliser=lambda x:x):
1612
1613 expected_path = in_testdir(expected_file)
1614 actual_path = in_testdir(actual_file)
1615
1616 if os.path.exists(expected_path):
1617 expected_str = normaliser(read_no_crs(expected_path))
1618 expected_normalised_file = add_suffix(expected_file, 'normalised')
1619 expected_normalised_path = in_testdir(expected_normalised_file)
1620 else:
1621 expected_str = ''
1622 expected_normalised_path = '/dev/null'
1623
1624 actual_raw = read_no_crs(actual_path)
1625 actual_str = normaliser(actual_raw)
1626
1627 # See Note [Output comparison].
1628 if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
1629 return 1
1630 else:
1631 if config.verbose >= 1 and _expect_pass(way):
1632 print('Actual ' + kind + ' output differs from expected:')
1633
1634 if expected_normalised_path != '/dev/null':
1635 write_file(expected_normalised_path, expected_str)
1636
1637 actual_normalised_path = add_suffix(actual_path, 'normalised')
1638 write_file(actual_normalised_path, actual_str)
1639
1640 if config.verbose >= 1 and _expect_pass(way):
1641 # See Note [Output comparison].
1642 r = os.system('diff -uw {} {}'.format(expected_normalised_path,
1643 actual_normalised_path))
1644
1645 # If for some reason there were no non-whitespace differences,
1646 # then do a full diff
1647 if r == 0:
1648 r = os.system('diff -u {} {}'.format(expected_normalised_path,
1649 actual_normalised_path))
1650
1651 if config.accept and (getTestOpts().expect == 'fail' or
1652 way in getTestOpts().expect_fail_for):
1653 if_verbose(1, 'Test is expected to fail. Not accepting new output.')
1654 return 0
1655 elif config.accept:
1656 if_verbose(1, 'Accepting new output.')
1657 write_file(expected_path, actual_raw)
1658 return 1
1659 else:
1660 return 0
1661
1662 # Note [Output comparison]
1663 #
1664 # We do two types of output comparison:
1665 #
1666 # 1. To decide whether a test has failed. We apply a `normaliser` and an
1667 # optional `whitespace_normaliser` to the expected and the actual
1668 # output, before comparing the two.
1669 #
1670 # 2. To show as a diff to the user when the test indeed failed. We apply
1671 # the same `normaliser` function to the outputs, to make the diff as
1672 # small as possible (only showing the actual problem). But we don't
1673 # apply the `whitespace_normaliser` here, because it might completely
1674 # squash all whitespace, making the diff unreadable. Instead we rely
1675 # on the `diff` program to ignore whitespace changes as much as
1676 # possible (#10152).
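#
# As a small illustration (made-up strings): if the expected output is
# "x   =   1" and the actual output is "x = 2", the test fails in
# step 1 even with normalise_whitespace as the whitespace_normaliser,
# and the diff shown in step 2 contains the 1 -> 2 change while
# `diff -uw` hides the spacing-only difference.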
1677
1678 def normalise_whitespace( str ):
1679 # Merge contiguous whitespace characters into a single space.
1680 str = re.sub('[ \t\n]+', ' ', str)
1681 return str.strip()
1682
1683 def normalise_errmsg( str ):
1684 # remove " error:" and lower-case " Warning:" to make patch for
1685 # trac issue #10021 smaller
1686 str = modify_lines(str, lambda l: re.sub(' error:', '', l))
1687 str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
1688
1689 # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
1690 # the colon is there because it appears in error messages; this
1691 # hacky solution is used in place of more sophisticated filename
1692 # mangling
1693 str = re.sub('([^\\s])\\.exe', '\\1', str)
1694 # normalise slashes, minimise Windows/Unix filename differences
1695 str = re.sub('\\\\', '/', str)
1696 # The inplace ghc's are called ghc-stage[123] to avoid filename
1697 # collisions, so we need to normalise that to just "ghc"
1698 str = re.sub('ghc-stage[123]', 'ghc', str)
1699 # Error messages sometimes contain the integer implementation package
1700 str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
1701 return str
1702
1703 # normalise a .prof file, so that we can reasonably compare it against
1704 # a sample. This doesn't compare any of the actual profiling data,
1705 # only the shape of the profile and the number of entries.
1706 def normalise_prof (str):
1707 # strip everything up to the line beginning "COST CENTRE"
1708 str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)
1709
1710 # strip results for CAFs, these tend to change unpredictably
1711 str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)
1712
1713 # XXX Ignore Main.main. Sometimes this appears under CAF, and
1714 # sometimes under MAIN.
1715 str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)
1716
1717 # We have something like this:
1718
1719 # MAIN MAIN 101 0 0.0 0.0 100.0 100.0
1720 # k Main 204 1 0.0 0.0 0.0 0.0
1721 # foo Main 205 1 0.0 0.0 0.0 0.0
1722 # foo.bar Main 207 1 0.0 0.0 0.0 0.0
1723
1724 # then we remove all the specific profiling data, leaving only the
1725 # cost centre name, module, and entries, to end up with this:
1726
1727 # MAIN MAIN 0
1728 # k Main 1
1729 # foo Main 1
1730 # foo.bar Main 1
1731
1732 str = re.sub('\n([ \t]*[^ \t]+)([ \t]+[^ \t]+)([ \t]+\\d+)([ \t]+\\d+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)','\n\\1 \\2 \\4',str)
1733 return str
1734
1735 def normalise_slashes_( str ):
1736 str = re.sub('\\\\', '/', str)
1737 return str
1738
1739 def normalise_exe_( str ):
1740 str = re.sub('\.exe', '', str)
1741 return str
1742
1743 def normalise_output( str ):
1744 # remove " error:" and lower-case " Warning:" to make patch for
1745 # trac issue #10021 smaller
1746 str = modify_lines(str, lambda l: re.sub(' error:', '', l))
1747 str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
1748 # Remove a .exe extension (for Windows)
1749 # This can occur in error messages generated by the program.
1750 str = re.sub('([^\\s])\\.exe', '\\1', str)
1751 return str
1752
1753 def normalise_asm( str ):
1754 lines = str.split('\n')
1755 # Only keep instructions and labels not starting with a dot.
1756 metadata = re.compile('^[ \t]*\\..*$')
1757 out = []
1758 for line in lines:
1759 # Drop metadata directives (e.g. ".type")
1760 if not metadata.match(line):
1761 line = re.sub('@plt', '', line)
1762 instr = line.lstrip().split()
1763 # Drop empty lines.
1764 if not instr:
1765 continue
1766 # Drop operands, except for call instructions.
1767 elif instr[0] == 'call':
1768 out.append(instr[0] + ' ' + instr[1])
1769 else:
1770 out.append(instr[0])
1771 out = '\n'.join(out)
1772 return out
1773
1774 def if_verbose( n, s ):
1775 if config.verbose >= n:
1776 print(s)
1777
1778 def if_verbose_dump( n, f ):
1779 if config.verbose >= n:
1780 try:
1781 print(open(f).read())
1782 except:
1783 print('')
1784
1785 def rawSystem(cmd_and_args):
1786 # We prefer subprocess.call to os.spawnv as the latter
1787 # seems to send its arguments through a shell or something
1788 # with the Windows (non-cygwin) python. An argument "a b c"
1789 # turns into three arguments ["a", "b", "c"].
1790
1791 cmd = cmd_and_args[0]
1792 return subprocess.call([strip_quotes(cmd)] + cmd_and_args[1:])
1793
1794 # Note that this doesn't handle the timeout itself; it is just used for
1795 # commands that have timeout handling built-in.
1796 def rawSystemWithTimeout(cmd_and_args):
1797 r = rawSystem(cmd_and_args)
1798 if r == 98:
1799 # The python timeout program uses 98 to signal that ^C was pressed
1800 stopNow()
1801 if r == 99 and getTestOpts().exit_code != 99:
1802 # Only print a message when timeout killed the process unexpectedly.
1803 cmd = cmd_and_args[-1]
1804 if_verbose(1, 'Timeout happened...killed process "{}"...\n'.format(cmd))
1805 return r
1806
1807 # cmd is a complex command in Bourne-shell syntax
1808 # e.g. (cd . && 'c:/users/simonpj/darcs/HEAD/compiler/stage1/ghc-inplace' ...etc)
1809 # Hence it must ultimately be run by a Bourne shell
1810 #
1811 # Mostly it invokes the command wrapped in 'timeout' thus
1812 # timeout 300 'cd . && ...blah blah'
1813 # so it's timeout's job to invoke the Bourne shell
1814 #
1815 # But watch out for the case when there is no timeout program!
1816 # Then, when using the native Python, os.system will invoke the cmd shell
1817
1818 def runCmd( cmd ):
1819 # Format cmd using config. Example: cmd='{hpc} report A.tix'
1820 cmd = cmd.format(**config.__dict__)
1821
1822 if_verbose( 3, cmd )
1823 r = 0
1824 if config.os == 'mingw32':
1825 # On MinGW, we will always have timeout
1826 assert config.timeout_prog!=''
1827
1828 if config.timeout_prog != '':
1829 r = rawSystemWithTimeout([config.timeout_prog, str(config.timeout), cmd])
1830 else:
1831 r = os.system(cmd)
1832 return r << 8
1833
1834 def runCmdFor( name, cmd, timeout_multiplier=1.0 ):
1835 # Format cmd using config. Example: cmd='{hpc} report A.tix'
1836 cmd = cmd.format(**config.__dict__)
1837
1838 if_verbose( 3, cmd )
1839 r = 0
1840 if config.os == 'mingw32':
1841 # On MinGW, we will always have timeout
1842 assert config.timeout_prog!=''
1843 timeout = int(ceil(config.timeout * timeout_multiplier))
1844
1845 if config.timeout_prog != '':
1846 if config.check_files_written:
1847 fn = name + ".strace"
1848 r = rawSystemWithTimeout(
1849 ["strace", "-o", fn, "-fF",
1850 "-e", "creat,open,chdir,clone,vfork",
1851 strip_quotes(config.timeout_prog), str(timeout), cmd])
1852 addTestFilesWritten(name, fn)
1853 rm_no_fail(fn)
1854 else:
1855 r = rawSystemWithTimeout([config.timeout_prog, str(timeout), cmd])
1856 else:
1857 r = os.system(cmd)
1858 return r << 8
1859
1860 def runCmdExitCode( cmd ):
1861 return (runCmd(cmd) >> 8);
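# For illustration: when the timeout program is used, runCmd and runCmdFor
# return the command's exit code shifted left by 8 bits (os.system-style),
# so an exit code of 2 comes back as 512 from runCmd and as 2 from
# runCmdExitCode.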
1862
1863
1864 # -----------------------------------------------------------------------------
1865 # checking for files being written to by multiple tests
1866
1867 re_strace_call_end = '(\) += ([0-9]+|-1 E.*)| <unfinished ...>)$'
1868 re_strace_unavailable_end ='\) += \? <unavailable>$'
1869
1870 re_strace_unavailable_line = re.compile('^' + re_strace_unavailable_end)
1871 re_strace_unavailable_cntnt = re.compile('^<\.\.\. .* resumed> ' + re_strace_unavailable_end)
1872 re_strace_pid = re.compile('^([0-9]+) +(.*)')
1873 re_strace_clone = re.compile('^(clone\(|<... clone resumed> ).*\) = ([0-9]+)$')
1874 re_strace_clone_unfinished = re.compile('^clone\( <unfinished \.\.\.>$')
1875 re_strace_vfork = re.compile('^(vfork\(\)|<\.\.\. vfork resumed> \)) += ([0-9]+)$')
1876 re_strace_vfork_unfinished = re.compile('^vfork\( <unfinished \.\.\.>$')
1877 re_strace_chdir = re.compile('^chdir\("([^"]*)"(\) += 0| <unfinished ...>)$')
1878 re_strace_chdir_resumed = re.compile('^<\.\.\. chdir resumed> \) += 0$')
1879 re_strace_open = re.compile('^open\("([^"]*)", ([A-Z_|]*)(, [0-9]+)?' + re_strace_call_end)
1880 re_strace_open_resumed = re.compile('^<... open resumed> ' + re_strace_call_end)
1881 re_strace_ignore_sigchild = re.compile('^--- SIGCHLD \(Child exited\) @ 0 \(0\) ---$')
1882 re_strace_ignore_sigchild2 = re.compile('^--- SIGCHLD {si_signo=SIGCHLD, si_code=CLD_EXITED, .*} ---$')
1883 re_strace_ignore_exited = re.compile('^\+\+\+ exited with [0-9]* \+\+\+$')
1884 re_strace_ignore_sigvtalarm = re.compile('^--- SIGVTALRM \(Virtual timer expired\) @ 0 \(0\) ---$')
1885 re_strace_ignore_sigvtalarm2= re.compile('^--- SIGVTALRM {si_signo=SIGVTALRM, si_code=SI_TIMER, .*} ---$')
1886 re_strace_ignore_sigint = re.compile('^--- SIGINT \(Interrupt\) @ 0 \(0\) ---$')
1887 re_strace_ignore_sigfpe = re.compile('^--- SIGFPE \(Floating point exception\) @ 0 \(0\) ---$')
1888 re_strace_ignore_sigsegv = re.compile('^--- SIGSEGV \(Segmentation fault\) @ 0 \(0\) ---$')
1889 re_strace_ignore_sigpipe = re.compile('^--- SIGPIPE \(Broken pipe\) @ 0 \(0\) ---$')
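# For illustration (a made-up strace line of the kind parsed below):
#     1234 open("T123.run.stdout", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 3
# re_strace_pid splits off the pid ("1234"), and re_strace_open then picks
# out the file name and the open flags from the remainder of the line.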
1890
1891 # Files that are read or written but shouldn't be:
1892 # * ghci_history shouldn't be read or written by tests
1893 # * things under package.conf.d shouldn't be written by tests
1894 bad_file_usages = {}
1895
1896 # Mapping from tests to the list of files that they write
1897 files_written = {}
1898
1899 # Mapping from tests to the list of files that they write but don't clean
1900 files_written_not_removed = {}
1901
1902 def add_bad_file_usage(name, file):
1903 try:
1904 if not file in bad_file_usages[name]:
1905 bad_file_usages[name].append(file)
1906 except:
1907 bad_file_usages[name] = [file]
1908
1909 def mkPath(curdir, path):
1910 # Given the current full directory is 'curdir', what is the full
1911 # path to 'path'?
1912 return os.path.realpath(os.path.join(curdir, path))
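# For illustration (hypothetical paths, assuming no symlinks are involved):
#     mkPath('/builds/tests', '../foo/out') == '/builds/foo/out'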
1913
1914 def addTestFilesWritten(name, fn):
1915 if config.use_threads:
1916 with t.lockFilesWritten:
1917 addTestFilesWrittenHelper(name, fn)
1918 else:
1919 addTestFilesWrittenHelper(name, fn)
1920
1921 def addTestFilesWrittenHelper(name, fn):
1922 started = False
1923 working_directories = {}
1924
1925 with open(fn, 'r') as f:
1926 for line in f:
1927 m_pid = re_strace_pid.match(line)
1928 if m_pid:
1929 pid = m_pid.group(1)
1930 content = m_pid.group(2)
1931 elif re_strace_unavailable_line.match(line):
1932 continue # nothing to parse on an '<unavailable>' line; skip it
1933 else:
1934 framework_fail(name, 'strace', "Can't find pid in strace line: " + line)
1935
1936 m_open = re_strace_open.match(content)
1937 m_chdir = re_strace_chdir.match(content)
1938 m_clone = re_strace_clone.match(content)
1939 m_vfork = re_strace_vfork.match(content)
1940
1941 if not started:
1942 working_directories[pid] = os.getcwd()
1943 started = True
1944
1945 if m_open:
1946 file = m_open.group(1)
1947 file = mkPath(working_directories[pid], file)
1948 if file.endswith("ghci_history"):
1949 add_bad_file_usage(name, file)
1950 elif not file in ['/dev/tty', '/dev/null'] and not file.startswith("/tmp/ghc"):
1951 flags = m_open.group(2).split('|')
1952 if 'O_WRONLY' in flags or 'O_RDWR' in flags:
1953 if re.search('package\.conf\.d', file): # file is an absolute path
1954 add_bad_file_usage(name, file)
1955 else:
1956 try:
1957 if not file in files_written[name]:
1958 files_written[name].append(file)
1959 except:
1960 files_written[name] = [file]
1961 elif 'O_RDONLY' in flags:
1962 pass
1963 else:
1964 framework_fail(name, 'strace', "Can't understand flags in open strace line: " + line)
1965 elif m_chdir:
1966 # We optimistically assume that unfinished chdir's are going to succeed
1967 dir = m_chdir.group(1)
1968 working_directories[pid] = mkPath(working_directories[pid], dir)
1969 elif m_clone:
1970 working_directories[m_clone.group(2)] = working_directories[pid]
1971 elif m_vfork:
1972 working_directories[m_vfork.group(2)] = working_directories[pid]
1973 elif re_strace_open_resumed.match(content):
1974 pass
1975 elif re_strace_chdir_resumed.match(content):
1976 pass
1977 elif re_strace_vfork_unfinished.match(content):
1978 pass
1979 elif re_strace_clone_unfinished.match(content):
1980 pass
1981 elif re_strace_ignore_sigchild.match(content):
1982 pass
1983 elif re_strace_ignore_sigchild2.match(content):
1984 pass
1985 elif re_strace_ignore_exited.match(content):
1986 pass
1987 elif re_strace_ignore_sigvtalarm.match(content):
1988 pass
1989 elif re_strace_ignore_sigvtalarm2.match(content):
1990 pass
1991 elif re_strace_ignore_sigint.match(content):
1992 pass
1993 elif re_strace_ignore_sigfpe.match(content):
1994 pass
1995 elif re_strace_ignore_sigsegv.match(content):
1996 pass
1997 elif re_strace_ignore_sigpipe.match(content):
1998 pass
1999 elif re_strace_unavailable_cntnt.match(content):
2000 pass
2001 else:
2002 framework_fail(name, 'strace', "Can't understand strace line: " + line)
2003
2004 def checkForFilesWrittenProblems(file):
2005 foundProblem = False
2006
2007 files_written_inverted = {}
2008 for t in files_written.keys():
2009 for f in files_written[t]:
2010 try:
2011 files_written_inverted[f].append(t)
2012 except:
2013 files_written_inverted[f] = [t]
2014
2015 for f in files_written_inverted.keys():
2016 if len(files_written_inverted[f]) > 1:
2017 if not foundProblem:
2018 foundProblem = True
2019 file.write("\n")
2020 file.write("\nSome files are written by multiple tests:\n")
2021 file.write(" " + f + " (" + str(files_written_inverted[f]) + ")\n")
2022 if foundProblem:
2023 file.write("\n")
2024
2025 # -----
2026
2027 if len(files_written_not_removed) > 0:
2028 file.write("\n")
2029 file.write("\nSome files written but not removed:\n")
2030 tests = list(files_written_not_removed.keys())
2031 tests.sort()
2032 for t in tests:
2033 for f in files_written_not_removed[t]:
2034 file.write(" " + t + ": " + f + "\n")
2035 file.write("\n")
2036
2037 # -----
2038
2039 if len(bad_file_usages) > 0:
2040 file.write("\n")
2041 file.write("\nSome bad file usages:\n")
2042 tests = list(bad_file_usages.keys())
2043 tests.sort()
2044 for t in tests:
2045 for f in bad_file_usages[t]:
2046 file.write(" " + t + ": " + f + "\n")
2047 file.write("\n")
2048
2049 # -----------------------------------------------------------------------------
2050 # checking if ghostscript is available for checking the output of hp2ps
2051
2052 def genGSCmd(psfile):
2053 return (config.gs + ' -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE ' + psfile);
2054
2055 def gsNotWorking():
2056 global gs_working
2057 print("GhostScript not available for hp2ps tests")
2058
2059 global gs_working
2060 gs_working = 0
2061 if config.have_profiling:
2062 if config.gs != '':
2063 resultGood = runCmdExitCode(genGSCmd(config.confdir + '/good.ps'));
2064 if resultGood == 0:
2065 resultBad = runCmdExitCode(genGSCmd(config.confdir + '/bad.ps') +
2066 ' >/dev/null 2>&1')
2067 if resultBad != 0:
2068 print("GhostScript available for hp2ps tests")
2069 gs_working = 1;
2070 else:
2071 gsNotWorking();
2072 else:
2073 gsNotWorking();
2074 else:
2075 gsNotWorking();
2076
2077 def rm_no_fail( file ):
2078 try:
2079 os.remove( file )
2080 finally:
2081 return
2082
2083 def add_suffix( name, suffix ):
2084 if suffix == '':
2085 return name
2086 else:
2087 return name + '.' + suffix
2088
2089 def add_hs_lhs_suffix(name):
2090 if getTestOpts().c_src:
2091 return add_suffix(name, 'c')
2092 elif getTestOpts().cmm_src:
2093 return add_suffix(name, 'cmm')
2094 elif getTestOpts().objc_src:
2095 return add_suffix(name, 'm')
2096 elif getTestOpts().objcpp_src:
2097 return add_suffix(name, 'mm')
2098 elif getTestOpts().literate:
2099 return add_suffix(name, 'lhs')
2100 else:
2101 return add_suffix(name, 'hs')
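# For illustration: add_hs_lhs_suffix('T123') yields 'T123.hs' for an
# ordinary Haskell test, 'T123.lhs' when the test sets literate, and the
# corresponding C/Cmm/Objective-C suffix for the other source kinds.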
2102
2103 def replace_suffix( name, suffix ):
2104 base, suf = os.path.splitext(name)
2105 return base + '.' + suffix
2106
2107 def in_testdir(name, suffix=''):
2108 return getTestOpts().testdir + '/' + add_suffix(name, suffix)
2109
2110 def qualify( name, suff ):
2111 return in_testdir(add_suffix(name, suff))
2112
2113
2114 # Finding the sample output. The filename is of the form
2115 #
2116 # <test>.stdout[-ws-<wordsize>][-<platform>]
2117 #
2118 # and we pick the most specific version available: a file qualified by the
2119 # wordsize and/or the platform (or OS) is preferred over the plain
2120 # <test>.stdout. For more fine-grained control use compiler_lt().
2121 #
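# For example (illustrative values, not from the original source), for name
# "T123", suff "stdout", wordsize "64", platform "x86_64-unknown-linux" and
# OS "linux", the candidates are tried in this order:
#     T123.stdout-ws-64-x86_64-unknown-linux
#     T123.stdout-x86_64-unknown-linux
#     T123.stdout-ws-64-linux
#     T123.stdout-linux
#     T123.stdout-ws-64
#     T123.stdout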
2122 def find_expected_file(name, suff):
2123 basename = add_suffix(name, suff)
2124 basepath = in_testdir(basename)
2125
2126 files = [(platformSpecific, basename + ws + plat)
2127 for (platformSpecific, plat) in [(1, '-' + config.platform),
2128 (1, '-' + config.os),
2129 (0, '')]
2130 for ws in ['-ws-' + config.wordsize, '']]
2131
2132 dir = glob.glob(basepath + '*')
2133 dir = [normalise_slashes_(d) for d in dir]
2134
2135 for (platformSpecific, f) in files:
2136 if in_testdir(f) in dir:
2137 return (platformSpecific,f)
2138
2139 return (0, basename)
2140
2141 # Clean up prior to the test, so that we can't spuriously conclude
2142 # that it passed on the basis of old run outputs.
2143 def pretest_cleanup(name):
2144 if getTestOpts().outputdir != None:
2145 odir = in_testdir(getTestOpts().outputdir)
2146 try:
2147 shutil.rmtree(odir)
2148 except:
2149 pass
2150 os.mkdir(odir)
2151
2152 rm_no_fail(qualify(name,'interp.stderr'))
2153 rm_no_fail(qualify(name,'interp.stdout'))
2154 rm_no_fail(qualify(name,'comp.stderr'))
2155 rm_no_fail(qualify(name,'comp.stdout'))
2156 rm_no_fail(qualify(name,'run.stderr'))
2157 rm_no_fail(qualify(name,'run.stdout'))
2158 rm_no_fail(qualify(name,'tix'))
2159 rm_no_fail(qualify(name,'exe.tix'))
2160 # simple_build zaps the following:
2161 # rm_nofail(qualify("o"))
2162 # rm_nofail(qualify(""))
2163 # not interested in the return code
2164
2165 # -----------------------------------------------------------------------------
2166 # Return a list of all the files ending in '.T' below directories roots.
2167
2168 def findTFiles(roots):
2169 # It would be better to use os.walk, but that
2170 # gives backslashes on Windows, which trip the
2171 # testsuite later :-(
2172 return [filename for root in roots for filename in findTFiles_(root)]
2173
2174 def findTFiles_(path):
2175 if os.path.isdir(path):
2176 paths = [path + '/' + x for x in os.listdir(path)]
2177 return findTFiles(paths)
2178 elif path[-2:] == '.T':
2179 return [path]
2180 else:
2181 return []
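# For illustration (hypothetical layout): findTFiles(['tests']) walks the
# tree under 'tests' and returns every path ending in '.T', for example
# 'tests/typecheck/should_compile/all.T'.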
2182
2183 # -----------------------------------------------------------------------------
2184 # Output a test summary to the specified file object
2185
2186 def summary(t, file, short=False):
2187
2188 file.write('\n')
2189 printUnexpectedTests(file, [t.unexpected_passes, t.unexpected_failures, t.unexpected_stat_failures])
2190
2191 if short:
2192 # Only print the list of unexpected tests above.
2193 return
2194
2195 file.write('OVERALL SUMMARY for test run started at '
2196 + time.strftime("%c %Z", t.start_time) + '\n'
2197 + str(datetime.timedelta(seconds=
2198 round(time.time() - time.mktime(t.start_time)))).rjust(8)
2199 + ' spent to go through\n'
2200 + repr(t.total_tests).rjust(8)
2201 + ' total tests, which gave rise to\n'
2202 + repr(t.total_test_cases).rjust(8)
2203 + ' test cases, of which\n'
2204 + repr(t.n_tests_skipped).rjust(8)
2205 + ' were skipped\n'
2206 + '\n'
2207 + repr(t.n_missing_libs).rjust(8)
2208 + ' had missing libraries\n'
2209 + repr(t.n_expected_passes).rjust(8)
2210 + ' expected passes\n'
2211 + repr(t.n_expected_failures).rjust(8)
2212 + ' expected failures\n'
2213 + '\n'
2214 + repr(t.n_framework_failures).rjust(8)
2215 + ' caused framework failures\n'
2216 + repr(t.n_unexpected_passes).rjust(8)
2217 + ' unexpected passes\n'
2218 + repr(t.n_unexpected_failures).rjust(8)
2219 + ' unexpected failures\n'
2220 + repr(t.n_unexpected_stat_failures).rjust(8)
2221 + ' unexpected stat failures\n'
2222 + '\n')
2223
2224 if t.n_unexpected_passes > 0:
2225 file.write('Unexpected passes:\n')
2226 printPassingTestInfosSummary(file, t.unexpected_passes)
2227
2228 if t.n_unexpected_failures > 0:
2229 file.write('Unexpected failures:\n')
2230 printFailingTestInfosSummary(file, t.unexpected_failures)
2231
2232 if t.n_unexpected_stat_failures > 0:
2233 file.write('Unexpected stat failures:\n')
2234 printFailingTestInfosSummary(file, t.unexpected_stat_failures)
2235
2236 if config.check_files_written:
2237 checkForFilesWrittenProblems(file)
2238
2239 if stopping():
2240 file.write('WARNING: Testsuite run was terminated early\n')
2241
2242 def printUnexpectedTests(file, testInfoss):
2243 unexpected = []
2244 for testInfos in testInfoss:
2245 directories = testInfos.keys()
2246 for directory in directories:
2247 tests = list(testInfos[directory].keys())
2248 unexpected += tests
2249 if unexpected != []:
2250 file.write('Unexpected results from:\n')
2251 file.write('TEST="' + ' '.join(unexpected) + '"\n')
2252 file.write('\n')
2253
2254 def printPassingTestInfosSummary(file, testInfos):
2255 directories = list(testInfos.keys())
2256 directories.sort()
2257 maxDirLen = max(len(x) for x in directories)
2258 for directory in directories:
2259 tests = list(testInfos[directory].keys())
2260 tests.sort()
2261 for test in tests:
2262 file.write(' ' + directory.ljust(maxDirLen + 2) + test + \
2263 ' (' + ','.join(testInfos[directory][test]) + ')\n')
2264 file.write('\n')
2265
2266 def printFailingTestInfosSummary(file, testInfos):
2267 directories = list(testInfos.keys())
2268 directories.sort()
2269 maxDirLen = max(len(d) for d in directories)
2270 for directory in directories:
2271 tests = list(testInfos[directory].keys())
2272 tests.sort()
2273 for test in tests:
2274 reasons = testInfos[directory][test].keys()
2275 for reason in reasons:
2276 file.write(' ' + directory.ljust(maxDirLen + 2) + test + \
2277 ' [' + reason + ']' + \
2278 ' (' + ','.join(testInfos[directory][test][reason]) + ')\n')
2279 file.write('\n')
2280
2281 def modify_lines(s, f):
2282 return '\n'.join([f(l) for l in s.splitlines()])