Make validate more quiet
[ghc.git] / testsuite / driver / testlib.py
1 #
2 # (c) Simon Marlow 2002
3 #
4
5 from __future__ import print_function
6
7 import shutil
8 import sys
9 import os
10 import errno
11 import string
12 import re
13 import traceback
14 import time
15 import datetime
16 import copy
17 import glob
18 from math import ceil, trunc
19 import collections
20 import subprocess
21
22 from testglobals import *
23 from testutil import *
24
25 if config.use_threads:
26 import threading
27 try:
28 import thread
29 except ImportError: # Python 3
30 import _thread as thread
31
# Global stop flag: set by stopNow() (e.g. on Ctrl-C) and polled via
# stopping() so the test loop can bail out between tests.
global wantToStop
wantToStop = False

def stopNow():
    """Request that the test driver stop as soon as convenient."""
    global wantToStop
    wantToStop = True

def stopping():
    """Has a stop been requested?"""
    return wantToStop
39
# Options valid for the current test only (these get reset to
# testdir_testopts after each test).

# Holder for the per-test options.  With the threaded runner every worker
# thread needs its own copy, so use threading.local(); otherwise a plain
# attribute-bag object is enough.
global testopts_local
if config.use_threads:
    testopts_local = threading.local()
else:
    class TestOpts_Local:
        pass
    testopts_local = TestOpts_Local()
50
def getTestOpts():
    """Return the options of the test currently being run."""
    return testopts_local.x

def setLocalTestOpts(opts):
    """Install `opts` as the current (thread-local) test options."""
    global testopts_local
    testopts_local.x = opts
57
def isStatsTest():
    """True if the current test checks runtime or compiler -t statistics."""
    opts = getTestOpts()
    return bool(opts.compiler_stats_range_fields) or bool(opts.stats_range_fields)
61
62
# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
    """Chain setup `f` onto this directory's default settings."""
    global thisdir_settings
    # executeSetups walks nested lists, so chaining is just list-nesting.
    thisdir_settings = [thisdir_settings, f]
68
69 # -----------------------------------------------------------------------------
70 # Canned setup functions for common cases. eg. for a test you might say
71 #
72 # test('test001', normal, compile, [''])
73 #
74 # to run it without any options, but change it to
75 #
76 # test('test001', expect_fail, compile, [''])
77 #
78 # to expect failure for this test.
79
def normal( name, opts ):
    """Default setup: run the test with no special options."""
    return

def skip( name, opts ):
    """Setup: skip this test entirely."""
    opts.skip = 1

def expect_fail( name, opts ):
    """Setup: the test is expected to fail in every way it is run."""
    opts.expect = 'fail'
88
def reqlib( lib ):
    """Setup: the test needs package `lib`; if absent, expect 'missing-lib'."""
    return lambda name, opts, l=lib: _reqlib (name, opts, l )

# Cache the results of looking to see if we have a library or not.
# This makes quite a difference, especially on Windows.
have_lib = {}

def _reqlib( name, opts, lib ):
    if lib not in have_lib:
        # Ask ghc-pkg whether the package is installed.
        ghc_pkg = strip_quotes(config.ghc_pkg)
        proc = subprocess.Popen([ghc_pkg, '--no-user-package-db', 'describe', lib],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        # read from stdout and stderr to avoid blocking due to
        # buffers filling
        proc.communicate()
        have_lib[lib] = proc.wait() == 0

    if not have_lib[lib]:
        opts.expect = 'missing-lib'
113
114 def req_haddock( name, opts ):
115 if not config.haddock:
116 opts.expect = 'missing-lib'
117
118 def req_profiling( name, opts ):
119 if not config.have_profiling:
120 opts.expect = 'fail'
121
122 def req_shared_libs( name, opts ):
123 if not config.have_shared_libs:
124 opts.expect = 'fail'
125
126 def req_interp( name, opts ):
127 if not config.have_interp:
128 opts.expect = 'fail'
129
130 def req_smp( name, opts ):
131 if not config.have_smp:
132 opts.expect = 'fail'
133
def ignore_output( name, opts ):
    """Setup: don't compare the test's output with the expected files."""
    opts.ignore_output = 1

def no_stdin( name, opts ):
    """Setup: don't attach anything to the program's stdin."""
    opts.no_stdin = 1

def combined_output( name, opts ):
    """Setup: check stdout and stderr together against one expected file."""
    opts.combined_output = True
142
143 # -----
144
def expect_fail_for( ways ):
    """Setup: the test is expected to fail, but only in the given ways."""
    return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )

def _expect_fail_for( name, opts, ways ):
    opts.expect_fail_for = ways

def expect_broken( bug ):
    """Setup: the test is broken (see ticket `bug`) and expected to fail."""
    return lambda name, opts, b=bug: _expect_broken (name, opts, b )

def _expect_broken( name, opts, bug ):
    record_broken(name, opts, bug)
    opts.expect = 'fail'

def expect_broken_for( bug, ways ):
    """Setup: broken (ticket `bug`), but only in the given ways."""
    return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )

def _expect_broken_for( name, opts, bug, ways ):
    record_broken(name, opts, bug)
    opts.expect_fail_for = ways

def record_broken(name, opts, bug):
    """Remember (bug, testdir, name) so --list-broken can report it."""
    global brokens
    me = (bug, opts.testdir, name)
    # idiomatic membership test (was 'not me in brokens')
    if me not in brokens:
        brokens.append(me)
170
171 # -----
172
def omit_ways( ways ):
    """Setup: never run the test in any of the given ways."""
    return lambda name, opts, ws=ways: _omit_ways( name, opts, ws )

def _omit_ways( name, opts, ways ):
    opts.omit_ways = ways

# -----

def only_ways( ways ):
    """Setup: run the test only in the given ways."""
    return lambda name, opts, ws=ways: _only_ways( name, opts, ws )

def _only_ways( name, opts, ways ):
    opts.only_ways = ways

# -----

def extra_ways( ways ):
    """Setup: also run the test in the given ways, on top of the defaults."""
    return lambda name, opts, ws=ways: _extra_ways( name, opts, ws )

def _extra_ways( name, opts, ways ):
    opts.extra_ways = ways
194
195 # -----
196
def omit_compiler_types( compiler_types ):
    """Setup: skip the test when the compiler is one of the given types."""
    return lambda name, opts, ct=compiler_types: _omit_compiler_types(name, opts, ct)

def _omit_compiler_types( name, opts, compiler_types ):
    if config.compiler_type in compiler_types:
        opts.skip = 1

# -----

def only_compiler_types( compiler_types ):
    """Setup: skip the test unless the compiler is one of the given types."""
    return lambda name, opts, ct=compiler_types: _only_compiler_types(name, opts, ct)

def _only_compiler_types( name, opts, compiler_types ):
    if config.compiler_type not in compiler_types:
        opts.skip = 1
212
213 # -----
214
def set_stdin( file ):
    """Setup: feed the given file to the program's stdin."""
    return lambda name, opts, f=file: _set_stdin(name, opts, f)

def _set_stdin( name, opts, f ):
    opts.stdin = f

# -----

def exit_code( val ):
    """Setup: the program is expected to exit with code `val`."""
    return lambda name, opts, v=val: _exit_code(name, opts, v)

def _exit_code( name, opts, v ):
    opts.exit_code = v
228
def signal_exit_code( val ):
    """Setup: the program is expected to die from signal number `val`.

    On Linux a process killed by a fatal signal exits with status
    128 + signal number (see http://www.tldp.org/LDP/abs/html/exitcodes.html),
    and the Mac OS X builders appear to behave the same way; Solaris
    reports the plain signal number.
    """
    if opsys('solaris2'):
        return exit_code( val )
    return exit_code( val + 128 )
239
240 # -----
241
def timeout_multiplier( val ):
    """Setup: scale this test's timeout by `val`."""
    return lambda name, opts, v=val: _timeout_multiplier(name, opts, v)

def _timeout_multiplier( name, opts, v ):
    opts.timeout_multiplier = v

# -----

def extra_run_opts( val ):
    """Setup: extra command-line arguments for running the program."""
    return lambda name, opts, v=val: _extra_run_opts(name, opts, v)

def _extra_run_opts( name, opts, v ):
    opts.extra_run_opts = v

# -----

def extra_hc_opts( val ):
    """Setup: extra flags to pass to the compiler."""
    return lambda name, opts, v=val: _extra_hc_opts(name, opts, v)

def _extra_hc_opts( name, opts, v ):
    opts.extra_hc_opts = v

# -----

def extra_clean( files ):
    """Setup: extra files to delete when cleaning up the test."""
    return lambda name, opts, fs=files: _extra_clean(name, opts, fs)

def _extra_clean( name, opts, v ):
    opts.clean_files = v
271
272 # -----
273
274 def stats_num_field( field, expecteds ):
275 return lambda name, opts, f=field, e=expecteds: _stats_num_field(name, opts, f, e);
276
277 def _stats_num_field( name, opts, field, expecteds ):
278 if field in opts.stats_range_fields:
279 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
280
281 if type(expecteds) is list:
282 for (b, expected, dev) in expecteds:
283 if b:
284 opts.stats_range_fields[field] = (expected, dev)
285 return
286 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
287
288 else:
289 (expected, dev) = expecteds
290 opts.stats_range_fields[field] = (expected, dev)
291
292 def compiler_stats_num_field( field, expecteds ):
293 return lambda name, opts, f=field, e=expecteds: _compiler_stats_num_field(name, opts, f, e);
294
295 def _compiler_stats_num_field( name, opts, field, expecteds ):
296 if field in opts.compiler_stats_range_fields:
297 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
298
299 # Compiler performance numbers change when debugging is on, making the results
300 # useless and confusing. Therefore, skip if debugging is on.
301 if compiler_debugged():
302 skip(name, opts)
303
304 for (b, expected, dev) in expecteds:
305 if b:
306 opts.compiler_stats_range_fields[field] = (expected, dev)
307 return
308
309 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
310
311 # -----
312
def when(b, f):
    """Apply setup `f` only when `b` holds; otherwise a no-op.

    When --list-broken is in effect we want to see every expect_broken
    call, so in that mode `f` is applied unconditionally.
    """
    return f if (b or config.list_broken) else normal

def unless(b, f):
    """Apply setup `f` only when `b` does NOT hold."""
    return when(not b, f)
323
# Predicates over the testsuite configuration, for use in when()/unless()
# guards.  Each one simply consults the global `config` object.

def doing_ghci():
    # Is the 'ghci' way enabled in this run at all?
    return 'ghci' in config.run_ways

def ghci_dynamic( ):
    return config.ghc_dynamic

def fast():
    return config.fast

def platform( plat ):
    return config.platform == plat

def opsys( os ):
    return config.os == os

def arch( arch ):
    return config.arch == arch

def wordsize( ws ):
    # config.wordsize is stored as a string, hence the str() conversion.
    return config.wordsize == str(ws)

def msys( ):
    return config.msys

def cygwin( ):
    return config.cygwin

def have_vanilla( ):
    return config.have_vanilla

def have_dynamic( ):
    return config.have_dynamic

def have_profiling( ):
    return config.have_profiling

def in_tree_compiler( ):
    return config.in_tree_compiler

def compiler_type( compiler ):
    return config.compiler_type == compiler

def compiler_lt( compiler, version ):
    # Using `compiler` at a version strictly below `version`?
    return config.compiler_type == compiler and \
           version_lt(config.compiler_version, version)

def compiler_le( compiler, version ):
    return config.compiler_type == compiler and \
           version_le(config.compiler_version, version)

def compiler_gt( compiler, version ):
    return config.compiler_type == compiler and \
           version_gt(config.compiler_version, version)

def compiler_ge( compiler, version ):
    return config.compiler_type == compiler and \
           version_ge(config.compiler_version, version)

def unregisterised( ):
    return config.unregisterised

def compiler_profiled( ):
    return config.compiler_profiled

def compiler_debugged( ):
    return config.compiler_debugged

def tag( t ):
    # Does the compiler advertise tag `t`?
    return t in config.compiler_tags
393
394 # ---
395
def namebase( nb ):
    """Setup: compare output against `nb`.* instead of <testname>.*.

    NB: unlike most setup factories in this file, the returned closure
    takes only `opts` (no `name` argument).
    """
    return lambda opts, nb=nb: _namebase(opts, nb)

def _namebase( opts, nb ):
    opts.with_namebase = nb
401
402 # ---
403
def high_memory_usage(name, opts):
    """Setup: run this test with the machine otherwise idle."""
    opts.alone = True

def multi_cpu_race(name, opts):
    """Setup: run this test alone.

    If a test is for a multi-CPU race, then running the test alone
    increases the chance that we'll actually see it.
    """
    opts.alone = True
411
412 # ---
def literate( name, opts ):
    """Setup: the source file is literate Haskell (.lhs)."""
    opts.literate = 1

def c_src( name, opts ):
    """Setup: the source file is C."""
    opts.c_src = 1

def objc_src( name, opts ):
    """Setup: the source file is Objective-C."""
    opts.objc_src = 1

def objcpp_src( name, opts ):
    """Setup: the source file is Objective-C++."""
    opts.objcpp_src = 1

def cmm_src( name, opts ):
    """Setup: the source file is C--."""
    opts.cmm_src = 1

def outputdir( odir ):
    """Setup: put compilation output in directory `odir`."""
    return lambda name, opts, d=odir: _outputdir(name, opts, d)

def _outputdir( name, opts, odir ):
    opts.outputdir = odir
433
434 # ----
435
def pre_cmd( cmd ):
    """Setup: shell command to run (in the test's directory) before the test."""
    # Use the default-bound parameter: the originals bound 'c=cmd' (etc.)
    # but then closed over the outer variable anyway, leaving a latent
    # late-binding trap and an unused binding.
    return lambda name, opts, c=cmd: _pre_cmd(name, opts, c)

def _pre_cmd( name, opts, cmd ):
    opts.pre_cmd = cmd

# ----

def clean_cmd( cmd ):
    """Setup: shell command to run when cleaning up after the test."""
    return lambda name, opts, c=cmd: _clean_cmd(name, opts, c)

def _clean_cmd( name, opts, cmd ):
    opts.clean_cmd = cmd

# ----

def cmd_prefix( prefix ):
    """Setup: prefix the command used to run the test with `prefix`."""
    return lambda name, opts, p=prefix: _cmd_prefix(name, opts, p)

def _cmd_prefix( name, opts, prefix ):
    opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd

# ----

def cmd_wrapper( fun ):
    """Setup: transform the command used to run the test with `fun`."""
    return lambda name, opts, f=fun: _cmd_wrapper(name, opts, f)

def _cmd_wrapper( name, opts, fun ):
    opts.cmd_wrapper = fun

# ----

def compile_cmd_prefix( prefix ):
    """Setup: prefix the compilation command with `prefix`."""
    return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, p)

def _compile_cmd_prefix( name, opts, prefix ):
    opts.compile_cmd_prefix = prefix

# ----

def check_stdout( f ):
    """Setup: use `f` to check the test's stdout instead of the default diff."""
    return lambda name, opts, f=f: _check_stdout(name, opts, f)

def _check_stdout( name, opts, f ):
    opts.check_stdout = f
481
482 # ----
483
def normalise_slashes( name, opts ):
    """Setup: normalise path separators in the test's output."""
    _normalise_fun(name, opts, normalise_slashes_)

def normalise_exe( name, opts ):
    """Setup: normalise executable suffixes in the test's output."""
    _normalise_fun(name, opts, normalise_exe_)

def normalise_fun( *fs ):
    """Setup: apply the given normalisers to the test's output."""
    return lambda name, opts: _normalise_fun(name, opts, fs)

def _normalise_fun( name, opts, *fs ):
    opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)

def normalise_errmsg_fun( *fs ):
    """Setup: apply the given normalisers to compiler error output."""
    return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)

def _normalise_errmsg_fun( name, opts, *fs ):
    opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)

def normalise_version_( *pkgs ):
    """Return a normaliser mapping '<pkg>-<version>' to '<pkg>-<VERSION>'."""
    pattern = '(' + '|'.join(map(re.escape, pkgs)) + ')-[0-9.]+'
    def normalise_version__( str ):
        return re.sub(pattern, '\\1-<VERSION>', str)
    return normalise_version__

def normalise_version( *pkgs ):
    """Setup: hide the version numbers of the given packages in all output."""
    def normalise_version__( name, opts ):
        _normalise_fun(name, opts, normalise_version_(*pkgs))
        _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
    return normalise_version__
513
def join_normalisers(*a):
    """
    Compose functions, flattening sequences.

       join_normalisers(f1,[f2,f3],f4)

    is the same as

       lambda x: f1(f2(f3(f4(x))))
    """

    # Compatibility shims: collections.Iterable moved to collections.abc
    # (and the old alias is gone in Python 3.10+), and Python 3 has no
    # 'basestring'.
    try:
        iterable_type = collections.abc.Iterable
    except AttributeError:            # Python 2
        iterable_type = collections.Iterable
    try:
        string_types = basestring     # Python 2
    except NameError:
        string_types = str            # Python 3

    def flatten(l):
        """
        Taken from http://stackoverflow.com/a/2158532/946226
        """
        for el in l:
            if isinstance(el, iterable_type) and not isinstance(el, string_types):
                for sub in flatten(el):
                    yield sub
            else:
                yield el

    a = flatten(a)

    fn = lambda x: x  # identity function
    for f in a:
        assert callable(f)
        # compose: apply f first, then everything accumulated so far
        fn = lambda x, f=f, fn=fn: fn(f(x))
    return fn
543
544 # ----
545 # Function for composing two opt-fns together
546
def executeSetups(fs, name, opts):
    """Apply a setup function, or an arbitrarily nested list of them, to opts."""
    if type(fs) is list:
        for setup in fs:              # run each setup in order
            executeSetups(setup, name, opts)
    else:
        fs(name, opts)                # a single setup function
555
556 # -----------------------------------------------------------------------------
557 # The current directory of tests
558
def newTestDir( dir ):
    """Enter a new test directory: reset the per-directory default options."""
    global thisdir_settings
    # reset the options for this test directory
    thisdir_settings = lambda name, opts, dir=dir: _newTestDir( name, opts, dir )

def _newTestDir( name, opts, dir ):
    opts.testdir = dir
    opts.compiler_always_flags = config.compiler_always_flags
567
568 # -----------------------------------------------------------------------------
569 # Actually doing tests
570
# Tests queued for the two scheduling classes, plus the set of all test
# names registered so far (used to reject duplicate registrations).
parallelTests = []
aloneTests = []
allTestNames = set()
574
def runTest (opts, name, func, args):
    """Run one test: on a fresh worker thread when threading is enabled,
    otherwise synchronously in this process."""
    # ok records whether we successfully claimed a thread slot, so the
    # exception handler knows whether the pool lock still needs releasing.
    ok = 0

    if config.use_threads:
        t.thread_pool.acquire()
        try:
            # wait until a thread slot is free
            while config.threads<(t.running_threads+1):
                t.thread_pool.wait()
            t.running_threads = t.running_threads+1
            ok=1
            t.thread_pool.release()
            thread.start_new_thread(test_common_thread, (name, opts, func, args))
        except:
            # died before claiming the slot: make sure the lock is released
            if not ok:
                t.thread_pool.release()
    else:
        test_common_work (name, opts, func, args)
592
# name  :: String
# setup :: TestOpts -> IO ()
def test (name, setup, func, args):
    """Register a test with the driver.

    `setup` is a setup function (or nested list of them) applied to a
    private copy of the default options; `func` is how the test is run
    (compile, compile_and_run, ...) and `args` its extra arguments.
    Queues the test on aloneTests or parallelTests depending on
    opts.alone.
    """
    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
    myTestOpts = copy.deepcopy(default_testopts)

    executeSetups([thisdir_settings, setup], name, myTestOpts)

    thisTest = lambda : runTest(myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
    else:
        parallelTests.append(thisTest)
    allTestNames.add(name)
618
if config.use_threads:
    def test_common_thread(name, opts, func, args):
        # Worker-thread wrapper around test_common_work: hold the global
        # lock while the test runs, then give back our thread slot and
        # wake anyone waiting for one.
        t.lock.acquire()
        try:
            test_common_work(name,opts,func,args)
        finally:
            t.lock.release()
            t.thread_pool.acquire()
            t.running_threads = t.running_threads - 1
            t.thread_pool.notify()
            t.thread_pool.release()
630
def get_package_cache_timestamp():
    """Return the mtime of the package-db cache file, or 0.0 if unavailable."""
    if config.package_conf_cache_file == '':
        return 0.0
    else:
        try:
            return os.stat(config.package_conf_cache_file).st_mtime
        except OSError:   # was a bare 'except': only stat() failures are expected here
            return 0.0
639
640
def test_common_work (name, opts, func, args):
    """The core of running one registered test: decide which ways to run
    it in, run them, then clean up the test's files.  Any unhandled
    exception is converted into a framework failure."""
    try:
        t.total_tests = t.total_tests+1
        setLocalTestOpts(opts)

        # Snapshot the package-db cache mtime so we can detect a test
        # that modifies the package database (checked again at the end).
        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()

        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']

        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]

        t.total_test_cases = t.total_test_cases + len(all_ways)

        # A way is run only if the test isn't skipped, it matches any
        # --only/way restrictions, and perf tests aren't being skipped.
        ok_way = lambda way: \
            not getTestOpts().skip \
            and (config.only == [] or name in config.only) \
            and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and way not in getTestOpts().omit_ways

        # Which ways we are asked to skip
        do_ways = list(filter (ok_way,all_ways))

        # In fast mode, we skip all but one way
        if config.fast and len(do_ways) > 0:
            do_ways = [do_ways[0]]

        if not config.clean_only:
            # Run the required tests...
            for way in do_ways:
                if stopping():
                    break
                do_test (name, way, func, args)

            # ...and record the rest as skipped.
            for way in all_ways:
                if way not in do_ways:
                    skiptest (name,way)

        if getTestOpts().cleanup != '' and (config.clean_only or do_ways != []):
            pretest_cleanup(name)
            # Delete every by-product the various test kinds can leave behind.
            clean([name + suff for suff in [
                      '', '.exe', '.exe.manifest', '.genscript',
                      '.stderr.normalised',        '.stdout.normalised',
                      '.run.stderr.normalised',    '.run.stdout.normalised',
                      '.comp.stderr.normalised',   '.comp.stdout.normalised',
                      '.interp.stderr.normalised', '.interp.stdout.normalised',
                      '.stats', '.comp.stats',
                      '.hi', '.o', '.prof', '.exe.prof', '.hc',
                      '_stub.h', '_stub.c', '_stub.o',
                      '.hp', '.exe.hp', '.ps', '.aux', '.hcr', '.eventlog']])

            if func == multi_compile or func == multi_compile_fail:
                # Also clean the separately-compiled extra modules.
                extra_mods = args[1]
                clean([replace_suffix(fx[0],'o') for fx in extra_mods])
                clean([replace_suffix(fx[0], 'hi') for fx in extra_mods])

            clean(getTestOpts().clean_files)

            if getTestOpts().outputdir != None:
                odir = in_testdir(getTestOpts().outputdir)
                try:
                    shutil.rmtree(odir)
                except:
                    pass

            try:
                shutil.rmtree(in_testdir('.hpc.' + name))
            except:
                pass

            try:
                cleanCmd = getTestOpts().clean_cmd
                if cleanCmd != None:
                    result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + cleanCmd)
                    if result != 0:
                        framework_fail(name, 'cleaning', 'clean-command failed: ' + str(result))
            except:
                framework_fail(name, 'cleaning', 'clean-command exception')

        package_conf_cache_file_end_timestamp = get_package_cache_timestamp();

        # Fail the whole test if the package-db cache mtime changed.
        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))

        # Note any files this test wrote but did not remove (best-effort;
        # files_written is maintained elsewhere).
        try:
            for f in files_written[name]:
                if os.path.exists(f):
                    try:
                        if not f in files_written_not_removed[name]:
                            files_written_not_removed[name].append(f)
                    except:
                        files_written_not_removed[name] = [f]
        except:
            pass
    except Exception as e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
751
def clean(strs):
    """Delete files matching the given globs (relative to the test dir)."""
    for pattern in strs:   # renamed from 'str', which shadowed the builtin
        for name in glob.glob(in_testdir(pattern)):
            clean_full_path(name)
756
def clean_full_path(name):
    """Remove `name`, whether it is a file or an empty directory.

    Never raises.  If both removal attempts fail, the errors are printed,
    except for ENOENT: we routinely try to clean files that were never
    created, so a missing file is not worth reporting.
    """
    failures = []
    try:
        # Remove files...
        os.remove(name)
        return
    except OSError as e:
        failures.append(e)
    try:
        # ... and empty directories
        os.rmdir(name)
        return
    except OSError as e:
        failures.append(e)
    # Both attempts failed.  We don't want to fail here, but we do want
    # to know what went wrong, so print the exceptions (minus ENOENT).
    for err in failures:
        if err.errno != errno.ENOENT:
            print(err)
774
def do_test(name, way, func, args):
    """Run one test in one way and record the outcome in the global
    tallies, classifying it against the test's expectations
    ('pass'/'fail'/'missing-lib' plus expect_fail_for)."""
    full_name = name + '(' + way + ')'

    try:
        if_verbose(2, "=====> %s %d of %d %s " % \
                    (full_name, t.total_tests, len(allTestNames), \
                    [t.n_unexpected_passes, \
                     t.n_unexpected_failures, \
                     t.n_framework_failures]))

        # Release the global lock while the test runs so other worker
        # threads can make progress; re-acquired below.
        if config.use_threads:
            t.lock.release()

        # Run the per-test pre-command, if any, in the test's directory.
        try:
            preCmd = getTestOpts().pre_cmd
            if preCmd != None:
                result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + preCmd)
                if result != 0:
                    framework_fail(name, way, 'pre-command failed: ' + str(result))
        except:
            framework_fail(name, way, 'pre-command exception')

        try:
            result = func(*[name,way] + args)
        finally:
            if config.use_threads:
                t.lock.acquire()

        # Sanity-check the declared expectation before classifying.
        if getTestOpts().expect != 'pass' and \
           getTestOpts().expect != 'fail' and \
           getTestOpts().expect != 'missing-lib':
            framework_fail(name, way, 'bad expected ' + getTestOpts().expect)

        try:
            passFail = result['passFail']
        except:
            passFail = 'No passFail found'

        if passFail == 'pass':
            if getTestOpts().expect == 'pass' \
               and way not in getTestOpts().expect_fail_for:
                t.n_expected_passes = t.n_expected_passes + 1
                if name in t.expected_passes:
                    t.expected_passes[name].append(way)
                else:
                    t.expected_passes[name] = [way]
            else:
                if_verbose(1, '*** unexpected pass for %s' % full_name)
                t.n_unexpected_passes = t.n_unexpected_passes + 1
                addPassingTestInfo(t.unexpected_passes, getTestOpts().testdir, name, way)
        elif passFail == 'fail':
            if getTestOpts().expect == 'pass' \
               and way not in getTestOpts().expect_fail_for:
                reason = result['reason']
                tag = result.get('tag')
                # Stat failures are tallied separately from ordinary ones.
                if tag == 'stat':
                    if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
                    t.n_unexpected_stat_failures = t.n_unexpected_stat_failures + 1
                    addFailingTestInfo(t.unexpected_stat_failures, getTestOpts().testdir, name, reason, way)
                else:
                    if_verbose(1, '*** unexpected failure for %s' % full_name)
                    t.n_unexpected_failures = t.n_unexpected_failures + 1
                    addFailingTestInfo(t.unexpected_failures, getTestOpts().testdir, name, reason, way)
            else:
                if getTestOpts().expect == 'missing-lib':
                    t.n_missing_libs = t.n_missing_libs + 1
                    if name in t.missing_libs:
                        t.missing_libs[name].append(way)
                    else:
                        t.missing_libs[name] = [way]
                else:
                    t.n_expected_failures = t.n_expected_failures + 1
                    if name in t.expected_failures:
                        t.expected_failures[name].append(way)
                    else:
                        t.expected_failures[name] = [way]
        else:
            framework_fail(name, way, 'bad result ' + passFail)
    except KeyboardInterrupt:
        stopNow()
    except:
        framework_fail(name, way, 'do_test exception')
        traceback.print_exc()
857 traceback.print_exc()
858
def addPassingTestInfo (testInfos, directory, name, way):
    """Record an unexpected pass: testInfos[directory][name] is the list
    of ways in which the test unexpectedly passed."""
    directory = re.sub('^\\.[/\\\\]', '', directory)   # strip leading ./ or .\
    # setdefault replaces the original nested 'if not x in' chains
    testInfos.setdefault(directory, {}).setdefault(name, []).append(way)

def addFailingTestInfo (testInfos, directory, name, reason, way):
    """Record an unexpected failure: testInfos[directory][name][reason]
    is the list of ways that failed with that reason."""
    directory = re.sub('^\\.[/\\\\]', '', directory)   # strip leading ./ or .\
    testInfos.setdefault(directory, {}).setdefault(name, {}).setdefault(reason, []).append(way)
883
def skiptest (name, way):
    """Record that test `name` was skipped in `way`."""
    t.n_tests_skipped += 1
    t.tests_skipped.setdefault(name, []).append(way)
891
def framework_fail( name, way, reason ):
    """Record a failure of the test framework itself (not of the test)."""
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
    t.n_framework_failures += 1
    t.framework_failures.setdefault(name, []).append(way)
900
def badResult(result):
    """True unless `result` is a well-formed passing result dictionary."""
    try:
        return result['passFail'] != 'pass'
    except (KeyError, TypeError):   # was a bare 'except': malformed results count as bad
        return True

def passed():
    """The result dictionary for a passing test."""
    return {'passFail': 'pass'}

def failBecause(reason, tag=None):
    """The result dictionary for a failing test, with a human-readable
    reason and an optional machine-readable tag (e.g. 'stat')."""
    return {'passFail': 'fail', 'reason': reason, 'tag': tag}
914
915 # -----------------------------------------------------------------------------
916 # Generic command tests
917
918 # A generic command test is expected to run and exit successfully.
919 #
920 # The expected exit code can be changed via exit_code() as normal, and
921 # the expected stdout/stderr are stored in <testname>.stdout and
922 # <testname>.stderr. The output of the command can be ignored
923 # altogether by using run_command_ignore_output instead of
924 # run_command.
925
def run_command( name, way, cmd ):
    """Run `cmd` and check its exit code and stdout/stderr (see the
    comment block above for the expected-output conventions)."""
    return simple_run( name, '', cmd, '' )
928
929 # -----------------------------------------------------------------------------
930 # GHCi tests
931
def ghci_script_without_flag(flag):
    """Like ghci_script, but with `flag` removed from the default flags."""
    def apply(name, way, script):
        kept = [f for f in getTestOpts().compiler_always_flags if f != flag]
        return ghci_script_override_default_flags(kept)(name, way, script)
    return apply

def ghci_script_override_default_flags(overrides):
    """Like ghci_script, but using `overrides` instead of the default flags."""
    def apply(name, way, script):
        return ghci_script(name, way, script, overrides)
    return apply
944
def ghci_script( name, way, script, override_flags = None ):
    """Run `script` through GHCi and check the session's output."""
    # filter out -fforce-recomp from compiler_always_flags, because we're
    # actually testing the recompilation behaviour in the GHCi tests.
    flags = ' '.join(get_compiler_flags(override_flags, noforce=True))

    way_flags = ' '.join(config.way_flags(name)['ghci'])

    # We pass HC and HC_OPTS as environment variables, so that the
    # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
    cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {flags} {way_flags}'
          ).format(flags=flags, way_flags=way_flags)

    # The script itself is fed to GHCi on stdin.
    getTestOpts().stdin = script
    return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
959
960 # -----------------------------------------------------------------------------
961 # Compile-only tests
962
def compile_override_default_flags(overrides):
    """Compile the test with `overrides` instead of the default flags."""
    def apply(name, way, extra_opts):
        return do_compile(name, way, 0, '', [], extra_opts, overrides)
    return apply

def compile_fail_override_default_flags(overrides):
    """Expect compilation to fail, using `overrides` instead of the default flags."""
    def apply(name, way, extra_opts):
        return do_compile(name, way, 1, '', [], extra_opts, overrides)
    return apply

def compile_without_flag(flag):
    """Compile the test with `flag` removed from the default flags."""
    def apply(name, way, extra_opts):
        overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
        return compile_override_default_flags(overrides)(name, way, extra_opts)
    return apply

def compile_fail_without_flag(flag):
    """Expect compilation to fail with `flag` removed from the default flags."""
    def apply(name, way, extra_opts):
        # Bug fix: the original read 'getTestOpts.compiler_always_flags',
        # missing the call parentheses, which raises AttributeError at
        # runtime (cf. compile_without_flag above, which calls it).
        overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
        return compile_fail_override_default_flags(overrides)(name, way, extra_opts)
    return apply
988
def compile( name, way, extra_hc_opts ):
    # Compile a single module; expect success.
    return do_compile( name, way, 0, '', [], extra_hc_opts )

def compile_fail( name, way, extra_hc_opts ):
    # Compile a single module; expect the compilation to fail.
    return do_compile( name, way, 1, '', [], extra_hc_opts )

def multimod_compile( name, way, top_mod, extra_hc_opts ):
    # Compile a multi-module program rooted at top_mod; expect success.
    return do_compile( name, way, 0, top_mod, [], extra_hc_opts )

def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
    # Compile a multi-module program rooted at top_mod; expect failure.
    return do_compile( name, way, 1, top_mod, [], extra_hc_opts )

def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Build extra_mods first (via extras_build in do_compile), then the
    # program; expect success.
    return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)

def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Build extra_mods first, then the program; expect failure.
    return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
1006
def do_compile( name, way, should_fail, top_mod, extra_mods, extra_hc_opts, override_flags = None ):
    """Compile the test and compare the compiler's stderr against the
    expected file.  `should_fail` selects whether compilation is
    expected to succeed (0) or fail (1)."""
    # print 'Compile only, extra args = ', extra_hc_opts
    pretest_cleanup(name)

    # Build any extra modules first; they may contribute extra flags.
    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    # Force recompilation when extra modules are involved.
    force = 0
    if extra_mods:
        force = 1
    result = simple_build( name, way, extra_hc_opts, should_fail, top_mod, 0, 1, force, override_flags )

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    if getTestOpts().with_namebase == None:
        namebase = name
    else:
        namebase = getTestOpts().with_namebase

    (platform_specific, expected_stderr_file) = platform_wordsize_qualify(namebase, 'stderr')
    actual_stderr_file = qualify(name, 'comp.stderr')

    if not compare_outputs(way, 'stderr',
                           join_normalisers(getTestOpts().extra_errmsg_normaliser,
                                            normalise_errmsg,
                                            normalise_whitespace),
                           expected_stderr_file, actual_stderr_file):
        return failBecause('stderr mismatch')

    # no problems found, this test passed
    return passed()
1045
def compile_cmp_asm( name, way, extra_hc_opts ):
    """Compile a .cmm file to assembly and compare it against <test>.asm."""
    # NOTE(review): unlike do_compile this prints unconditionally rather
    # than going through if_verbose -- possibly a leftover debug trace;
    # confirm before silencing.
    print('Compile only, extra args = ', extra_hc_opts)
    pretest_cleanup(name)
    result = simple_build( name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0, 0)

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    if getTestOpts().with_namebase == None:
        namebase = name
    else:
        namebase = getTestOpts().with_namebase

    (platform_specific, expected_asm_file) = platform_wordsize_qualify(namebase, 'asm')
    actual_asm_file = qualify(name, 's')

    if not compare_outputs(way, 'asm',
                           join_normalisers(normalise_errmsg, normalise_asm),
                           expected_asm_file, actual_asm_file):
        return failBecause('asm mismatch')

    # no problems found, this test passed
    return passed()
1073
1074 # -----------------------------------------------------------------------------
1075 # Compile-and-run tests
1076
def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts ):
    """Compile the program and then run it, checking the run's output.

    In the 'ghci' way the program is interpreted instead of compiled."""
    # print 'Compile and run, extra args = ', extra_hc_opts
    pretest_cleanup(name)

    # Build any extra modules first; they may contribute extra flags.
    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    if way == 'ghci': # interpreted...
        return interpreter_run( name, way, extra_hc_opts, 0, top_mod )
    else: # compiled...
        force = 0
        if extra_mods:
            force = 1

        result = simple_build( name, way, extra_hc_opts, 0, top_mod, 1, 1, force)
        if badResult(result):
            return result

        cmd = './' + name;

        # we don't check the compiler's stderr for a compile-and-run test
        return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1101
def compile_and_run( name, way, extra_hc_opts ):
    """Compile-and-run with no top module and no extra modules."""
    return compile_and_run__(name, way, '', [], extra_hc_opts)
1104
def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
    """Compile-and-run a --make build rooted at top_mod."""
    return compile_and_run__(name, way, top_mod, [], extra_hc_opts)
1107
def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
    """Compile-and-run a --make build with additional extra modules."""
    return compile_and_run__(name, way, top_mod, extra_mods, extra_hc_opts)
1110
def stats( name, way, stats_file ):
    """Check the run-time statistics dumped to stats_file against the
    test's stats_range_fields."""
    return checkStats(name, way, stats_file, getTestOpts().stats_range_fields)
1114
1115 # -----------------------------------------------------------------------------
1116 # Check -t stats info
1117
def checkStats(name, way, stats_file, range_fields):
    """Check the metrics in stats_file (RTS -t --machine-readable output)
    against range_fields, a mapping from field name to (expected, dev)
    where dev is the allowed deviation in percent.

    Returns a pass result, or the last failure found.

    Bug fix: previously a missing field recorded a failure and then
    crashed on m.group(1); we now skip to the next field instead.
    """
    full_name = name + '(' + way + ')'

    result = passed()
    if len(range_fields) > 0:
        try:
            f = open(in_testdir(stats_file))
        except IOError as e:
            return failBecause(str(e))
        contents = f.read()
        f.close()

        for (field, (expected, dev)) in range_fields.items():
            m = re.search('\("' + field + '", "([0-9]+)"\)', contents)
            if m == None:
                print('Failed to find field: ', field)
                result = failBecause('no such stats field')
                # Nothing to compare for this field; move on rather than
                # dereferencing a None match object.
                continue
            val = int(m.group(1))

            lowerBound = trunc(expected * ((100 - float(dev))/100))
            upperBound = trunc(0.5 + ceil(expected * ((100 + float(dev))/100)))

            deviation = round(((float(val) * 100)/ expected) - 100, 1)

            if val < lowerBound:
                print(field, 'value is too low:')
                print('(If this is because you have improved GHC, please')
                print('update the test so that GHC doesn\'t regress again)')
                result = failBecause('stat too good', tag='stat')
            if val > upperBound:
                print(field, 'value is too high:')
                result = failBecause('stat not good enough', tag='stat')

            if val < lowerBound or val > upperBound or config.verbose >= 4:
                # Width of the widest figure, so the numbers line up.
                length = max(len(str(x)) for x in [expected, lowerBound, upperBound, val])

                def display(descr, val, extra):
                    print(descr, str(val).rjust(length), extra)

                display(' Expected ' + full_name + ' ' + field + ':', expected, '+/-' + str(dev) + '%')
                display(' Lower bound ' + full_name + ' ' + field + ':', lowerBound, '')
                display(' Upper bound ' + full_name + ' ' + field + ':', upperBound, '')
                display(' Actual ' + full_name + ' ' + field + ':', val, '')
                if val != expected:
                    display(' Deviation ' + full_name + ' ' + field + ':', deviation, '%')

    return result
1169
1170 # -----------------------------------------------------------------------------
1171 # Build a single-module program
1172
def extras_build( way, extra_mods, extra_hc_opts ):
    """Build each (module, options) pair in extra_mods, accumulating the
    object files of non-Haskell extras into the returned hc_opts."""
    for (mod, mod_opts) in extra_mods:
        build_result = simple_build(mod, way, mod_opts + ' ' + extra_hc_opts,
                                    0, '', 0, 0, 0)
        if not (mod.endswith('.hs') or mod.endswith('.lhs')):
            # Non-Haskell source: link in its object file explicitly.
            extra_hc_opts += ' ' + replace_suffix(mod, 'o')
        if badResult(build_result):
            return build_result

    return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1183
1184
def simple_build( name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, noforce, override_flags = None ):
    """Compile a program (or a --make build rooted at top_mod).

    name           -- test name; also the source/executable base name
    way            -- test way (selects per-way compiler flags)
    extra_hc_opts  -- extra compiler flags
    should_fail    -- nonzero if the compilation is expected to fail
    top_mod        -- root module for --make builds ('' for single-module)
    link           -- nonzero to link an executable
    addsuf         -- nonzero to append the .hs/.lhs suffix to name
    noforce        -- nonzero to drop -fforce-recomp from the flags
    override_flags -- replaces the usual base flags when not None

    Compiler output goes to <name>.comp.stderr.  Returns pass/fail.
    """
    opts = getTestOpts()
    errname = add_suffix(name, 'comp.stderr')
    rm_no_fail( qualify(errname, '') )

    # Work out the source file name and remove stale build products.
    if top_mod != '':
        srcname = top_mod
        rm_no_fail( qualify(name, '') )
        base, suf = os.path.splitext(top_mod)
        rm_no_fail( qualify(base, '') )
        rm_no_fail( qualify(base, 'exe') )
    elif addsuf:
        srcname = add_hs_lhs_suffix(name)
        rm_no_fail( qualify(name, '') )
    else:
        srcname = name
        rm_no_fail( qualify(name, 'o') )

    rm_no_fail( qualify(replace_suffix(srcname, "o"), '') )

    # Choose the compilation mode: --make, link-only, compile-to-C, or
    # plain compile.
    to_do = ''
    if top_mod != '':
        to_do = '--make '
        if link:
            to_do = to_do + '-o ' + name
    elif link:
        to_do = '-o ' + name
    elif opts.compile_to_hc:
        to_do = '-C'
    else:
        to_do = '-c' # just compile

    # Ask the RTS for machine-readable stats when this test checks
    # compiler performance figures.
    stats_file = name + '.comp.stats'
    if len(opts.compiler_stats_range_fields) > 0:
        extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'

    # Required by GHC 7.3+, harmless for earlier versions:
    if (getTestOpts().c_src or
        getTestOpts().objc_src or
        getTestOpts().objcpp_src or
        getTestOpts().cmm_src):
        extra_hc_opts += ' -no-hs-main '

    if getTestOpts().compile_cmd_prefix == '':
        cmd_prefix = ''
    else:
        cmd_prefix = getTestOpts().compile_cmd_prefix + ' '

    flags = ' '.join(get_compiler_flags(override_flags, noforce) +
                     config.way_flags(name)[way])

    # {compiler} is left for runCmdFor to substitute from config.
    cmd = ('cd {opts.testdir} && {cmd_prefix} '
           '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts} '
           '> {errname} 2>&1'
          ).format(**locals())

    result = runCmdFor(name, cmd)

    if result != 0 and not should_fail:
        actual_stderr = qualify(name, 'comp.stderr')
        if_verbose(1,'Compile failed (status ' + repr(result) + ') errors were:')
        if_verbose_dump(1,actual_stderr)

    # ToDo: if the sub-shell was killed by ^C, then exit

    statsResult = checkStats(name, way, stats_file, opts.compiler_stats_range_fields)

    if badResult(statsResult):
        return statsResult

    # Expected/actual compilation outcome must agree.
    if should_fail:
        if result == 0:
            return failBecause('exit code 0')
    else:
        if result != 0:
            return failBecause('exit code non-0')

    return passed()
1263
1264 # -----------------------------------------------------------------------------
1265 # Run a program and check its output
1266 #
1267 # If testname.stdin exists, route input from that, else
1268 # from /dev/null. Route output to testname.run.stdout and
1269 # testname.run.stderr. Returns the exit code of the run.
1270
def simple_run( name, way, prog, args ):
    """Run the compiled program 'prog' with 'args' and check its exit
    code, stdout/stderr and, for profiled ways, its heap/time profiles.

    Stdin is taken from opts.stdin, else <name>.stdin if it exists, else
    /dev/null.  Output goes to <name>.run.stdout / <name>.run.stderr.
    Returns a pass/fail result.
    """
    opts = getTestOpts()

    # figure out what to use for stdin
    if opts.stdin != '':
        use_stdin = opts.stdin
    else:
        stdin_file = add_suffix(name, 'stdin')
        if os.path.exists(in_testdir(stdin_file)):
            use_stdin = stdin_file
        else:
            use_stdin = '/dev/null'

    run_stdout = add_suffix(name,'run.stdout')
    run_stderr = add_suffix(name,'run.stderr')

    # Remove stale output from any previous run.
    rm_no_fail(qualify(name,'run.stdout'))
    rm_no_fail(qualify(name,'run.stderr'))
    rm_no_fail(qualify(name, 'hp'))
    rm_no_fail(qualify(name,'ps'))
    rm_no_fail(qualify(name, 'prof'))

    my_rts_flags = rts_flags(way)

    # Ask the RTS for machine-readable stats when this test checks them.
    stats_file = name + '.stats'
    if len(opts.stats_range_fields) > 0:
        args += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'

    if opts.no_stdin:
        stdin_comes_from = ''
    else:
        stdin_comes_from = ' <' + use_stdin

    # redirection_append is used when a cmd_wrapper re-runs the command.
    if opts.combined_output:
        redirection = ' > {0} 2>&1'.format(run_stdout)
        redirection_append = ' >> {0} 2>&1'.format(run_stdout)
    else:
        redirection = ' > {0} 2> {1}'.format(run_stdout, run_stderr)
        redirection_append = ' >> {0} 2>> {1}'.format(run_stdout, run_stderr)

    cmd = prog + ' ' + args + ' ' \
        + my_rts_flags + ' ' \
        + stdin_comes_from \
        + redirection

    if opts.cmd_wrapper != None:
        cmd = opts.cmd_wrapper(cmd) + redirection_append

    cmd = 'cd ' + opts.testdir + ' && ' + cmd

    # run the command
    result = runCmdFor(name, cmd, timeout_multiplier=opts.timeout_multiplier)

    # runCmdFor packs the status as (exit code << 8) | signal.
    exit_code = result >> 8
    signal = result & 0xff

    # check the exit code
    if exit_code != opts.exit_code:
        print('Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')')
        dump_stdout(name)
        dump_stderr(name)
        return failBecause('bad exit code')

    # Profiling checks are only relevant when the way passes -h / -p.
    check_hp = my_rts_flags.find("-h") != -1
    check_prof = my_rts_flags.find("-p") != -1

    if not opts.ignore_output:
        bad_stderr = not opts.combined_output and not check_stderr_ok(name, way)
        bad_stdout = not check_stdout_ok(name, way)
        if bad_stderr:
            return failBecause('bad stderr')
        if bad_stdout:
            return failBecause('bad stdout')
        # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
        if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
            return failBecause('bad heap profile')
        if check_prof and not check_prof_ok(name, way):
            return failBecause('bad profile')

    return checkStats(name, way, stats_file, opts.stats_range_fields)
1351
def rts_flags(way):
    """Return the '+RTS ... -RTS' argument string for the given way, or
    '' when the way needs no RTS flags."""
    if way == '':
        return ''
    args = config.way_rts_flags[way]
    return '+RTS ' + ' '.join(args) + ' -RTS' if args else ''
1362
1363 # -----------------------------------------------------------------------------
1364 # Run a program in the interpreter and check its output
1365
def interpreter_run( name, way, extra_hc_opts, compile_only, top_mod ):
    """Run the test under GHCi, driving the session with a generated
    script.  GHCi's combined output is split at a marker line into
    compiler output (<name>.comp.*) and program output (<name>.run.*),
    which are then checked as for a compiled run.

    Fixes over the previous version: the test's stdin file was opened
    into an unused local and never closed (fd leak), and the script file
    was closed twice; both removed.
    """
    outname = add_suffix(name, 'interp.stdout')
    errname = add_suffix(name, 'interp.stderr')
    rm_no_fail(outname)
    rm_no_fail(errname)
    rm_no_fail(name)

    if (top_mod == ''):
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = top_mod

    scriptname = add_suffix(name, 'genscript')
    qscriptname = in_testdir(scriptname)
    rm_no_fail(qscriptname)

    delimiter = '===== program output begins here\n'

    script = open(qscriptname, 'w')
    if not compile_only:
        # set the prog name and command-line args to match the compiled
        # environment.
        script.write(':set prog ' + name + '\n')
        script.write(':set args ' + getTestOpts().extra_run_opts + '\n')
        # Add marker lines to the stdout and stderr output files, so we
        # can separate GHCi's output from the program's.
        script.write(':! echo ' + delimiter)
        script.write(':! echo 1>&2 ' + delimiter)
        # Set stdout to be line-buffered to match the compiled environment.
        script.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
        # wrapping in GHC.TopHandler.runIO ensures we get the same output
        # in the event of an exception as for the compiled program.
        script.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
    script.close()

    # figure out what to use for stdin
    if getTestOpts().stdin != '':
        stdin_file = in_testdir(getTestOpts().stdin)
    else:
        stdin_file = qualify(name, 'stdin')

    if os.path.exists(stdin_file):
        # Append the test's stdin to the script so GHCi's stdin feeds
        # the program after the setup commands.
        os.system('cat ' + stdin_file + ' >>' + qscriptname)

    flags = ' '.join(get_compiler_flags(override_flags=None, noforce=False) +
                     config.way_flags(name)[way])

    if getTestOpts().combined_output:
        redirection = ' > {0} 2>&1'.format(outname)
        redirection_append = ' >> {0} 2>&1'.format(outname)
    else:
        redirection = ' > {0} 2> {1}'.format(outname, errname)
        redirection_append = ' >> {0} 2>> {1}'.format(outname, errname)

    cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts} '
           '< {scriptname} {redirection}'
          ).format(**locals())

    if getTestOpts().cmd_wrapper != None:
        cmd = getTestOpts().cmd_wrapper(cmd) + redirection_append

    cmd = 'cd ' + getTestOpts().testdir + " && " + cmd

    result = runCmdFor(name, cmd, timeout_multiplier=getTestOpts().timeout_multiplier)

    exit_code = result >> 8
    signal = result & 0xff

    # split the stdout into compilation/program output
    split_file(in_testdir(outname), delimiter,
               qualify(name, 'comp.stdout'),
               qualify(name, 'run.stdout'))
    split_file(in_testdir(errname), delimiter,
               qualify(name, 'comp.stderr'),
               qualify(name, 'run.stderr'))

    # check the exit code
    if exit_code != getTestOpts().exit_code:
        print('Wrong exit code (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
        dump_stdout(name)
        dump_stderr(name)
        return failBecause('bad exit code')

    # ToDo: if the sub-shell was killed by ^C, then exit

    if getTestOpts().ignore_output or (check_stderr_ok(name, way) and
                                       check_stdout_ok(name, way)):
        return passed()
    else:
        return failBecause('bad stdout or stderr')
1459
1460
def split_file(in_fn, delimiter, out1_fn, out2_fn):
    """Split in_fn at the first line equal to delimiter (compared after
    stripping CRs and leading whitespace): earlier lines go to out1_fn,
    later lines to out2_fn, and the delimiter line itself is dropped.

    Fix: the input file handle was previously never closed; all three
    files are now managed by 'with'.
    """
    with open(in_fn) as infile:
        with open(out1_fn, 'w') as out1:
            line = infile.readline()
            line = re.sub('\r', '', line) # ignore Windows EOL
            while (re.sub('^\s*','',line) != delimiter and line != ''):
                out1.write(line)
                line = infile.readline()
                line = re.sub('\r', '', line)

        with open(out2_fn, 'w') as out2:
            line = infile.readline()
            while (line != ''):
                out2.write(line)
                line = infile.readline()
1479
1480 # -----------------------------------------------------------------------------
1481 # Utils
def get_compiler_flags(override_flags, noforce):
    """Assemble the base compiler flag list for the current test.

    override_flags replaces compiler_always_flags when not None; noforce
    strips -fforce-recomp so recompilation avoidance can kick in.
    """
    opts = getTestOpts()

    base = override_flags if override_flags is not None else opts.compiler_always_flags
    flags = copy.copy(base)

    if noforce:
        flags = [f for f in flags if f != '-fforce-recomp']

    flags.append(opts.extra_hc_opts)

    if opts.outputdir != None:
        flags.extend(["-outputdir", opts.outputdir])

    return flags
1499
def check_stdout_ok(name, way):
    """Compare the program's stdout with the expected sample (after
    normalisation), or defer to a test-supplied check_stdout hook."""
    opts = getTestOpts()
    namebase = name if opts.with_namebase == None else opts.with_namebase

    actual_stdout_file = qualify(name, 'run.stdout')
    (platform_specific, expected_stdout_file) = platform_wordsize_qualify(namebase, 'stdout')

    # Platform-specific samples are compared without normalisation.
    def norm(str):
        return str if platform_specific else normalise_output(str)

    extra_norm = join_normalisers(norm, opts.extra_normaliser)

    check_stdout = opts.check_stdout
    if check_stdout:
        return check_stdout(actual_stdout_file, extra_norm)

    return compare_outputs(way, 'stdout', extra_norm,
                           expected_stdout_file, actual_stdout_file)
1523
def dump_stdout( name ):
    """Print the test's captured run-time stdout."""
    print('Stdout:')
    print(read_no_crs(qualify(name, 'run.stdout')))
1527
def check_stderr_ok(name, way):
    """Compare the program's stderr with the expected sample, applying
    error-message normalisation unless the sample is platform-specific."""
    opts = getTestOpts()
    namebase = name if opts.with_namebase == None else opts.with_namebase

    actual_stderr_file = qualify(name, 'run.stderr')
    (platform_specific, expected_stderr_file) = platform_wordsize_qualify(namebase, 'stderr')

    def norm(str):
        return str if platform_specific else normalise_errmsg(str)

    return compare_outputs(way, 'stderr',
                           join_normalisers(norm, opts.extra_errmsg_normaliser),
                           expected_stderr_file, actual_stderr_file)
1546
def dump_stderr( name ):
    """Print the test's captured run-time stderr."""
    print("Stderr:")
    print(read_no_crs(qualify(name, 'run.stderr')))
1550
def read_no_crs(file):
    """Return the contents of file with all CRs removed, or '' if the
    file cannot be read.

    Fix: the original called 'h.close' without parentheses, so the
    handle was never actually closed; 'with' now guarantees it.
    """
    str = ''
    try:
        with open(file) as h:
            str = h.read()
    except:
        # On Windows, if the program fails very early, it seems the
        # files stdout/stderr are redirected to may not get created
        pass
    return re.sub('\r', '', str)
1562
def write_file(file, str):
    """Write str to file, replacing any previous contents.

    Fix: the original called 'h.close' without parentheses, leaving the
    handle open; 'with' now closes (and flushes) it deterministically.
    """
    with open(file, 'w') as h:
        h.write(str)
1567
def check_hp_ok(name):
    """Run hp2ps on the test's heap profile and, when ghostscript is
    available, validate the resulting PostScript file."""
    # do not qualify for hp2ps because we should be in the right directory
    hp2psCmd = "cd " + getTestOpts().testdir + " && {hp2ps} " + name

    if runCmdExitCode(hp2psCmd) != 0:
        print("hp2ps error when processing heap profile for " + name)
        return(False)

    actual_ps_file = qualify(name, 'ps')
    if not os.path.exists(actual_ps_file):
        print("hp2ps did not generate PostScript for " + name)
        return(False)

    if not gs_working:
        return (True) # assume postscript is valid without ghostscript

    gsResult = runCmdExitCode(genGSCmd(actual_ps_file))
    if (gsResult == 0):
        return (True)
    print("hp2ps output for " + name + "is not valid PostScript")
    # falls through, returning None (falsy) as before
1592
def check_prof_ok(name, way):
    """Validate the .prof file from a profiled run, comparing it against
    <namebase>.prof.sample when such a sample exists."""
    prof_file = qualify(name,'prof')

    if not os.path.exists(prof_file):
        print(prof_file + " does not exist")
        return(False)

    if os.path.getsize(qualify(name,'prof')) == 0:
        print(prof_file + " is empty")
        return(False)

    opts = getTestOpts()
    namebase = name if opts.with_namebase == None else opts.with_namebase

    (platform_specific, expected_prof_file) = \
        platform_wordsize_qualify(namebase, 'prof.sample')

    # sample prof file is not required
    if not os.path.exists(expected_prof_file):
        return True

    return compare_outputs(way, 'prof',
                           join_normalisers(normalise_whitespace,normalise_prof),
                           expected_prof_file, prof_file)
1620
# Compare expected output to actual output, and optionally accept the
# new output. Returns true if output matched or was accepted, false
# otherwise.
def compare_outputs(way, kind, normaliser, expected_file, actual_file):
    # 'kind' is only a label for messages ('stdout', 'stderr', 'asm', ...).
    # A missing expected file is treated as expecting empty output.
    if os.path.exists(expected_file):
        expected_raw = read_no_crs(expected_file)
        # print "norm:", normaliser(expected_raw)
        expected_str = normaliser(expected_raw)
        expected_file_for_diff = expected_file
    else:
        expected_str = ''
        expected_file_for_diff = '/dev/null'

    actual_raw = read_no_crs(actual_file)
    actual_str = normaliser(actual_raw)

    if expected_str == actual_str:
        return 1
    else:
        if_verbose(1, 'Actual ' + kind + ' output differs from expected:')

        # Dump the normalised forms to *.normalised files so they can be
        # inspected after the run.
        if expected_file_for_diff == '/dev/null':
            expected_normalised_file = '/dev/null'
        else:
            expected_normalised_file = expected_file + ".normalised"
            write_file(expected_normalised_file, expected_str)

        actual_normalised_file = actual_file + ".normalised"
        write_file(actual_normalised_file, actual_str)

        # Ignore whitespace when diffing. We should only get to this
        # point if there are non-whitespace differences
        #
        # Note we are diffing the *actual* output, not the normalised
        # output. The normalised output may have whitespace squashed
        # (including newlines) so the diff would be hard to read.
        # This does mean that the diff might contain changes that
        # would be normalised away.
        if (config.verbose >= 1):
            r = os.system( 'diff -uw ' + expected_file_for_diff + \
                           ' ' + actual_file )

            # If for some reason there were no non-whitespace differences,
            # then do a full diff
            if r == 0:
                r = os.system( 'diff -u ' + expected_file_for_diff + \
                               ' ' + actual_file )

        if config.accept and (getTestOpts().expect == 'fail' or
                              way in getTestOpts().expect_fail_for):
            if_verbose(1, 'Test is expected to fail. Not accepting new output.')
            return 0
        elif config.accept:
            # -a / --accept: overwrite the sample with the raw actual output.
            if_verbose(1, 'Accepting new output.')
            write_file(expected_file, actual_raw)
            return 1
        else:
            return 0
1679
1680
def normalise_whitespace( str ):
    """Collapse each run of spaces, tabs and newlines into one space."""
    return re.sub('[ \t\n]+', ' ', str)
1685
def normalise_errmsg( str ):
    """Normalise a compiler error message so expected and actual output
    compare equal across platforms and compiler stages."""
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))

    substitutions = [
        # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows);
        # the colon is there because it appears in error messages -- a
        # hacky stand-in for more sophisticated filename mangling.
        ('([^\\s])\\.exe', '\\1'),
        # normalise slashes, minimise Windows/Unix filename differences
        ('\\\\', '/'),
        # The inplace ghc's are called ghc-stage[123] to avoid filename
        # collisions, so normalise that to just "ghc".
        ('ghc-stage[123]', 'ghc'),
        # Error messages sometimes name the integer implementation package.
        ('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>'),
    ]
    for pattern, replacement in substitutions:
        str = re.sub(pattern, replacement, str)
    return str
1705
# normalise a .prof file, so that we can reasonably compare it against
# a sample. This doesn't compare any of the actual profiling data,
# only the shape of the profile and the number of entries.
def normalise_prof (str):
    """Strip a .prof file down to cost-centre name, module and entry
    count, discarding header, CAF/IDLE rows and all timing figures."""
    # strip everything up to the line beginning "COST CENTRE"
    str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)

    # strip results for CAFs, these tend to change unpredictably
    str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)

    # XXX Ignore Main.main. Sometimes this appears under CAF, and
    # sometimes under MAIN.
    str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)

    # We have something like this:
    #
    #   MAIN      MAIN     101      0    0.0   0.0  100.0 100.0
    #   k         Main     204      1    0.0   0.0    0.0   0.0
    #   foo       Main     205      1    0.0   0.0    0.0   0.0
    #   foo.bar   Main     207      1    0.0   0.0    0.0   0.0
    #
    # then we remove all the specific profiling data, leaving only the
    # cost centre name, module, and entries, to end up with this:
    #
    #   MAIN      MAIN     0
    #   k         Main     1
    #   foo       Main     1
    #   foo.bar   Main     1
    str = re.sub('\n([ \t]*[^ \t]+)([ \t]+[^ \t]+)([ \t]+\\d+)([ \t]+\\d+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)[ \t]+([\\d\\.]+)','\n\\1 \\2 \\4',str)
    return str
1737
def normalise_slashes_( str ):
    """Convert backslashes to forward slashes (Windows paths)."""
    return re.sub('\\\\', '/', str)
1741
def normalise_exe_( str ):
    """Delete every ".exe" (Windows executable suffix) occurrence."""
    return re.sub('\.exe', '', str)
1745
def normalise_output( str ):
    """Normalise program output: drop " error:" markers, lower-case
    " Warning:" (keeps the trac #10021 patch small) and strip the
    Windows ".exe" suffix that can appear in messages."""
    for pattern, replacement in ((' error:', ''), (' Warning:', ' warning:')):
        str = modify_lines(str, lambda l, p=pattern, r=replacement: re.sub(p, r, l))
    # Remove a .exe extension (for Windows)
    # This can occur in error messages generated by the program.
    return re.sub('([^\\s])\\.exe', '\\1', str)
1755
def normalise_asm( str ):
    """Reduce assembly to a comparable skeleton: keep mnemonics and
    labels (plus the target of call instructions), dropping metadata
    directives, other operands, '@plt' decorations and blank lines."""
    # Only keep instructions and labels not starting with a dot.
    metadata = re.compile('^[ \t]*\\..*$')
    kept = []
    for raw_line in str.split('\n'):
        # Drop metadata directives (e.g. ".type")
        if metadata.match(raw_line):
            continue
        tokens = re.sub('@plt', '', raw_line).lstrip().split()
        if not tokens:
            # Drop empty lines.
            continue
        if tokens[0] == 'call':
            # Keep the call target; drop all other operands.
            kept.append(tokens[0] + ' ' + tokens[1])
        else:
            kept.append(tokens[0])
    return '\n'.join(kept)
1776
def if_verbose( n, s ):
    """Print s only when the configured verbosity is at least n."""
    if config.verbose >= n:
        print(s)
1780
def if_verbose_dump( n, f ):
    """At verbosity >= n, print the contents of file f; print an empty
    line if the file cannot be read (best effort)."""
    if config.verbose >= n:
        try:
            with open(f) as h:
                print(h.read())
        except:
            print('')
1787
def rawSystem(cmd_and_args):
    """Run a command without a shell and return its exit status.

    We prefer subprocess.call to os.spawnv as the latter seems to send
    its arguments through a shell or something with the Windows
    (non-cygwin) python: an argument "a b c" turns into three arguments.
    """
    prog = strip_quotes(cmd_and_args[0])
    return subprocess.call([prog] + cmd_and_args[1:])
1796
# When running under native msys Python, any invocations of non-msys binaries,
# including timeout.exe, will have their arguments munged according to some
# heuristics, which leads to malformed command lines (#9626). The easiest way
# to avoid problems is to invoke through /usr/bin/cmd which sidesteps argument
# munging because it is a native msys application.
def passThroughCmd(cmd_and_args):
    """Wrap a command in 'cmd /c' to bypass msys argument munging."""
    # cmd needs a Windows-style path for its first argument.
    prog = cmd_and_args[0].replace('/', '\\')
    # Other arguments need to be quoted to deal with spaces.
    quoted = ['"%s"' % arg for arg in cmd_and_args[1:]]
    return ["cmd", "/c", " ".join([prog] + quoted)]
1809
# Note that this doesn't handle the timeout itself; it is just used for
# commands that have timeout handling built-in.
def rawSystemWithTimeout(cmd_and_args):
    """Run a timeout-wrapped command, translating it through cmd /c when
    running under native msys python, and stop the run on ^C."""
    if config.os == 'mingw32' and sys.executable.startswith('/usr'):
        # This is only needed when running under msys python.
        cmd_and_args = passThroughCmd(cmd_and_args)
    status = rawSystem(cmd_and_args)
    if status == 98:
        # The python timeout program uses 98 to signal that ^C was pressed
        stopNow()
    return status
1821
1822 # cmd is a complex command in Bourne-shell syntax
1823 # e.g (cd . && 'c:/users/simonpj/darcs/HEAD/compiler/stage1/ghc-inplace' ...etc)
1824 # Hence it must ultimately be run by a Bourne shell
1825 #
1826 # Mostly it invokes the command wrapped in 'timeout' thus
1827 # timeout 300 'cd . && ...blah blah'
1828 # so it's timeout's job to invoke the Bourne shell
1829 #
1830 # But watch out for the case when there is no timeout program!
1831 # Then, when using the native Python, os.system will invoke the cmd shell
1832
def runCmd( cmd ):
    """Run a Bourne-shell command (under the timeout program when one is
    configured) and return its status shifted into the high byte."""
    # Format cmd using config. Example: cmd='{hpc} report A.tix'
    cmd = cmd.format(**config.__dict__)
    if_verbose(3, cmd)

    if config.os == 'mingw32':
        # On MinGW, we will always have timeout
        assert config.timeout_prog != ''

    if config.timeout_prog != '':
        r = rawSystemWithTimeout([config.timeout_prog, str(config.timeout), cmd])
    else:
        r = os.system(cmd)
    return r << 8
1848
def runCmdFor( name, cmd, timeout_multiplier=1.0 ):
    """Run a shell command on behalf of test 'name', scaling the timeout
    by timeout_multiplier.  When config.check_files_written is set the
    command is run under strace to record which files it touches.
    Returns the status shifted into the high byte."""
    # Format cmd using config. Example: cmd='{hpc} report A.tix'
    cmd = cmd.format(**config.__dict__)
    if_verbose(3, cmd)

    if config.os == 'mingw32':
        # On MinGW, we will always have timeout
        assert config.timeout_prog != ''
    timeout = int(ceil(config.timeout * timeout_multiplier))

    if config.timeout_prog == '':
        r = os.system(cmd)
    elif config.check_files_written:
        fn = name + ".strace"
        r = rawSystemWithTimeout(
            ["strace", "-o", fn, "-fF",
             "-e", "creat,open,chdir,clone,vfork",
             config.timeout_prog, str(timeout), cmd])
        addTestFilesWritten(name, fn)
        rm_no_fail(fn)
    else:
        r = rawSystemWithTimeout([config.timeout_prog, str(timeout), cmd])
    return r << 8
1874
def runCmdExitCode( cmd ):
    """Run cmd and return just its exit code (runCmd's high byte)."""
    return runCmd(cmd) >> 8
1877
1878
1879 # -----------------------------------------------------------------------------
1880 # checking for files being written to by multiple tests
1881
# Regexes for picking apart strace output.  re_strace_call_end matches the
# tail of a syscall line: either a completed call with its return value, or
# one that strace reports as unfinished.
re_strace_call_end = '(\) += ([0-9]+|-1 E.*)| <unfinished ...>)$'
re_strace_unavailable = re.compile('^\) += \? <unavailable>$')
# Leading pid prefix produced by strace -f; group 2 is the rest of the line.
re_strace_pid = re.compile('^([0-9]+) +(.*)')
# Process-creation calls; group 2 is the new pid (used to propagate cwd).
re_strace_clone = re.compile('^(clone\(|<... clone resumed> ).*\) = ([0-9]+)$')
re_strace_clone_unfinished = re.compile('^clone\( <unfinished \.\.\.>$')
re_strace_vfork = re.compile('^(vfork\(\)|<\.\.\. vfork resumed> \)) += ([0-9]+)$')
re_strace_vfork_unfinished = re.compile('^vfork\( <unfinished \.\.\.>$')
# chdir/open calls; group 1 is the path, and for open group 2 the flags.
re_strace_chdir = re.compile('^chdir\("([^"]*)"(\) += 0| <unfinished ...>)$')
re_strace_chdir_resumed = re.compile('^<\.\.\. chdir resumed> \) += 0$')
re_strace_open = re.compile('^open\("([^"]*)", ([A-Z_|]*)(, [0-9]+)?' + re_strace_call_end)
re_strace_open_resumed = re.compile('^<... open resumed> ' + re_strace_call_end)
# Signal-delivery lines that carry no file information; simply ignored.
re_strace_ignore_sigchild = re.compile('^--- SIGCHLD \(Child exited\) @ 0 \(0\) ---$')
re_strace_ignore_sigvtalarm = re.compile('^--- SIGVTALRM \(Virtual timer expired\) @ 0 \(0\) ---$')
re_strace_ignore_sigint = re.compile('^--- SIGINT \(Interrupt\) @ 0 \(0\) ---$')
re_strace_ignore_sigfpe = re.compile('^--- SIGFPE \(Floating point exception\) @ 0 \(0\) ---$')
re_strace_ignore_sigsegv = re.compile('^--- SIGSEGV \(Segmentation fault\) @ 0 \(0\) ---$')
re_strace_ignore_sigpipe = re.compile('^--- SIGPIPE \(Broken pipe\) @ 0 \(0\) ---$')

# Files that are read or written but shouldn't be:
# * ghci_history shouldn't be read or written by tests
# * things under package.conf.d shouldn't be written by tests
bad_file_usages = {}

# Mapping from tests to the list of files that they write
files_written = {}

# Mapping from tests to the list of files that they write but don't clean
files_written_not_removed = {}
1910
def add_bad_file_usage(name, file):
    """Record that test 'name' accessed 'file' in a disallowed way.

    Fix: the original used a bare 'except:' around the append, which
    would silently swallow any error; setdefault makes the intent
    (create-on-first-use) explicit.
    """
    usages = bad_file_usages.setdefault(name, [])
    if file not in usages:
        usages.append(file)
1917
def mkPath(curdir, path):
    # Given the current full directory is 'curdir', what is the full
    # (canonical) path to 'path'?
    return os.path.realpath(os.path.join(curdir, path))
1922
def addTestFilesWritten(name, fn):
    """Record the files written by test 'name' (from strace log fn),
    holding the shared lock when running threaded."""
    if not config.use_threads:
        addTestFilesWrittenHelper(name, fn)
        return
    with t.lockFilesWritten:
        addTestFilesWrittenHelper(name, fn)
1929
def addTestFilesWrittenHelper(name, fn):
    """Parse the strace log 'fn' from running test 'name' and record the
    files the test opened for writing.

    Tracks a per-pid working directory (following chdir/clone/vfork) so
    relative paths in open() calls can be resolved.  Writes to
    ghci_history or under package.conf.d are recorded as bad usages.

    Fixes: the '<unavailable>' branch used the bare expression 'next'
    (a no-op in Python) instead of 'continue', so the previous line's
    content was re-processed; and the package.conf.d check used
    re.match on an absolute path, which could never match -- it is now
    re.search.
    """
    started = False
    working_directories = {}

    with open(fn, 'r') as f:
        for line in f:
            m_pid = re_strace_pid.match(line)
            if m_pid:
                pid = m_pid.group(1)
                content = m_pid.group(2)
            elif re_strace_unavailable.match(line):
                # No usable content on this line; skip it.
                continue
            else:
                framework_fail(name, 'strace', "Can't find pid in strace line: " + line)

            m_open = re_strace_open.match(content)
            m_chdir = re_strace_chdir.match(content)
            m_clone = re_strace_clone.match(content)
            m_vfork = re_strace_vfork.match(content)

            if not started:
                # The first traced pid starts in our own cwd.
                working_directories[pid] = os.getcwd()
                started = True

            if m_open:
                file = m_open.group(1)
                file = mkPath(working_directories[pid], file)
                if file.endswith("ghci_history"):
                    add_bad_file_usage(name, file)
                elif not file in ['/dev/tty', '/dev/null'] and not file.startswith("/tmp/ghc"):
                    flags = m_open.group(2).split('|')
                    if 'O_WRONLY' in flags or 'O_RDWR' in flags:
                        # 'file' is absolute here, so search (not match).
                        if re.search('package\.conf\.d', file):
                            add_bad_file_usage(name, file)
                        else:
                            if not file in files_written.setdefault(name, []):
                                files_written[name].append(file)
                    elif 'O_RDONLY' in flags:
                        pass
                    else:
                        framework_fail(name, 'strace', "Can't understand flags in open strace line: " + line)
            elif m_chdir:
                # We optimistically assume that unfinished chdir's are going to succeed
                dir = m_chdir.group(1)
                working_directories[pid] = mkPath(working_directories[pid], dir)
            elif m_clone:
                # Child inherits the parent's working directory.
                working_directories[m_clone.group(2)] = working_directories[pid]
            elif m_vfork:
                working_directories[m_vfork.group(2)] = working_directories[pid]
            elif re_strace_open_resumed.match(content):
                pass
            elif re_strace_chdir_resumed.match(content):
                pass
            elif re_strace_vfork_unfinished.match(content):
                pass
            elif re_strace_clone_unfinished.match(content):
                pass
            elif re_strace_ignore_sigchild.match(content):
                pass
            elif re_strace_ignore_sigvtalarm.match(content):
                pass
            elif re_strace_ignore_sigint.match(content):
                pass
            elif re_strace_ignore_sigfpe.match(content):
                pass
            elif re_strace_ignore_sigsegv.match(content):
                pass
            elif re_strace_ignore_sigpipe.match(content):
                pass
            else:
                framework_fail(name, 'strace', "Can't understand strace line: " + line)
2004
def checkForFilesWrittenProblems(file):
    """Write a report of file-usage problems to the file object *file*.

    Reports (from the module-level maps populated during the run):
    files written by more than one test, files written but not removed,
    and other bad file usages.
    """
    foundProblem = False

    # Invert files_written: map each file to the list of tests that
    # wrote it.  (setdefault replaces the original bare try/except.)
    files_written_inverted = {}
    for t in files_written.keys():
        for f in files_written[t]:
            files_written_inverted.setdefault(f, []).append(t)

    for f in files_written_inverted.keys():
        if len(files_written_inverted[f]) > 1:
            if not foundProblem:
                foundProblem = True
                file.write("\n")
                file.write("\nSome files are written by multiple tests:\n")
            file.write(" " + f + " (" + str(files_written_inverted[f]) + ")\n")
    if foundProblem:
        file.write("\n")

    # -----

    if len(files_written_not_removed) > 0:
        file.write("\n")
        file.write("\nSome files written but not removed:\n")
        for t in sorted(files_written_not_removed.keys()):
            for f in files_written_not_removed[t]:
                file.write(" " + t + ": " + f + "\n")
        file.write("\n")

    # -----

    if len(bad_file_usages) > 0:
        file.write("\n")
        file.write("\nSome bad file usages:\n")
        for t in sorted(bad_file_usages.keys()):
            for f in bad_file_usages[t]:
                file.write(" " + t + ": " + f + "\n")
        file.write("\n")
2049
2050 # -----------------------------------------------------------------------------
2051 # checking if ghostscript is available for checking the output of hp2ps
2052
def genGSCmd(psfile):
    """Build the shell command that runs GhostScript over *psfile*."""
    flags = '-dNODISPLAY -dBATCH -dQUIET -dNOPAUSE'
    return '%s %s %s' % (config.gs, flags, psfile)
2055
def gsNotWorking():
    """Announce that GhostScript cannot be used to check hp2ps output."""
    global gs_working
    print("GhostScript not available for hp2ps tests")
2059
global gs_working

# Probe whether GhostScript is usable for checking hp2ps output: it must
# accept the known-good .ps file and reject the known-bad one.
gs_working = 0
if config.have_profiling:
    gs_usable = False
    if config.gs != '':
        if runCmdExitCode(genGSCmd(config.confdir + '/good.ps')) == 0:
            resultBad = runCmdExitCode(genGSCmd(config.confdir + '/bad.ps') +
                                       ' >/dev/null 2>&1')
            gs_usable = (resultBad != 0)
    if gs_usable:
        print("GhostScript available for hp2ps tests")
        gs_working = 1
    else:
        gsNotWorking()
2077
def rm_no_fail( file ):
    """Remove *file*, ignoring filesystem errors (e.g. it doesn't exist).

    The original used 'try/finally: return', which silently swallowed
    *every* exception, including KeyboardInterrupt; only OS-level
    errors should be ignored.
    """
    try:
        os.remove( file )
    except OSError:
        pass
2083
def add_suffix( name, suffix ):
    """Return name + '.' + suffix, or name unchanged when suffix is empty."""
    return name if suffix == '' else name + '.' + suffix
2089
def add_hs_lhs_suffix(name):
    """Pick the source-file suffix for *name* from the current test options."""
    opts = getTestOpts()
    # Same precedence as the original if/elif chain.
    for flag, ext in [('c_src', 'c'),
                      ('cmm_src', 'cmm'),
                      ('objc_src', 'm'),
                      ('objcpp_src', 'mm'),
                      ('literate', 'lhs')]:
        if getattr(opts, flag):
            return add_suffix(name, ext)
    return add_suffix(name, 'hs')
2103
def replace_suffix( name, suffix ):
    """Swap name's extension for *suffix* (appends one if name had none)."""
    root = os.path.splitext(name)[0]
    return root + '.' + suffix
2107
def in_testdir( name ):
    """Prefix *name* with the current test's directory.

    Uses a literal '/' separator (not os.path.join) on purpose: the
    testsuite expects forward slashes everywhere, including Windows.
    """
    return '{0}/{1}'.format(getTestOpts().testdir, name)
2110
def qualify( name, suff ):
    """Return the path <testdir>/<name>.<suff> (no dot when suff is empty)."""
    basename = add_suffix(name, suff)
    return in_testdir(basename)
2113
2114
2115 # Finding the sample output. The filename is of the form
2116 #
2117 # <test>.stdout[-<compiler>][-<version>][-ws-<wordsize>][-<platform>]
2118 #
2119 # and we pick the most specific version available. The <version> is
2120 # the major version of the compiler (e.g. 6.8.2 would be "6.8"). For
2121 # more fine-grained control use if_compiler_lt().
2122 #
def platform_wordsize_qualify( name, suff ):
    # Find the most specific sample-output file for (name, suff).
    # Candidate ordering matters: the nested comprehension yields
    # platform-specific names first (config.platform before config.os),
    # then wordsize-, compiler-type- and compiler-version-qualified
    # variants, most specific first.

    basepath = qualify(name, suff)

    paths = [(platformSpecific, basepath + comp + vers + ws + plat)
             for (platformSpecific, plat) in [(1, '-' + config.platform),
                                              (1, '-' + config.os),
                                              (0, '')]
             for ws in ['-ws-' + config.wordsize, '']
             for comp in ['-' + config.compiler_type, '']
             for vers in ['-' + config.compiler_maj_version, '']]

    # List what actually exists on disk, normalising slashes so the
    # membership test below works on Windows paths too.
    dir = glob.glob(basepath + '*')
    dir = [normalise_slashes_(d) for d in dir]

    # First existing candidate wins; the flag says whether the chosen
    # file is platform-specific.
    for (platformSpecific, f) in paths:
        if f in dir:
            return (platformSpecific,f)

    # Fall back to the unqualified name even if no file exists.
    return (0, basepath)
2143
2144 # Clean up prior to the test, so that we can't spuriously conclude
2145 # that it passed on the basis of old run outputs.
def pretest_cleanup(name):
    """Remove output from earlier runs of test *name*, so that we can't
    spuriously conclude that it passed on the basis of old run outputs.
    """
    if getTestOpts().outputdir is not None:
        odir = in_testdir(getTestOpts().outputdir)
        try:
            shutil.rmtree(odir)
        except OSError:
            # Best effort: the directory may simply not exist yet.
            pass
        os.mkdir(odir)

    rm_no_fail(qualify(name,'interp.stderr'))
    rm_no_fail(qualify(name,'interp.stdout'))
    rm_no_fail(qualify(name,'comp.stderr'))
    rm_no_fail(qualify(name,'comp.stdout'))
    rm_no_fail(qualify(name,'run.stderr'))
    rm_no_fail(qualify(name,'run.stdout'))
    rm_no_fail(qualify(name,'tix'))
    rm_no_fail(qualify(name,'exe.tix'))
    # simple_build zaps the following:
    # rm_nofail(qualify("o"))
    # rm_nofail(qualify(""))
    # not interested in the return code
2166 # not interested in the return code
2167
2168 # -----------------------------------------------------------------------------
2169 # Return a list of all the files ending in '.T' below directories roots.
2170
def findTFiles(roots):
    """Return all files ending in '.T' at or below the given directories.

    Deliberately avoids os.walk, which yields backslash-separated paths
    on Windows; the testsuite expects forward slashes throughout.
    """
    return [filename for root in roots for filename in findTFiles_(root)]

def findTFiles_(path):
    # Worker for findTFiles: handle one path, recursing into directories.
    if os.path.isdir(path):
        paths = [path + '/' + entry for entry in os.listdir(path)]
        return findTFiles(paths)
    elif path.endswith('.T'):
        return [path]
    else:
        return []
2185
2186 # -----------------------------------------------------------------------------
2187 # Output a test summary to the specified file object
2188
def summary(t, file, short=False):
    # Write a summary of the test run *t* to the file object *file*.
    # When *short* is True, only the list of unexpected results is
    # printed (used to keep output quiet).

    file.write('\n')
    printUnexpectedTests(file, [t.unexpected_passes, t.unexpected_failures, t.unexpected_stat_failures])

    if short:
        # Only print the list of unexpected tests above.
        return

    # Overall counts, each right-justified in an 8-character column.
    file.write('OVERALL SUMMARY for test run started at '
               + time.strftime("%c %Z", t.start_time) + '\n'
               + str(datetime.timedelta(seconds=
                    round(time.time() - time.mktime(t.start_time)))).rjust(8)
               + ' spent to go through\n'
               + repr(t.total_tests).rjust(8)
               + ' total tests, which gave rise to\n'
               + repr(t.total_test_cases).rjust(8)
               + ' test cases, of which\n'
               + repr(t.n_tests_skipped).rjust(8)
               + ' were skipped\n'
               + '\n'
               + repr(t.n_missing_libs).rjust(8)
               + ' had missing libraries\n'
               + repr(t.n_expected_passes).rjust(8)
               + ' expected passes\n'
               + repr(t.n_expected_failures).rjust(8)
               + ' expected failures\n'
               + '\n'
               + repr(t.n_framework_failures).rjust(8)
               + ' caused framework failures\n'
               + repr(t.n_unexpected_passes).rjust(8)
               + ' unexpected passes\n'
               + repr(t.n_unexpected_failures).rjust(8)
               + ' unexpected failures\n'
               + repr(t.n_unexpected_stat_failures).rjust(8)
               + ' unexpected stat failures\n'
               + '\n')

    # Per-category detail, only for non-empty categories.
    if t.n_unexpected_passes > 0:
        file.write('Unexpected passes:\n')
        printPassingTestInfosSummary(file, t.unexpected_passes)

    if t.n_unexpected_failures > 0:
        file.write('Unexpected failures:\n')
        printFailingTestInfosSummary(file, t.unexpected_failures)

    if t.n_unexpected_stat_failures > 0:
        file.write('Unexpected stat failures:\n')
        printFailingTestInfosSummary(file, t.unexpected_stat_failures)

    if config.check_files_written:
        checkForFilesWrittenProblems(file)

    if stopping():
        file.write('WARNING: Testsuite run was terminated early\n')
2244
def printUnexpectedTests(file, testInfoss):
    """Write a TEST="..." line naming every test with an unexpected result.

    *testInfoss* is a sequence of {directory: {testname: ...}} maps;
    nothing is written when there are no unexpected results.
    """
    unexpected = [test
                  for testInfos in testInfoss
                  for directory in testInfos
                  for test in testInfos[directory]]
    if unexpected:
        file.write('Unexpected results from:\n')
        file.write('TEST="' + ' '.join(unexpected) + '"\n')
        file.write('\n')
2256
def printPassingTestInfosSummary(file, testInfos):
    """Write one line per unexpectedly-passing test, grouped by directory.

    *testInfos* maps directory -> test name -> list of ways.
    """
    width = max(len(d) for d in testInfos) + 2
    for directory in sorted(testInfos):
        for test in sorted(testInfos[directory]):
            ways = ','.join(testInfos[directory][test])
            file.write(' ' + directory.ljust(width) + test +
                       ' (' + ways + ')\n')
    file.write('\n')
2268
def printFailingTestInfosSummary(file, testInfos):
    """Write one line per failing test and failure reason, by directory.

    *testInfos* maps directory -> test name -> reason -> list of ways.
    """
    width = max(len(d) for d in testInfos) + 2
    for directory in sorted(testInfos):
        for test in sorted(testInfos[directory]):
            for reason in testInfos[directory][test]:
                ways = ','.join(testInfos[directory][test][reason])
                file.write(' ' + directory.ljust(width) + test +
                           ' [' + reason + ']' +
                           ' (' + ways + ')\n')
    file.write('\n')
2283
def modify_lines(s, f):
    """Apply *f* to each line of *s* and rejoin the results with newlines."""
    return '\n'.join(f(line) for line in s.splitlines())