# Testsuite: refactoring only
# [ghc.git] / testsuite / driver / testlib.py
1 #
2 # (c) Simon Marlow 2002
3 #
4
5 from __future__ import print_function
6
7 import shutil
8 import sys
9 import os
10 import errno
11 import string
12 import re
13 import traceback
14 import time
15 import datetime
16 import copy
17 import glob
18 from math import ceil, trunc
19 import collections
20 import subprocess
21
22 from testglobals import *
23 from testutil import *
24
if config.use_threads:
    import threading
    # The Python 2 'thread' module was renamed to '_thread' in Python 3;
    # fall back so either interpreter works.
    try:
        import thread
    except ImportError: # Python 3
        import _thread as thread
31
# Flag used to abort the whole test run early (set on KeyboardInterrupt).
global wantToStop
wantToStop = False

def stopNow():
    """Ask the driver to stop scheduling any further tests."""
    global wantToStop
    wantToStop = True

def stopping():
    """Return True once stopNow() has been called."""
    return wantToStop
39
# Options valid for the current test only (these get reset to
# testdir_testopts after each test).

global testopts_local
if config.use_threads:
    # Per-thread storage: each worker thread carries its own "current
    # test options" so parallel tests don't trample each other.
    testopts_local = threading.local()
else:
    # Single-threaded: a plain object with one attribute is enough.
    class TestOpts_Local:
        pass
    testopts_local = TestOpts_Local()

def getTestOpts():
    # Return the TestOptions of the test currently being run (set by
    # setLocalTestOpts below).
    return testopts_local.x

def setLocalTestOpts(opts):
    # Install 'opts' as the current test's options (thread-local when
    # running with threads).
    global testopts_local
    testopts_local.x=opts
57
def isStatsTest():
    """True if the current test checks runtime or compiler -t statistics."""
    opts = getTestOpts()
    has_compiler_stats = len(opts.compiler_stats_range_fields) > 0
    has_runtime_stats = len(opts.stats_range_fields) > 0
    return has_compiler_stats or has_runtime_stats
61
62
# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
    global thisdir_settings
    # Chain the new setup function onto the directory-wide settings;
    # executeSetups flattens these nested lists when applying them.
    thisdir_settings = [thisdir_settings, f]
68
69 # -----------------------------------------------------------------------------
70 # Canned setup functions for common cases. eg. for a test you might say
71 #
72 # test('test001', normal, compile, [''])
73 #
74 # to run it without any options, but change it to
75 #
76 # test('test001', expect_fail, compile, [''])
77 #
78 # to expect failure for this test.
79
def normal( name, opts ):
    """Default setup: leave the test options untouched."""
    pass

def skip( name, opts ):
    """Mark this test to be skipped in every way."""
    opts.skip = 1

def expect_fail( name, opts ):
    # The compiler, testdriver, OS or platform is missing a certain
    # feature, and we don't plan to or can't fix it now or in the
    # future.
    opts.expect = 'fail'
91
92 def reqlib( lib ):
93 return lambda name, opts, l=lib: _reqlib (name, opts, l )
94
95 # Cache the results of looking to see if we have a library or not.
96 # This makes quite a difference, especially on Windows.
97 have_lib = {}
98
99 def _reqlib( name, opts, lib ):
100 if lib in have_lib:
101 got_it = have_lib[lib]
102 else:
103 cmd = strip_quotes(config.ghc_pkg)
104 p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
105 stdout=subprocess.PIPE,
106 stderr=subprocess.PIPE)
107 # read from stdout and stderr to avoid blocking due to
108 # buffers filling
109 p.communicate()
110 r = p.wait()
111 got_it = r == 0
112 have_lib[lib] = got_it
113
114 if not got_it:
115 opts.expect = 'missing-lib'
116
def req_haddock( name, opts ):
    """Haddock must be available; otherwise the test is missing-lib."""
    if not config.haddock:
        opts.expect = 'missing-lib'

def req_profiling( name, opts ):
    """Profiling libraries must be built; otherwise expect failure."""
    if config.have_profiling:
        return
    opts.expect = 'fail'

def req_shared_libs( name, opts ):
    """Shared-library support must be present; otherwise expect failure."""
    if config.have_shared_libs:
        return
    opts.expect = 'fail'

def req_interp( name, opts ):
    """The interpreter must be available; otherwise expect failure."""
    if config.have_interp:
        return
    opts.expect = 'fail'

def req_smp( name, opts ):
    """The threaded RTS must be built; otherwise expect failure."""
    if config.have_smp:
        return
    opts.expect = 'fail'
136
def ignore_output( name, opts ):
    """Don't compare this test's output against the expected files."""
    opts.ignore_output = 1

def no_stdin( name, opts ):
    """Run the test program without a stdin connection."""
    opts.no_stdin = 1

def combined_output( name, opts ):
    """Expect stdout and stderr interleaved in a single .stdout file."""
    opts.combined_output = True
145
146 # -----
147
def expect_fail_for( ways ):
    """Expect failure, but only in the given ways."""
    return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )

def _expect_fail_for( name, opts, ways ):
    opts.expect_fail_for = ways

def expect_broken( bug ):
    # This test is a expected not to work due to the indicated trac bug
    # number.
    return lambda name, opts, b=bug: _expect_broken (name, opts, b )

def _expect_broken( name, opts, bug ):
    record_broken(name, opts, bug)
    opts.expect = 'fail'

def expect_broken_for( bug, ways ):
    """Like expect_broken, but only for the given ways."""
    return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )

def _expect_broken_for( name, opts, bug, ways ):
    record_broken(name, opts, bug)
    opts.expect_fail_for = ways

def record_broken(name, opts, bug):
    """Remember (bug, testdir, name) so --list-broken can report it."""
    global brokens
    me = (bug, opts.testdir, name)
    if me not in brokens:
        brokens.append(me)

def _expect_pass(way):
    # Helper function. Not intended for use in .T files.
    opts = getTestOpts()
    if opts.expect != 'pass':
        return False
    return way not in opts.expect_fail_for

# -----

def omit_ways( ways ):
    """Never run this test in the given ways."""
    def setup(name, opts, w=ways):
        _omit_ways(name, opts, w)
    return setup

def _omit_ways( name, opts, ways ):
    opts.omit_ways = ways

# -----

def only_ways( ways ):
    """Run this test only in the given ways."""
    def setup(name, opts, w=ways):
        _only_ways(name, opts, w)
    return setup

def _only_ways( name, opts, ways ):
    opts.only_ways = ways

# -----

def extra_ways( ways ):
    """Also run this test in the given extra ways."""
    def setup(name, opts, w=ways):
        _extra_ways(name, opts, w)
    return setup

def _extra_ways( name, opts, ways ):
    opts.extra_ways = ways
204
205 # -----
206
def only_compiler_types( _compiler_types ):
    # Don't delete yet. The libraries unix, stm and hpc still call this function.
    return lambda _name, _opts: None

# -----

def set_stdin( file ):
    """Feed the given file to the test program's stdin."""
    def setup(name, opts, f=file):
        _set_stdin(name, opts, f)
    return setup

def _set_stdin( name, opts, f ):
    opts.stdin = f

# -----

def exit_code( val ):
    """Expect the test program to exit with the given code."""
    def setup(name, opts, v=val):
        _exit_code(name, opts, v)
    return setup

def _exit_code( name, opts, v ):
    opts.exit_code = v
226
def signal_exit_code( val ):
    """Expect death by signal `val` (encoded per-OS in the exit code)."""
    if opsys('solaris2'):
        return exit_code(val)
    # When application running on Linux receives fatal error
    # signal, then its exit code is encoded as 128 + signal
    # value. See http://www.tldp.org/LDP/abs/html/exitcodes.html
    # I assume that Mac OS X behaves in the same way at least Mac
    # OS X builder behavior suggests this.
    return exit_code(128 + val)
237
238 # -----
239
def compile_timeout_multiplier( val ):
    """Scale this test's compile-time timeout by `val`."""
    return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)

def _compile_timeout_multiplier( name, opts, v ):
    opts.compile_timeout_multiplier = v

def run_timeout_multiplier( val ):
    """Scale this test's run-time timeout by `val`."""
    return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)

def _run_timeout_multiplier( name, opts, v ):
    opts.run_timeout_multiplier = v

# -----

def extra_run_opts( val ):
    """Pass extra command-line arguments when running the test program."""
    def setup(name, opts, v=val):
        _extra_run_opts(name, opts, v)
    return setup

def _extra_run_opts( name, opts, v ):
    opts.extra_run_opts = v

# -----

def extra_hc_opts( val ):
    """Pass extra flags to the compiler for this test."""
    def setup(name, opts, v=val):
        _extra_hc_opts(name, opts, v)
    return setup

def _extra_hc_opts( name, opts, v ):
    opts.extra_hc_opts = v

# -----

def extra_clean( files ):
    """Extra file patterns to delete when cleaning this test's directory."""
    assert not isinstance(files, str), files
    def setup(name, opts, v=files):
        _extra_clean(name, opts, v)
    return setup

def _extra_clean( name, opts, v ):
    opts.clean_files = v
276
277 # -----
278
279 def stats_num_field( field, expecteds ):
280 return lambda name, opts, f=field, e=expecteds: _stats_num_field(name, opts, f, e);
281
282 def _stats_num_field( name, opts, field, expecteds ):
283 if field in opts.stats_range_fields:
284 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
285
286 if type(expecteds) is list:
287 for (b, expected, dev) in expecteds:
288 if b:
289 opts.stats_range_fields[field] = (expected, dev)
290 return
291 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
292
293 else:
294 (expected, dev) = expecteds
295 opts.stats_range_fields[field] = (expected, dev)
296
def compiler_stats_num_field( field, expecteds ):
    # Check a compiler -t stats field against expected value(s); the
    # .T file supplies a list of (guard, expected, deviation) triples.
    return lambda name, opts, f=field, e=expecteds: _compiler_stats_num_field(name, opts, f, e);

def _compiler_stats_num_field( name, opts, field, expecteds ):
    if field in opts.compiler_stats_range_fields:
        framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')

    # Compiler performance numbers change when debugging is on, making the results
    # useless and confusing. Therefore, skip if debugging is on.
    if compiler_debugged():
        skip(name, opts)

    # The first triple whose guard holds supplies the expectation.
    for (b, expected, dev) in expecteds:
        if b:
            opts.compiler_stats_range_fields[field] = (expected, dev)
            return

    framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
315
316 # -----
317
def when(b, f):
    # When list_brokens is on, we want to see all expect_broken calls,
    # so we always do f
    if not (b or config.list_broken):
        return normal
    return f

def unless(b, f):
    """Apply setup `f` only when `b` is false."""
    return when(not b, f)
328
def doing_ghci():
    """True if the 'ghci' way is among the ways being run."""
    return 'ghci' in config.run_ways

def ghci_dynamic( ):
    """True if GHC is dynamically linked."""
    return config.ghc_dynamic

def fast():
    """True when the testsuite is running in fast mode."""
    return config.fast

def platform( plat ):
    """True if the target platform string equals `plat`."""
    return config.platform == plat

def opsys( os ):
    """True if the target operating system equals `os`."""
    return config.os == os

def arch( arch ):
    """True if the target architecture equals `arch`."""
    return config.arch == arch

def wordsize( ws ):
    """True if the target word size matches `ws` (compared as a string)."""
    return config.wordsize == str(ws)

def msys( ):
    """True when running under MSYS on Windows."""
    return config.msys

def cygwin( ):
    """True when running under Cygwin on Windows."""
    return config.cygwin

def have_vanilla( ):
    """True if vanilla (non-profiled, non-dynamic) libraries are built."""
    return config.have_vanilla

def have_dynamic( ):
    """True if dynamic libraries are built."""
    return config.have_dynamic

def have_profiling( ):
    """True if profiling libraries are built."""
    return config.have_profiling

def in_tree_compiler( ):
    """True if the compiler under test is the in-tree compiler."""
    return config.in_tree_compiler

def compiler_lt( compiler, version ):
    """True if the compiler version is strictly less than `version`."""
    assert compiler == 'ghc'
    return version_lt(config.compiler_version, version)

def compiler_le( compiler, version ):
    """True if the compiler version is at most `version`."""
    assert compiler == 'ghc'
    return version_le(config.compiler_version, version)

def compiler_gt( compiler, version ):
    """True if the compiler version is strictly greater than `version`."""
    assert compiler == 'ghc'
    return version_gt(config.compiler_version, version)

def compiler_ge( compiler, version ):
    """True if the compiler version is at least `version`."""
    assert compiler == 'ghc'
    return version_ge(config.compiler_version, version)

def unregisterised( ):
    """True for an unregisterised build."""
    return config.unregisterised

def compiler_profiled( ):
    """True if the compiler itself was built with profiling."""
    return config.compiler_profiled

def compiler_debugged( ):
    """True if the compiler itself was built with debugging."""
    return config.compiler_debugged

def tag( t ):
    """True if the compiler was built with the given tag."""
    return t in config.compiler_tags
395
396 # ---
397
def high_memory_usage(name, opts):
    """Run with nothing else in parallel: this test needs a lot of memory."""
    opts.alone = True

# If a test is for a multi-CPU race, then running the test alone
# increases the chance that we'll actually see it.
def multi_cpu_race(name, opts):
    opts.alone = True
405
406 # ---
def literate( name, opts ):
    """The test's source file is literate Haskell (.lhs)."""
    opts.literate = 1

def c_src( name, opts ):
    """The test's source file is C."""
    opts.c_src = 1

def objc_src( name, opts ):
    """The test's source file is Objective-C."""
    opts.objc_src = 1

def objcpp_src( name, opts ):
    """The test's source file is Objective-C++."""
    opts.objcpp_src = 1

def cmm_src( name, opts ):
    """The test's source file is C-- (.cmm)."""
    opts.cmm_src = 1

def outputdir( odir ):
    """Place compilation output in the given directory."""
    def setup(name, opts, d=odir):
        _outputdir(name, opts, d)
    return setup

def _outputdir( name, opts, odir ):
    opts.outputdir = odir
427
428 # ----
429
# Note: these factories previously bound default arguments (c=cmd,
# p=prefix, f=fun) in their lambdas but then closed over the outer
# variable instead, leaving the defaults dead. They now use the bound
# defaults, consistent with the other combinators in this file.

def pre_cmd( cmd ):
    """Shell command to run in the test directory before the test."""
    return lambda name, opts, c=cmd: _pre_cmd(name, opts, c)

def _pre_cmd( name, opts, cmd ):
    opts.pre_cmd = cmd

# ----

def clean_cmd( cmd ):
    """Shell command to run in the test directory when cleaning."""
    return lambda name, opts, c=cmd: _clean_cmd(name, opts, c)

def _clean_cmd( name, opts, cmd ):
    opts.clean_cmd = cmd

# ----

def cmd_prefix( prefix ):
    """Prefix the command used to run the test program (e.g. a wrapper)."""
    return lambda name, opts, p=prefix: _cmd_prefix(name, opts, p)

def _cmd_prefix( name, opts, prefix ):
    # The wrapper prepends the prefix and a space to the run command.
    opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd

# ----

def cmd_wrapper( fun ):
    """Transform the command used to run the test program with `fun`."""
    return lambda name, opts, f=fun: _cmd_wrapper(name, opts, f)

def _cmd_wrapper( name, opts, fun ):
    opts.cmd_wrapper = fun

# ----

def compile_cmd_prefix( prefix ):
    """Prefix the compilation command line."""
    return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, p)

def _compile_cmd_prefix( name, opts, prefix ):
    opts.compile_cmd_prefix = prefix
467
468 # ----
469
def check_stdout( f ):
    """Install a custom stdout-checking function for this test."""
    def setup(name, opts, checker=f):
        _check_stdout(name, opts, checker)
    return setup

def _check_stdout( name, opts, f ):
    opts.check_stdout = f
475
476 # ----
477
def normalise_slashes( name, opts ):
    # Normalise path separators in the test's output before comparing.
    _normalise_fun(name, opts, normalise_slashes_)

def normalise_exe( name, opts ):
    # Strip executable suffixes (e.g. .exe) in the output before comparing.
    _normalise_fun(name, opts, normalise_exe_)

def normalise_fun( *fs ):
    # Add extra output normalisers for this test.  Note: `fs` is passed
    # as a single tuple; join_normalisers flattens it later.
    return lambda name, opts: _normalise_fun(name, opts, fs)

def _normalise_fun( name, opts, *fs ):
    opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)

def normalise_errmsg_fun( *fs ):
    # Add extra compiler-error-message normalisers for this test.
    return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)

def _normalise_errmsg_fun( name, opts, *fs ):
    opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)
495
def normalise_version_( *pkgs ):
    """Return a normaliser replacing '<pkg>-<version>' with '<pkg>-<VERSION>'
    for each of the given package names."""
    # Build the pattern once, outside the returned closure.  The closure
    # parameter used to be called 'str', shadowing the builtin.
    pattern = '(' + '|'.join(re.escape(p) for p in pkgs) + ')-[0-9.]+'
    def normalise_version__( s ):
        return re.sub(pattern, '\\1-<VERSION>', s)
    return normalise_version__
501
def normalise_version( *pkgs ):
    """Normalise versions of the given packages in both stdout and stderr."""
    def normalise_version__( name, opts ):
        _normalise_fun(name, opts, normalise_version_(*pkgs))
        _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
    return normalise_version__

def normalise_drive_letter(name, opts):
    # Windows only. Change D:\\ to C:\\.
    # (The lambda parameter used to be called 'str', shadowing the builtin.)
    _normalise_fun(name, opts, lambda s: re.sub(r'[A-Z]:\\', r'C:\\', s))
511
def join_normalisers(*a):
    """
    Compose functions, flattening sequences.

       join_normalisers(f1,[f2,f3],f4)

    is the same as

       lambda x: f1(f2(f3(f4(x))))
    """

    # Compatibility: collections.Iterable was removed in Python 3.10
    # (use collections.abc.Iterable), and basestring does not exist on
    # Python 3 at all, so fall back to str there.
    try:
        iterable_type = collections.abc.Iterable
    except AttributeError:
        iterable_type = collections.Iterable
    try:
        string_types = basestring
    except NameError:
        string_types = str

    def flatten(l):
        """
        Taken from http://stackoverflow.com/a/2158532/946226
        """
        for el in l:
            if isinstance(el, iterable_type) and not isinstance(el, string_types):
                for sub in flatten(el):
                    yield sub
            else:
                yield el

    a = flatten(a)

    fn = lambda x: x # identity function
    for f in a:
        assert callable(f)
        # Compose so that later arguments are applied first.
        fn = lambda x, f=f, fn=fn: fn(f(x))
    return fn
541
542 # ----
543 # Function for composing two opt-fns together
544
def executeSetups(fs, name, opts):
    """Apply a setup function, or a (possibly nested) list of them, to opts."""
    if type(fs) is not list:
        # A single setup function: just apply it.
        fs(name, opts)
    else:
        # A list of setups: execute each one in order, recursing so that
        # nested lists work too.
        for setup in fs:
            executeSetups(setup, name, opts)
553
554 # -----------------------------------------------------------------------------
555 # The current directory of tests
556
def newTestDir( dir ):
    global thisdir_settings
    # reset the options for this test directory
    thisdir_settings = lambda name, opts, dir=dir: _newTestDir( name, opts, dir )

def _newTestDir( name, opts, dir ):
    # Record the directory the test lives in and reset the compiler
    # flags to the configured defaults.
    opts.testdir = dir
    opts.compiler_always_flags = config.compiler_always_flags
565
566 # -----------------------------------------------------------------------------
567 # Actually doing tests
568
# Tests that are safe to run concurrently.
parallelTests = []
# Tests that must run with nothing else in parallel (opts.alone).
aloneTests = []
# Names of every registered test, used to reject duplicates.
allTestNames = set()
572
def runTest (opts, name, func, args):
    # Run one test: on a fresh worker thread when config.use_threads,
    # otherwise directly in this thread.
    ok = 0

    if config.use_threads:
        t.thread_pool.acquire()
        try:
            # Wait until a worker slot is free, then claim it.
            while config.threads<(t.running_threads+1):
                t.thread_pool.wait()
            t.running_threads = t.running_threads+1
            ok=1
            t.thread_pool.release()
            thread.start_new_thread(test_common_thread, (name, opts, func, args))
        except:
            # Only release the pool lock if we still hold it, i.e. we
            # failed before the release above.
            if not ok:
                t.thread_pool.release()
    else:
        test_common_work (name, opts, func, args)
590
# name  :: String
# setup :: TestOpts -> IO ()
def test (name, setup, func, args):
    # Register one test with the driver; this is the entry point called
    # from the .T files.
    if config.only and name not in config.only:
        return

    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
    myTestOpts = copy.deepcopy(default_testopts)

    # Apply the directory-wide settings first, then this test's setup.
    executeSetups([thisdir_settings, setup], name, myTestOpts)

    thisTest = lambda : runTest(myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
    else:
        parallelTests.append(thisTest)
    allTestNames.add(name)
619
if config.use_threads:
    def test_common_thread(name, opts, func, args):
        # Worker-thread body: do the test's work while holding the
        # global lock, then return the worker slot to the pool.
        t.lock.acquire()
        try:
            test_common_work(name,opts,func,args)
        finally:
            t.lock.release()
            t.thread_pool.acquire()
            t.running_threads = t.running_threads - 1
            t.thread_pool.notify()
            t.thread_pool.release()
631
def get_package_cache_timestamp():
    """Return the mtime of the package.conf cache file, or 0.0 if unavailable."""
    if config.package_conf_cache_file == '':
        return 0.0
    try:
        return os.stat(config.package_conf_cache_file).st_mtime
    except OSError:
        # A missing or unreadable cache file just means "no timestamp".
        # (This used to be a bare except, which also swallowed things
        # like KeyboardInterrupt.)
        return 0.0
640
641
def test_common_work (name, opts, func, args):
    # Per-test driver: decide which ways apply, run them, then clean up.
    # Any unhandled exception is converted into a framework failure.
    try:
        t.total_tests = t.total_tests+1
        setLocalTestOpts(opts)

        # Snapshot the package cache timestamp so we can detect tests
        # that write to the package database.
        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()

        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']

        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]

        t.total_test_cases = t.total_test_cases + len(all_ways)

        # A way is runnable unless the test is skipped, the way is ruled
        # out by only_ways/omit_ways or the command line, or perf tests
        # are being skipped.
        ok_way = lambda way: \
            not getTestOpts().skip \
            and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and way not in getTestOpts().omit_ways

        # Which ways we are asked to skip
        do_ways = list(filter (ok_way,all_ways))

        # In fast mode, we skip all but one way
        if config.fast and len(do_ways) > 0:
            do_ways = [do_ways[0]]

        if not config.clean_only:
            # Run the required tests...
            for way in do_ways:
                if stopping():
                    break
                do_test (name, way, func, args)

            # ... and count everything else as skipped.
            for way in all_ways:
                if way not in do_ways:
                    skiptest (name,way)

        if getTestOpts().cleanup != '' and (config.clean_only or do_ways != []):
            pretest_cleanup(name)
            # Remove all the standard artefacts the test may have produced.
            clean([name + suff for suff in [
                   '', '.exe', '.exe.manifest', '.genscript',
                   '.stderr.normalised', '.stdout.normalised',
                   '.run.stderr.normalised', '.run.stdout.normalised',
                   '.comp.stderr.normalised', '.comp.stdout.normalised',
                   '.interp.stderr.normalised', '.interp.stdout.normalised',
                   '.stats', '.comp.stats',
                   '.hi', '.o', '.prof', '.exe.prof', '.hc',
                   '_stub.h', '_stub.c', '_stub.o',
                   '.hp', '.exe.hp', '.ps', '.aux', '.hcr', '.eventlog']])

            if func == multi_compile or func == multi_compile_fail:
                extra_mods = args[1]
                clean([replace_suffix(fx[0],'o') for fx in extra_mods])
                clean([replace_suffix(fx[0], 'hi') for fx in extra_mods])

            clean(getTestOpts().clean_files)

            if getTestOpts().outputdir != None:
                odir = in_testdir(getTestOpts().outputdir)
                try:
                    shutil.rmtree(odir)
                except:
                    pass

            try:
                shutil.rmtree(in_testdir('.hpc.' + name))
            except:
                pass

            # Run the test's own clean command, if it registered one.
            try:
                cleanCmd = getTestOpts().clean_cmd
                if cleanCmd != None:
                    result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + cleanCmd)
                    if result != 0:
                        framework_fail(name, 'cleaning', 'clean-command failed: ' + str(result))
            except:
                framework_fail(name, 'cleaning', 'clean-command exception')

        package_conf_cache_file_end_timestamp = get_package_cache_timestamp();

        # Tests must not modify the package database.
        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))

        # Record any files the test wrote but did not remove, for later
        # reporting.  Best-effort: failures here are ignored.
        try:
            for f in files_written[name]:
                if os.path.exists(f):
                    try:
                        if not f in files_written_not_removed[name]:
                            files_written_not_removed[name].append(f)
                    except:
                        files_written_not_removed[name] = [f]
        except:
            pass
    except Exception as e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
751
def clean(strs):
    """Delete the given file patterns (globs relative to the test directory)."""
    # Note: the loop variable used to be called 'str', shadowing the builtin.
    for pattern in strs:
        if (pattern.endswith('.package.conf') or
            pattern.startswith('package.conf.') and not pattern.endswith('/*')):
            # Package confs are directories now.
            pattern += '/*'

        for name in glob.glob(in_testdir(pattern)):
            clean_full_path(name)
761
def clean_full_path(name):
    """Remove `name` as a file or an empty directory; ENOENT is not an error."""
    try:
        # Remove files...
        os.remove(name)
        return
    except OSError as file_err:
        try:
            # ... and empty directories
            os.rmdir(name)
            return
        except OSError as dir_err:
            # We don't want to fail here, but we do want to know
            # what went wrong, so print out the exceptions.
            # ENOENT isn't a problem, though, as we clean files
            # that don't necessarily exist.
            if file_err.errno != errno.ENOENT:
                print(file_err)
            if dir_err.errno != errno.ENOENT:
                print(dir_err)
779
def do_test(name, way, func, args):
    # Run one test in one way and classify the result into the
    # expected/unexpected pass/fail tallies held in 't'.
    full_name = name + '(' + way + ')'

    try:
        if_verbose(2, "=====> %s %d of %d %s " % \
                    (full_name, t.total_tests, len(allTestNames), \
                    [t.n_unexpected_passes, \
                     t.n_unexpected_failures, \
                     t.n_framework_failures]))

        # Drop the global lock while the (possibly slow) test runs.
        if config.use_threads:
            t.lock.release()

        # Run the test's pre-command, if any, in the test directory.
        try:
            preCmd = getTestOpts().pre_cmd
            if preCmd != None:
                result = runCmdFor(name, 'cd ' + getTestOpts().testdir + ' && ' + preCmd)
                if result != 0:
                    framework_fail(name, way, 'pre-command failed: ' + str(result))
        except:
            framework_fail(name, way, 'pre-command exception')

        try:
            result = func(*[name,way] + args)
        finally:
            # Re-take the global lock whatever happened.
            if config.use_threads:
                t.lock.acquire()

        if getTestOpts().expect != 'pass' and \
           getTestOpts().expect != 'fail' and \
           getTestOpts().expect != 'missing-lib':
            framework_fail(name, way, 'bad expected ' + getTestOpts().expect)

        try:
            passFail = result['passFail']
        except:
            passFail = 'No passFail found'

        if passFail == 'pass':
            if _expect_pass(way):
                t.n_expected_passes = t.n_expected_passes + 1
                if name in t.expected_passes:
                    t.expected_passes[name].append(way)
                else:
                    t.expected_passes[name] = [way]
            else:
                if_verbose(1, '*** unexpected pass for %s' % full_name)
                t.n_unexpected_passes = t.n_unexpected_passes + 1
                addPassingTestInfo(t.unexpected_passes, getTestOpts().testdir, name, way)
        elif passFail == 'fail':
            if _expect_pass(way):
                reason = result['reason']
                tag = result.get('tag')
                # Stat failures are tallied separately from other failures.
                if tag == 'stat':
                    if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
                    t.n_unexpected_stat_failures = t.n_unexpected_stat_failures + 1
                    addFailingTestInfo(t.unexpected_stat_failures, getTestOpts().testdir, name, reason, way)
                else:
                    if_verbose(1, '*** unexpected failure for %s' % full_name)
                    t.n_unexpected_failures = t.n_unexpected_failures + 1
                    addFailingTestInfo(t.unexpected_failures, getTestOpts().testdir, name, reason, way)
            else:
                # Expected failure: either the library is missing, or the
                # test is marked expect_fail / broken.
                if getTestOpts().expect == 'missing-lib':
                    t.n_missing_libs = t.n_missing_libs + 1
                    if name in t.missing_libs:
                        t.missing_libs[name].append(way)
                    else:
                        t.missing_libs[name] = [way]
                else:
                    t.n_expected_failures = t.n_expected_failures + 1
                    if name in t.expected_failures:
                        t.expected_failures[name].append(way)
                    else:
                        t.expected_failures[name] = [way]
        else:
            framework_fail(name, way, 'bad result ' + passFail)
    except KeyboardInterrupt:
        stopNow()
    except:
        framework_fail(name, way, 'do_test exception')
        traceback.print_exc()
861
def addPassingTestInfo (testInfos, directory, name, way):
    """Record an unexpected pass as testInfos[directory][name] -> [ways]."""
    # Strip a leading './' or '.\' from the directory.
    directory = re.sub('^\\.[/\\\\]', '', directory)

    ways = testInfos.setdefault(directory, {}).setdefault(name, [])
    ways.append(way)
872
def addFailingTestInfo (testInfos, directory, name, reason, way):
    """Record a failure as testInfos[directory][name][reason] -> [ways]."""
    # Strip a leading './' or '.\' from the directory.
    directory = re.sub('^\\.[/\\\\]', '', directory)

    ways = testInfos.setdefault(directory, {}).setdefault(name, {}).setdefault(reason, [])
    ways.append(way)
886
def skiptest (name, way):
    """Count test `name` as skipped in `way`."""
    t.n_tests_skipped = t.n_tests_skipped + 1
    if name not in t.tests_skipped:
        t.tests_skipped[name] = []
    t.tests_skipped[name].append(way)
894
def framework_fail( name, way, reason ):
    """Record a failure of the test framework itself (not of the test)."""
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
    t.n_framework_failures = t.n_framework_failures + 1
    t.framework_failures.setdefault(name, []).append(way)
903
def badResult(result):
    """True unless `result` is a well-formed passing result dict."""
    try:
        return result['passFail'] != 'pass'
    except (TypeError, KeyError):
        # Not a dict, or no 'passFail' key: treat malformed results as
        # bad.  (This used to be a bare except.)
        return True
911
def passed():
    """The canonical result dict for a passing test."""
    return dict(passFail='pass')

def failBecause(reason, tag=None):
    """A failing result carrying `reason` and an optional `tag` (e.g. 'stat')."""
    return dict(passFail='fail', reason=reason, tag=tag)
917
918 # -----------------------------------------------------------------------------
919 # Generic command tests
920
921 # A generic command test is expected to run and exit successfully.
922 #
923 # The expected exit code can be changed via exit_code() as normal, and
924 # the expected stdout/stderr are stored in <testname>.stdout and
925 # <testname>.stderr. The output of the command can be ignored
926 # altogether by using run_command_ignore_output instead of
927 # run_command.
928
def run_command( name, way, cmd ):
    # Generic command test: run `cmd` and compare exit code and output
    # against the expectations (see the comment block above).
    return simple_run( name, '', cmd, '' )
931
932 # -----------------------------------------------------------------------------
933 # GHCi tests
934
def ghci_script_without_flag(flag):
    """Run a GHCi script with `flag` removed from the default compiler flags."""
    def apply(name, way, script):
        kept_flags = [f for f in getTestOpts().compiler_always_flags if f != flag]
        return ghci_script_override_default_flags(kept_flags)(name, way, script)
    return apply

def ghci_script_override_default_flags(overrides):
    """Run a GHCi script with the default compiler flags replaced by `overrides`."""
    def apply(name, way, script):
        return ghci_script(name, way, script, overrides)
    return apply
947
def ghci_script( name, way, script, override_flags = None ):
    # filter out -fforce-recomp from compiler_always_flags, because we're
    # actually testing the recompilation behaviour in the GHCi tests.
    flags = ' '.join(get_compiler_flags(override_flags, noforce=True))

    way_flags = ' '.join(config.way_flags(name)['ghci'])

    # We pass HC and HC_OPTS as environment variables, so that the
    # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
    # Note: '{{compiler}}' is a literal '{compiler}' placeholder left
    # for later substitution; only {flags}/{way_flags} are filled here.
    cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {flags} {way_flags}'
          ).format(flags=flags, way_flags=way_flags)

    # The script itself becomes the test's stdin.
    getTestOpts().stdin = script
    return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
962
963 # -----------------------------------------------------------------------------
964 # Compile-only tests
965
def compile_override_default_flags(overrides):
    """Compile (expecting success) with the default flags replaced by `overrides`."""
    def apply(name, way, extra_opts):
        return do_compile(name, way, 0, '', [], extra_opts, overrides)
    return apply

def compile_fail_override_default_flags(overrides):
    """Compile (expecting failure) with the default flags replaced by `overrides`."""
    def apply(name, way, extra_opts):
        return do_compile(name, way, 1, '', [], extra_opts, overrides)
    return apply

def compile_without_flag(flag):
    """Compile (expecting success) with `flag` removed from the default flags."""
    def apply(name, way, extra_opts):
        overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
        return compile_override_default_flags(overrides)(name, way, extra_opts)
    return apply

def compile_fail_without_flag(flag):
    """Compile (expecting failure) with `flag` removed from the default flags."""
    def apply(name, way, extra_opts):
        # Bug fix: this used to read 'getTestOpts.compiler_always_flags'
        # (no call parentheses), which raises AttributeError on the
        # function object at runtime.
        overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
        return compile_fail_override_default_flags(overrides)(name, way, extra_opts)
    return apply
991
def compile( name, way, extra_hc_opts ):
    # Compile a single module, expecting success.
    return do_compile( name, way, 0, '', [], extra_hc_opts )

def compile_fail( name, way, extra_hc_opts ):
    # Compile a single module, expecting the compilation to fail.
    return do_compile( name, way, 1, '', [], extra_hc_opts )

def multimod_compile( name, way, top_mod, extra_hc_opts ):
    # Compile a multi-module program rooted at top_mod, expecting success.
    return do_compile( name, way, 0, top_mod, [], extra_hc_opts )

def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
    # Compile a multi-module program rooted at top_mod, expecting failure.
    return do_compile( name, way, 1, top_mod, [], extra_hc_opts )

def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Compile top_mod plus explicitly-listed extra modules, expecting success.
    return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)

def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Compile top_mod plus explicitly-listed extra modules, expecting failure.
    return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
1009
def do_compile( name, way, should_fail, top_mod, extra_mods, extra_hc_opts, override_flags = None ):
    # print 'Compile only, extra args = ', extra_hc_opts
    pretest_cleanup(name)

    # Build any extra modules first; their build may contribute flags
    # for the main compilation.
    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    # Force recompilation when extra modules are involved.
    force = 0
    if extra_mods:
        force = 1
    result = simple_build( name, way, extra_hc_opts, should_fail, top_mod, 0, 1, force, override_flags )

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    (_, expected_stderr_file) = find_expected_file(name, 'stderr')
    actual_stderr_file = add_suffix(name, 'comp.stderr')

    if not compare_outputs(way, 'stderr',
                           join_normalisers(getTestOpts().extra_errmsg_normaliser,
                                            normalise_errmsg),
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=normalise_whitespace):
        return failBecause('stderr mismatch')

    # no problems found, this test passed
    return passed()
1043
def compile_cmp_asm( name, way, extra_hc_opts ):
    """Compile a .cmm file to assembly and compare it against <name>.asm."""
    # print('Compile only, extra args = ', extra_hc_opts)
    # (debug output disabled for consistency with do_compile, which keeps
    # the equivalent line commented out)
    pretest_cleanup(name)
    result = simple_build( name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0, 0)

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    (_, expected_asm_file) = find_expected_file(name, 'asm')
    actual_asm_file = add_suffix(name, 's')

    if not compare_outputs(way, 'asm',
                           join_normalisers(normalise_errmsg, normalise_asm),
                           expected_asm_file, actual_asm_file):
        return failBecause('asm mismatch')

    # no problems found, this test passed
    return passed()
1066
1067 # -----------------------------------------------------------------------------
1068 # Compile-and-run tests
1069
def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Build any extra modules first, then either interpret the test
    # (ghci way) or compile it and run the resulting executable.
    pretest_cleanup(name)

    extras_result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(extras_result):
        return extras_result
    extra_hc_opts = extras_result['hc_opts']

    if way == 'ghci': # interpreted...
        return interpreter_run( name, way, extra_hc_opts, 0, top_mod )

    # compiled...  Recompilation is forced when there are extra modules.
    force = 1 if extra_mods else 0
    build_result = simple_build( name, way, extra_hc_opts, 0, top_mod, 1, 1, force)
    if badResult(build_result):
        return build_result

    # we don't check the compiler's stderr for a compile-and-run test
    return simple_run( name, way, './' + name, getTestOpts().extra_run_opts )
1094
def compile_and_run( name, way, extra_hc_opts ):
    """Compile a single-module test and run the resulting program."""
    return compile_and_run__(name, way, '', [], extra_hc_opts)
1097
def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
    """Compile a multi-module test (--make on top_mod) and run it."""
    return compile_and_run__(name, way, top_mod, [], extra_hc_opts)
1100
def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
    """Build the extra modules, then compile and run the multi-module test."""
    return compile_and_run__(name, way, top_mod, extra_mods, extra_hc_opts)
1103
def stats( name, way, stats_file ):
    """Check the RTS -t stats produced by a previous run of the test."""
    return checkStats(name, way, stats_file, getTestOpts().stats_range_fields)
1107
1108 # -----------------------------------------------------------------------------
1109 # Check -t stats info
1110
def checkStats(name, way, stats_file, range_fields):
    """Check +RTS -t --machine-readable stats against expected ranges.

    range_fields maps a stats field name to (expected, deviation%).  A
    value outside [lower, upper] bounds, a missing stats file, or a
    missing field all produce a failure result; otherwise passed().
    """
    full_name = name + '(' + way + ')'

    result = passed()
    if len(range_fields) > 0:
        try:
            with open(in_testdir(stats_file)) as f:
                contents = f.read()
        except IOError as e:
            return failBecause(str(e))

        for (field, (expected, dev)) in range_fields.items():
            m = re.search(r'\("' + field + r'", "([0-9]+)"\)', contents)
            if m is None:
                print('Failed to find field: ', field)
                result = failBecause('no such stats field')
                # No value to range-check; move on to the next field.
                # (Previously this fell through and crashed on m.group.)
                continue
            val = int(m.group(1))

            lowerBound = trunc(           expected * ((100 - float(dev))/100))
            upperBound = trunc(0.5 + ceil(expected * ((100 + float(dev))/100)))

            deviation = round(((float(val) * 100)/ expected) - 100, 1)

            if val < lowerBound:
                print(field, 'value is too low:')
                print('(If this is because you have improved GHC, please')
                print('update the test so that GHC doesn\'t regress again)')
                result = failBecause('stat too good', tag='stat')
            if val > upperBound:
                print(field, 'value is too high:')
                result = failBecause('stat not good enough', tag='stat')

            # Report the numbers when out of range, or always at -v4.
            if val < lowerBound or val > upperBound or config.verbose >= 4:
                length = max(len(str(x)) for x in [expected, lowerBound, upperBound, val])

                def display(descr, val, extra):
                    print(descr, str(val).rjust(length), extra)

                display(' Expected ' + full_name + ' ' + field + ':', expected, '+/-' + str(dev) + '%')
                display(' Lower bound ' + full_name + ' ' + field + ':', lowerBound, '')
                display(' Upper bound ' + full_name + ' ' + field + ':', upperBound, '')
                display(' Actual ' + full_name + ' ' + field + ':', val, '')
                if val != expected:
                    display(' Deviation ' + full_name + ' ' + field + ':', deviation, '%')

    return result
1162
1163 # -----------------------------------------------------------------------------
1164 # Build a single-module program
1165
def extras_build( way, extra_mods, extra_hc_opts ):
    # Build each extra module in turn; for non-Haskell sources the
    # resulting object file is appended to the compiler options so the
    # main build links against it.
    for mod, mod_opts in extra_mods:
        build_result = simple_build( mod, way, mod_opts + ' ' + extra_hc_opts, 0, '', 0, 0, 0)
        if not (mod.endswith('.hs') or mod.endswith('.lhs')):
            extra_hc_opts += ' ' + replace_suffix(mod, 'o')
        if badResult(build_result):
            return build_result

    return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1176
1177
def simple_build( name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, noforce, override_flags = None ):
    # Compile one test, capturing the compiler's combined stdout+stderr
    # in <name>.comp.stderr, and check any compiler stats ranges.
    # NOTE: the shell command below is filled in with .format(**locals()),
    # so the local names (to_do, srcname, flags, errname, ...) are
    # load-bearing — do not rename them.
    opts = getTestOpts()
    errname = add_suffix(name, 'comp.stderr')
    rm_no_fail( qualify(errname, '') )

    # Pick the source file to compile and clean stale build products.
    if top_mod != '':
        srcname = top_mod
        rm_no_fail( qualify(name, '') )
        base, suf = os.path.splitext(top_mod)
        rm_no_fail( qualify(base, '') )
        rm_no_fail( qualify(base, 'exe') )
    elif addsuf:
        srcname = add_hs_lhs_suffix(name)
        rm_no_fail( qualify(name, '') )
    else:
        srcname = name
        rm_no_fail( qualify(name, 'o') )

    rm_no_fail( qualify(replace_suffix(srcname, "o"), '') )

    # Choose the compilation mode: --make for multi-module tests,
    # plain link, compile-to-C (-C), or compile only (-c).
    to_do = ''
    if top_mod != '':
        to_do = '--make '
        if link:
            to_do = to_do + '-o ' + name
    elif link:
        to_do = '-o ' + name
    elif opts.compile_to_hc:
        to_do = '-C'
    else:
        to_do = '-c' # just compile

    # Ask the compiler's RTS for machine-readable stats when the test
    # declares compiler_stats_range_fields.
    stats_file = name + '.comp.stats'
    if len(opts.compiler_stats_range_fields) > 0:
        extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'

    # Required by GHC 7.3+, harmless for earlier versions:
    if (getTestOpts().c_src or
        getTestOpts().objc_src or
        getTestOpts().objcpp_src or
        getTestOpts().cmm_src):
        extra_hc_opts += ' -no-hs-main '

    if getTestOpts().compile_cmd_prefix == '':
        cmd_prefix = ''
    else:
        cmd_prefix = getTestOpts().compile_cmd_prefix + ' '

    flags = ' '.join(get_compiler_flags(override_flags, noforce) +
                     config.way_flags(name)[way])

    # '{{compiler}}' survives this format() as '{compiler}' and is
    # substituted later from config by runCmdFor.
    cmd = ('cd {opts.testdir} && {cmd_prefix} '
           '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts} '
           '> {errname} 2>&1'
          ).format(**locals())

    result = runCmdFor(name, cmd, timeout_multiplier=opts.compile_timeout_multiplier)

    if result != 0 and not should_fail:
        if config.verbose >= 1 and _expect_pass(way):
            print('Compile failed (status ' + repr(result) + ') errors were:')
            actual_stderr_path = in_testdir(name, 'comp.stderr')
            if_verbose_dump(1, actual_stderr_path)

    # ToDo: if the sub-shell was killed by ^C, then exit

    # Stats are checked even when the compile failed as expected.
    statsResult = checkStats(name, way, stats_file, opts.compiler_stats_range_fields)

    if badResult(statsResult):
        return statsResult

    # The exit code must agree with should_fail.
    if should_fail:
        if result == 0:
            return failBecause('exit code 0')
    else:
        if result != 0:
            return failBecause('exit code non-0')

    return passed()
1257
1258 # -----------------------------------------------------------------------------
1259 # Run a program and check its output
1260 #
1261 # If testname.stdin exists, route input from that, else
1262 # from /dev/null. Route output to testname.run.stdout and
1263 # testname.run.stderr. Returns the exit code of the run.
1264
def simple_run(name, way, prog, extra_run_opts):
    # Run the compiled program 'prog' with this way's RTS flags, routing
    # stdin/stdout/stderr per the test options, then check the exit code,
    # the outputs, and (when requested) heap/time profiles and stats.
    opts = getTestOpts()

    # figure out what to use for stdin
    if opts.stdin != '':
        use_stdin = opts.stdin
    else:
        stdin_file = add_suffix(name, 'stdin')
        if os.path.exists(in_testdir(stdin_file)):
            use_stdin = stdin_file
        else:
            use_stdin = '/dev/null'

    run_stdout = add_suffix(name,'run.stdout')
    run_stderr = add_suffix(name,'run.stderr')

    # Clear leftovers from any previous run before we start.
    rm_no_fail(qualify(name,'run.stdout'))
    rm_no_fail(qualify(name,'run.stderr'))
    rm_no_fail(qualify(name, 'hp'))
    rm_no_fail(qualify(name,'ps'))
    rm_no_fail(qualify(name, 'prof'))

    my_rts_flags = rts_flags(way)

    # Ask the RTS for machine-readable stats when the test checks them.
    stats_file = name + '.stats'
    if len(opts.stats_range_fields) > 0:
        stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    else:
        stats_args = ''

    if opts.no_stdin:
        stdin_comes_from = ''
    else:
        stdin_comes_from = ' <' + use_stdin

    if opts.combined_output:
        redirection        = ' > {0} 2>&1'.format(run_stdout)
        redirection_append = ' >> {0} 2>&1'.format(run_stdout)
    else:
        redirection        = ' > {0} 2> {1}'.format(run_stdout, run_stderr)
        redirection_append = ' >> {0} 2>> {1}'.format(run_stdout, run_stderr)

    # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
    cmd = prog + stats_args + ' '  \
        + my_rts_flags + ' '  \
        + extra_run_opts + ' '  \
        + stdin_comes_from \
        + redirection

    if opts.cmd_wrapper != None:
        # Wrappers (e.g. remote/emulated execution) get the whole command;
        # their own output is appended to the same files.
        cmd = opts.cmd_wrapper(cmd) + redirection_append

    cmd = 'cd ' + opts.testdir + ' && ' + cmd

    # run the command
    result = runCmdFor(name, cmd, timeout_multiplier=opts.run_timeout_multiplier)

    # os.system-style status: high byte is the exit code, low byte the
    # terminating signal (currently unused).
    exit_code = result >> 8
    signal = result & 0xff

    # check the exit code
    if exit_code != opts.exit_code:
        if config.verbose >= 1 and _expect_pass(way):
            print('Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')')
            dump_stdout(name)
            dump_stderr(name)
        return failBecause('bad exit code')

    # -h / -p in the RTS flags imply heap/time profile files to validate.
    check_hp = my_rts_flags.find("-h") != -1
    check_prof = my_rts_flags.find("-p") != -1

    if not opts.ignore_output:
        bad_stderr = not opts.combined_output and not check_stderr_ok(name, way)
        bad_stdout = not check_stdout_ok(name, way)
        if bad_stderr:
            return failBecause('bad stderr')
        if bad_stdout:
            return failBecause('bad stdout')
        # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
        if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
            return failBecause('bad heap profile')
        if check_prof and not check_prof_ok(name, way):
            return failBecause('bad profile')

    return checkStats(name, way, stats_file, opts.stats_range_fields)
1350
def rts_flags(way):
    """Return the '+RTS ... -RTS' argument string for this way, or ''."""
    if way == '':
        return ''

    args = config.way_rts_flags[way]
    if not args:
        return ''
    return '+RTS ' + ' '.join(args) + ' -RTS'
1361
1362 # -----------------------------------------------------------------------------
1363 # Run a program in the interpreter and check its output
1364
def interpreter_run( name, way, extra_hc_opts, compile_only, top_mod ):
    """Run a test inside GHCi.

    Generates a GHCi script that sets up the program environment, emits
    delimiter lines so GHCi's own output can be split from the program's,
    runs Main.main, and then appends the test's stdin.  The combined
    output is split into comp.* / run.* files and checked like a
    compiled run.
    """
    opts = getTestOpts()

    outname = add_suffix(name, 'interp.stdout')
    errname = add_suffix(name, 'interp.stderr')
    rm_no_fail(outname)
    rm_no_fail(errname)
    rm_no_fail(name)

    if top_mod == '':
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = top_mod

    scriptname = add_suffix(name, 'genscript')
    qscriptname = in_testdir(scriptname)
    rm_no_fail(qscriptname)

    delimiter = '===== program output begins here\n'

    # 'with' closes the script; the original also called close() twice.
    with open(qscriptname, 'w') as script:
        if not compile_only:
            # set the prog name and command-line args to match the compiled
            # environment.
            script.write(':set prog ' + name + '\n')
            script.write(':set args ' + opts.extra_run_opts + '\n')
            # Add marker lines to the stdout and stderr output files, so we
            # can separate GHCi's output from the program's.
            script.write(':! echo ' + delimiter)
            script.write(':! echo 1>&2 ' + delimiter)
            # Set stdout to be line-buffered to match the compiled environment.
            script.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
            # wrapping in GHC.TopHandler.runIO ensures we get the same output
            # in the event of an exception as for the compiled program.
            script.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')

    # figure out what to use for stdin
    if opts.stdin != '':
        stdin_file = in_testdir(opts.stdin)
    else:
        stdin_file = qualify(name, 'stdin')

    if os.path.exists(stdin_file):
        # Append the test's stdin to the script; the whole script is piped
        # to GHCi below.  (The old code also leaked an unused open handle
        # to stdin_file here.)
        os.system('cat ' + stdin_file + ' >>' + qscriptname)

    flags = ' '.join(get_compiler_flags(override_flags=None, noforce=False) +
                     config.way_flags(name)[way])

    if opts.combined_output:
        redirection        = ' > {0} 2>&1'.format(outname)
        redirection_append = ' >> {0} 2>&1'.format(outname)
    else:
        redirection        = ' > {0} 2> {1}'.format(outname, errname)
        redirection_append = ' >> {0} 2>> {1}'.format(outname, errname)

    # NOTE: filled in with .format(**locals()) — the local names
    # srcname/flags/extra_hc_opts/scriptname/redirection are load-bearing.
    cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts} '
           '< {scriptname} {redirection}'
          ).format(**locals())

    if opts.cmd_wrapper != None:
        cmd = opts.cmd_wrapper(cmd) + redirection_append

    cmd = 'cd ' + opts.testdir + " && " + cmd

    result = runCmdFor(name, cmd, timeout_multiplier=opts.run_timeout_multiplier)

    # os.system-style status: high byte is the exit code.
    exit_code = result >> 8

    # split the stdout into compilation/program output
    split_file(in_testdir(outname), delimiter,
               in_testdir(name, 'comp.stdout'),
               in_testdir(name, 'run.stdout'))
    split_file(in_testdir(errname), delimiter,
               in_testdir(name, 'comp.stderr'),
               in_testdir(name, 'run.stderr'))

    # check the exit code
    if exit_code != opts.exit_code:
        print('Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')')
        dump_stdout(name)
        dump_stderr(name)
        return failBecause('bad exit code')

    # ToDo: if the sub-shell was killed by ^C, then exit

    if opts.ignore_output or (check_stderr_ok(name, way) and
                              check_stdout_ok(name, way)):
        return passed()
    else:
        return failBecause('bad stdout or stderr')
1460
1461
def split_file(in_fn, delimiter, out1_fn, out2_fn):
    """Split in_fn into out1_fn/out2_fn around the first delimiter line.

    Lines before the delimiter go to out1_fn with CRs stripped; the
    delimiter line itself is dropped; everything after it goes to
    out2_fn verbatim.  The delimiter is compared after stripping leading
    whitespace (and CRs) from the line.
    """
    # Context managers close all three files; the original leaked infile.
    with open(in_fn) as infile, \
         open(out1_fn, 'w') as out1, \
         open(out2_fn, 'w') as out2:
        line = infile.readline()
        line = re.sub('\r', '', line) # ignore Windows EOL
        while (re.sub('^\s*', '', line) != delimiter and line != ''):
            out1.write(line)
            line = infile.readline()
            line = re.sub('\r', '', line)

        # Remaining lines are copied verbatim (no CR stripping), matching
        # the original behaviour.
        for line in infile:
            out2.write(line)
1480
1481 # -----------------------------------------------------------------------------
1482 # Utils
def get_compiler_flags(override_flags, noforce):
    """Assemble the per-test compiler flag list: the base flags (or the
    override), minus -fforce-recomp when noforce, plus the test's extra
    options and any -outputdir setting."""
    opts = getTestOpts()

    base = override_flags if override_flags is not None else opts.compiler_always_flags
    flags = copy.copy(base)

    if noforce:
        flags = [f for f in flags if f != '-fforce-recomp']

    flags.append(opts.extra_hc_opts)

    if opts.outputdir != None:
        flags.extend(["-outputdir", opts.outputdir])

    return flags
1500
def check_stdout_ok(name, way):
    # Compare the program's stdout against the expected file.  The
    # standard output normaliser is skipped for platform-specific
    # expected files, which are compared as-is.
    actual_stdout_file = add_suffix(name, 'run.stdout')
    (platform_specific, expected_stdout_file) = find_expected_file(name, 'stdout')

    if platform_specific:
        base_norm = lambda s: s
    else:
        base_norm = normalise_output

    extra_norm = join_normalisers(base_norm, getTestOpts().extra_normaliser)

    # A test may install its own stdout checker via check_stdout.
    custom_check = getTestOpts().check_stdout
    if custom_check:
        return custom_check(in_testdir(actual_stdout_file), extra_norm)

    return compare_outputs(way, 'stdout', extra_norm,
                           expected_stdout_file, actual_stdout_file)
1520
def dump_stdout( name ):
    """Print the test's captured stdout (<name>.run.stdout)."""
    contents = read_no_crs(in_testdir(name, 'run.stdout'))
    print('Stdout:')
    print(contents)
1524
def check_stderr_ok(name, way):
    # Compare the program's stderr against the expected file.  The
    # error-message normaliser is skipped for platform-specific expected
    # files, which are compared as-is.
    actual_stderr_file = add_suffix(name, 'run.stderr')
    (platform_specific, expected_stderr_file) = find_expected_file(name, 'stderr')

    if platform_specific:
        base_norm = lambda s: s
    else:
        base_norm = normalise_errmsg

    return compare_outputs(way, 'stderr',
                           join_normalisers(base_norm, getTestOpts().extra_errmsg_normaliser),
                           expected_stderr_file, actual_stderr_file)
1538
def dump_stderr( name ):
    """Print the test's captured stderr (<name>.run.stderr)."""
    contents = read_no_crs(in_testdir(name, 'run.stderr'))
    print("Stderr:")
    print(contents)
1542
def read_no_crs(file):
    """Read a text file and return its contents with CRs removed.

    Returns '' if the file cannot be read: on Windows, if the program
    fails very early, the files stdout/stderr are redirected to may not
    get created.
    """
    str = ''
    try:
        # 'with' guarantees the handle is closed; the original said
        # 'h.close' (no parentheses), which never closed it.
        with open(file) as h:
            str = h.read()
    except Exception:
        # Deliberate best-effort read (see docstring).
        pass
    return re.sub('\r', '', str)
1554
def write_file(file, str):
    """Write str to file, truncating any existing contents."""
    # 'with' closes the handle; the original's 'h.close' (no parens)
    # never actually closed it.
    with open(file, 'w') as h:
        h.write(str)
1559
def check_hp_ok(name):
    """Run hp2ps on the test's heap profile and, when ghostscript is
    available, validate the generated PostScript.  Returns True on
    success, printing a diagnostic on failure."""

    # do not qualify for hp2ps because we should be in the right directory
    hp2psCmd = "cd " + getTestOpts().testdir + " && {hp2ps} " + name

    hp2psResult = runCmdExitCode(hp2psCmd)

    actual_ps_path = in_testdir(name, 'ps')

    # Guard clauses replace the original nested conditionals.
    if hp2psResult != 0:
        print("hp2ps error when processing heap profile for " + name)
        return False

    if not os.path.exists(actual_ps_path):
        print("hp2ps did not generate PostScript for " + name)
        return False

    if not gs_working:
        return True # assume postscript is valid without ghostscript

    gsResult = runCmdExitCode(genGSCmd(actual_ps_path))
    if gsResult == 0:
        return True

    # (fixed: the original message was missing the space before "is")
    print("hp2ps output for " + name + " is not valid PostScript")
    return False
1584
def check_prof_ok(name, way):
    # Validate the .prof file of a profiled run: it must exist and be
    # non-empty; if a sample profile is provided it must also match.
    actual_prof_file = add_suffix(name, 'prof')
    actual_prof_path = in_testdir(actual_prof_file)

    if not os.path.exists(actual_prof_path):
        print(actual_prof_path + " does not exist")
        return False

    if os.path.getsize(actual_prof_path) == 0:
        print(actual_prof_path + " is empty")
        return False

    (_, expected_prof_file) = find_expected_file(name, 'prof.sample')
    expected_prof_path = in_testdir(expected_prof_file)

    # sample prof file is not required
    if not os.path.exists(expected_prof_path):
        return True

    return compare_outputs(way, 'prof', normalise_prof,
                           expected_prof_file, actual_prof_file,
                           whitespace_normaliser=normalise_whitespace)
1607
1608 # Compare expected output to actual output, and optionally accept the
1609 # new output. Returns true if output matched or was accepted, false
1610 # otherwise. See Note [Output comparison] for the meaning of the
1611 # normaliser and whitespace_normaliser parameters.
def compare_outputs(way, kind, normaliser, expected_file, actual_file,
                    whitespace_normaliser=lambda x:x):
    # Returns 1 when the normalised outputs match (or the new output was
    # accepted via --accept), 0 otherwise.  'kind' is only used in
    # diagnostics ('stdout', 'stderr', 'asm', ...).

    expected_path = in_testdir(expected_file)
    actual_path = in_testdir(actual_file)

    if os.path.exists(expected_path):
        expected_str = normaliser(read_no_crs(expected_path))
        expected_normalised_file = add_suffix(expected_file, 'normalised')
        expected_normalised_path = in_testdir(expected_normalised_file)
    else:
        # No expected file: the test only matches if the actual output
        # normalises to the empty string.
        expected_str = ''
        expected_normalised_path = '/dev/null'

    actual_raw = read_no_crs(actual_path)
    actual_str = normaliser(actual_raw)

    # See Note [Output comparison].
    if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
        return 1
    else:
        if config.verbose >= 1 and _expect_pass(way):
            print('Actual ' + kind + ' output differs from expected:')

        # Write out the normalised versions so the user can diff them.
        if expected_normalised_path != '/dev/null':
            write_file(expected_normalised_path, expected_str)

        actual_normalised_path = add_suffix(actual_path, 'normalised')
        write_file(actual_normalised_path, actual_str)

        if config.verbose >= 1 and _expect_pass(way):
            # See Note [Output comparison].
            r = os.system('diff -uw {} {}'.format(expected_normalised_path,
                                                  actual_normalised_path))

            # If for some reason there were no non-whitespace differences,
            # then do a full diff
            if r == 0:
                r = os.system('diff -u {} {}'.format(expected_normalised_path,
                                                     actual_normalised_path))

        if config.accept and (getTestOpts().expect == 'fail' or
                              way in getTestOpts().expect_fail_for):
            # --accept does not apply to tests that are expected to fail.
            if_verbose(1, 'Test is expected to fail. Not accepting new output.')
            return 0
        elif config.accept:
            # --accept: overwrite the expected file with the raw
            # (unnormalised) actual output and treat the test as passed.
            if_verbose(1, 'Accepting new output.')
            write_file(expected_path, actual_raw)
            return 1
        else:
            return 0
1663
1664 # Note [Output comparison]
1665 #
1666 # We do two types of output comparison:
1667 #
1668 # 1. To decide whether a test has failed. We apply a `normaliser` and an
1669 # optional `whitespace_normaliser` to the expected and the actual
1670 # output, before comparing the two.
1671 #
1672 # 2. To show as a diff to the user when the test indeed failed. We apply
1673 # the same `normaliser` function to the outputs, to make the diff as
1674 # small as possible (only showing the actual problem). But we don't
1675 # apply the `whitespace_normaliser` here, because it might completely
1676 # squash all whitespace, making the diff unreadable. Instead we rely
1677 # on the `diff` program to ignore whitespace changes as much as
1678 # possible (#10152).
1679
def normalise_whitespace( str ):
    """Merge runs of spaces, tabs and newlines into single spaces and
    strip leading/trailing whitespace."""
    collapsed = re.sub('[ \t\n]+', ' ', str)
    return collapsed.strip()
1684
def normalise_errmsg( str ):
    """Normalise a compiler error message for cross-platform comparison."""
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))

    # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
    # the colon is there because it appears in error messages; this
    # hacky solution is used in place of more sophisticated filename
    # mangling
    str = re.sub(r'([^\s])\.exe', r'\1', str)
    # normalise slashes, minimise Windows/Unix filename differences
    str = re.sub(r'\\', '/', str)
    # The inplace ghc's are called ghc-stage[123] to avoid filename
    # collisions, so we need to normalise that to just "ghc"
    str = re.sub('ghc-stage[123]', 'ghc', str)
    # Error messages sometimes mention the integer implementation package
    str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
    return str
1704
1705 # normalise a .prof file, so that we can reasonably compare it against
1706 # a sample. This doesn't compare any of the actual profiling data,
1707 # only the shape of the profile and the number of entries.
def normalise_prof (str):
    """Normalise a .prof file so it can be compared against a sample:
    only the shape of the profile (cost centre, module, entry count)
    is kept; all timing and allocation data is dropped."""
    # strip everything up to the line beginning "COST CENTRE"
    str = re.sub(r'^(.*\n)*COST CENTRE[^\n]*\n', '', str)

    # strip results for CAFs, these tend to change unpredictably
    str = re.sub(r'[ \t]*(CAF|IDLE).*\n', '', str)

    # XXX Ignore Main.main.  Sometimes this appears under CAF, and
    # sometimes under MAIN.
    str = re.sub(r'[ \t]*main[ \t]+Main.*\n', '', str)

    # A data line such as
    #   foo  Main  205  1  0.0  0.0  0.0  0.0
    # is reduced to "cost-centre module entries":
    #   foo  Main  1
    str = re.sub(r'\n([ \t]*[^ \t]+)([ \t]+[^ \t]+)([ \t]+\d+)([ \t]+\d+)[ \t]+([\d\.]+)[ \t]+([\d\.]+)[ \t]+([\d\.]+)[ \t]+([\d\.]+)',
                 '\n\\1 \\2 \\4', str)
    return str
1736
def normalise_slashes_( str ):
    """Convert backslashes to forward slashes (Windows path normalisation)."""
    return re.sub(r'\\', '/', str)
1740
def normalise_exe_( str ):
    """Drop '.exe' suffixes (Windows executable names)."""
    return re.sub(r'\.exe', '', str)
1744
def normalise_output( str ):
    """Normalise program output for comparison with the expected file."""
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    # Remove a .exe extension (for Windows)
    # This can occur in error messages generated by the program.
    str = re.sub(r'([^\s])\.exe', r'\1', str)
    return str
1754
def normalise_asm( str ):
    """Reduce assembly output to a comparable skeleton: keep mnemonics
    and labels, drop assembler directives, operands (except call
    targets) and blank lines."""
    # Lines starting (after whitespace) with a dot are directives.
    metadata = re.compile(r'^[ \t]*\..*$')
    kept = []
    for line in str.split('\n'):
        # Drop metadata directives (e.g. ".type")
        if metadata.match(line):
            continue
        tokens = re.sub('@plt', '', line).lstrip().split()
        # Drop empty lines.
        if not tokens:
            continue
        # Drop operands, except for call instructions.
        if tokens[0] == 'call':
            kept.append(tokens[0] + ' ' + tokens[1])
        else:
            kept.append(tokens[0])
    return '\n'.join(kept)
1775
def if_verbose( n, s ):
    """Print s only when the configured verbosity is at least n."""
    if n <= config.verbose:
        print(s)
1779
def if_verbose_dump( n, f ):
    """At verbosity >= n, dump the contents of file f.  Best effort: a
    blank line is printed if the file cannot be read."""
    if config.verbose >= n:
        try:
            # 'with' closes the handle; the original open() leaked it.
            with open(f) as h:
                print(h.read())
        except Exception:
            print('')
1786
def rawSystem(cmd_and_args):
    # We prefer subprocess.call to os.spawnv as the latter
    # seems to send its arguments through a shell or something
    # with the Windows (non-cygwin) python. An argument "a b c"
    # turns into three arguments ["a", "b", "c"].
    prog, args = cmd_and_args[0], cmd_and_args[1:]
    return subprocess.call([strip_quotes(prog)] + args)
1795
1796 # Note that this doesn't handle the timeout itself; it is just used for
1797 # commands that have timeout handling built-in.
def rawSystemWithTimeout(cmd_and_args):
    """Run cmd_and_args via rawSystem, asking the driver to stop if the
    timeout helper reports that ^C was pressed."""
    status = rawSystem(cmd_and_args)
    # The python timeout program uses 98 to signal that ^C was pressed
    if status == 98:
        stopNow()
    return status
1804
1805 # cmd is a complex command in Bourne-shell syntax
1806 # e.g (cd . && 'c:/users/simonpj/darcs/HEAD/compiler/stage1/ghc-inplace' ...etc)
1807 # Hence it must ultimately be run by a Bourne shell
1808 #
1809 # Mostly it invokes the command wrapped in 'timeout' thus
1810 # timeout 300 'cd . && ...blah blah'
1811 # so it's timeout's job to invoke the Bourne shell
1812 #
1813 # But watch out for the case when there is no timeout program!
1814 # Then, when using the native Python, os.system will invoke the cmd shell
1815
def runCmd( cmd ):
    """Run cmd (a Bourne-shell command string) under the timeout helper
    when one is configured; return the status shifted into
    os.system/wait format (exit code in the high byte)."""
    # Format cmd using config. Example: cmd='{hpc} report A.tix'
    cmd = cmd.format(**config.__dict__)
    if_verbose( 3, cmd )

    if config.os == 'mingw32':
        # On MinGW, we will always have timeout
        assert config.timeout_prog!=''

    if config.timeout_prog != '':
        r = rawSystemWithTimeout([config.timeout_prog, str(config.timeout), cmd])
    else:
        r = os.system(cmd)
    return r << 8
1831
def runCmdFor( name, cmd, timeout_multiplier=1.0 ):
    """Like runCmd, but scales the timeout by timeout_multiplier and,
    when config.check_files_written is set, runs the command under
    strace to record which files the test writes."""
    # Format cmd using config. Example: cmd='{hpc} report A.tix'
    cmd = cmd.format(**config.__dict__)
    if_verbose( 3, cmd )

    if config.os == 'mingw32':
        # On MinGW, we will always have timeout
        assert config.timeout_prog!=''
    timeout = int(ceil(config.timeout * timeout_multiplier))

    if config.timeout_prog == '':
        r = os.system(cmd)
    elif config.check_files_written:
        fn = name + ".strace"
        r = rawSystemWithTimeout(
            ["strace", "-o", fn, "-fF",
             "-e", "creat,open,chdir,clone,vfork",
             strip_quotes(config.timeout_prog), str(timeout), cmd])
        addTestFilesWritten(name, fn)
        rm_no_fail(fn)
    else:
        r = rawSystemWithTimeout([config.timeout_prog, str(timeout), cmd])
    return r << 8
1857
def runCmdExitCode( cmd ):
    """Run cmd and return its plain exit code (undoing runCmd's << 8)."""
    return runCmd(cmd) >> 8
1860
1861
1862 # -----------------------------------------------------------------------------
1863 # checking for files being written to by multiple tests
1864
# Shared tail fragments for the strace-line patterns below: a completed
# call (with return value or error), and a call whose result strace
# reported as unavailable.
re_strace_call_end = '(\) += ([0-9]+|-1 E.*)| <unfinished ...>)$'
re_strace_unavailable_end ='\) += \? <unavailable>$'

# Patterns matching the strace output lines we care about when tracking
# which files a test reads/writes: pid prefixes, process-creation calls
# (clone/vfork) so child working directories can be tracked, chdir, and
# open; the *_resumed/_unfinished variants cover calls that strace split
# across lines, and the ignore_* patterns are signal/exit noise.
re_strace_unavailable_line = re.compile('^' + re_strace_unavailable_end)
re_strace_unavailable_cntnt = re.compile('^<\.\.\. .* resumed> ' + re_strace_unavailable_end)
re_strace_pid = re.compile('^([0-9]+) +(.*)')
re_strace_clone = re.compile('^(clone\(|<... clone resumed> ).*\) = ([0-9]+)$')
re_strace_clone_unfinished = re.compile('^clone\( <unfinished \.\.\.>$')
re_strace_vfork = re.compile('^(vfork\(\)|<\.\.\. vfork resumed> \)) += ([0-9]+)$')
re_strace_vfork_unfinished = re.compile('^vfork\( <unfinished \.\.\.>$')
re_strace_chdir = re.compile('^chdir\("([^"]*)"(\) += 0| <unfinished ...>)$')
re_strace_chdir_resumed = re.compile('^<\.\.\. chdir resumed> \) += 0$')
re_strace_open = re.compile('^open\("([^"]*)", ([A-Z_|]*)(, [0-9]+)?' + re_strace_call_end)
re_strace_open_resumed = re.compile('^<... open resumed> ' + re_strace_call_end)
re_strace_ignore_sigchild = re.compile('^--- SIGCHLD \(Child exited\) @ 0 \(0\) ---$')
re_strace_ignore_sigchild2 = re.compile('^--- SIGCHLD {si_signo=SIGCHLD, si_code=CLD_EXITED, .*} ---$')
re_strace_ignore_exited = re.compile('^\+\+\+ exited with [0-9]* \+\+\+$')
re_strace_ignore_sigvtalarm = re.compile('^--- SIGVTALRM \(Virtual timer expired\) @ 0 \(0\) ---$')
re_strace_ignore_sigvtalarm2= re.compile('^--- SIGVTALRM {si_signo=SIGVTALRM, si_code=SI_TIMER, .*} ---$')
re_strace_ignore_sigint = re.compile('^--- SIGINT \(Interrupt\) @ 0 \(0\) ---$')
re_strace_ignore_sigfpe = re.compile('^--- SIGFPE \(Floating point exception\) @ 0 \(0\) ---$')
re_strace_ignore_sigsegv = re.compile('^--- SIGSEGV \(Segmentation fault\) @ 0 \(0\) ---$')
re_strace_ignore_sigpipe = re.compile('^--- SIGPIPE \(Broken pipe\) @ 0 \(0\) ---$')

# Files that are read or written but shouldn't be:
# * ghci_history shouldn't be read or written by tests
# * things under package.conf.d shouldn't be written by tests
bad_file_usages = {}

# Mapping from tests to the list of files that they write
files_written = {}

# Mapping from tests to the list of files that they write but don't clean
files_written_not_removed = {}
1899
def add_bad_file_usage(name, file):
    """Record that test 'name' used 'file' in a disallowed way,
    deduplicating per test."""
    # setdefault replaces the original bare 'except:' KeyError dance,
    # which would also have hidden any unrelated error.
    usages = bad_file_usages.setdefault(name, [])
    if file not in usages:
        usages.append(file)
1906
def mkPath(curdir, path):
    """Resolve 'path' (possibly relative) against the directory 'curdir'
    and return the canonical absolute path."""
    joined = os.path.join(curdir, path)
    return os.path.realpath(joined)
1911
def addTestFilesWritten(name, fn):
    """Record the files written by a test, serialising through the
    shared lock when the driver runs threaded."""
    if not config.use_threads:
        addTestFilesWrittenHelper(name, fn)
        return
    with t.lockFilesWritten:
        addTestFilesWrittenHelper(name, fn)
1918
def addTestFilesWrittenHelper(name, fn):
    """Parse the strace log 'fn' and record which files test 'name'
    opened for writing (into files_written / bad_file_usages), tracking
    each traced pid's working directory so relative paths resolve."""
    started = False
    working_directories = {}

    with open(fn, 'r') as f:
        for line in f:
            m_pid = re_strace_pid.match(line)
            if m_pid:
                pid = m_pid.group(1)
                content = m_pid.group(2)
            elif re_strace_unavailable_line.match(line):
                # Nothing to do for this line.  (BUG FIX: this used to be
                # a bare 'next', which is a no-op expression statement in
                # Python; the stale 'content' from the previous line was
                # then re-parsed.  'continue' is what was intended.)
                continue
            else:
                framework_fail(name, 'strace', "Can't find pid in strace line: " + line)

            m_open = re_strace_open.match(content)
            m_chdir = re_strace_chdir.match(content)
            m_clone = re_strace_clone.match(content)
            m_vfork = re_strace_vfork.match(content)

            # The first traced pid starts in our own working directory.
            if not started:
                working_directories[pid] = os.getcwd()
                started = True

            if m_open:
                file = m_open.group(1)
                file = mkPath(working_directories[pid], file)
                if file.endswith("ghci_history"):
                    add_bad_file_usage(name, file)
                elif not file in ['/dev/tty', '/dev/null'] and not file.startswith("/tmp/ghc"):
                    flags = m_open.group(2).split('|')
                    if 'O_WRONLY' in flags or 'O_RDWR' in flags:
                        if re.match('package\.conf\.d', file):
                            add_bad_file_usage(name, file)
                        else:
                            try:
                                if not file in files_written[name]:
                                    files_written[name].append(file)
                            except:
                                files_written[name] = [file]
                    elif 'O_RDONLY' in flags:
                        pass
                    else:
                        framework_fail(name, 'strace', "Can't understand flags in open strace line: " + line)
            elif m_chdir:
                # We optimistically assume that unfinished chdir's are going to succeed
                dir = m_chdir.group(1)
                working_directories[pid] = mkPath(working_directories[pid], dir)
            elif m_clone:
                # The child process inherits the parent's working directory.
                working_directories[m_clone.group(2)] = working_directories[pid]
            elif m_vfork:
                working_directories[m_vfork.group(2)] = working_directories[pid]
            elif re_strace_open_resumed.match(content):
                pass
            elif re_strace_chdir_resumed.match(content):
                pass
            elif re_strace_vfork_unfinished.match(content):
                pass
            elif re_strace_clone_unfinished.match(content):
                pass
            elif re_strace_ignore_sigchild.match(content):
                pass
            elif re_strace_ignore_sigchild2.match(content):
                pass
            elif re_strace_ignore_exited.match(content):
                pass
            elif re_strace_ignore_sigvtalarm.match(content):
                pass
            elif re_strace_ignore_sigvtalarm2.match(content):
                pass
            elif re_strace_ignore_sigint.match(content):
                pass
            elif re_strace_ignore_sigfpe.match(content):
                pass
            elif re_strace_ignore_sigsegv.match(content):
                pass
            elif re_strace_ignore_sigpipe.match(content):
                pass
            elif re_strace_unavailable_cntnt.match(content):
                pass
            else:
                framework_fail(name, 'strace', "Can't understand strace line: " + line)
2001
def checkForFilesWrittenProblems(file):
    # Append to 'file' a report of file-usage problems found during the
    # run: files written by more than one test, files written but never
    # removed, and outright bad file usages.  Reads the global
    # files_written, files_written_not_removed and bad_file_usages tables.
    foundProblem = False

    # Invert files_written: map each written file to the tests that wrote it.
    files_written_inverted = {}
    for t in files_written.keys():
        for f in files_written[t]:
            # setdefault replaces the old bare 'except:', which would
            # also have swallowed unrelated errors, not just KeyError.
            files_written_inverted.setdefault(f, []).append(t)

    for f in files_written_inverted.keys():
        if len(files_written_inverted[f]) > 1:
            if not foundProblem:
                foundProblem = True
                file.write("\n")
                file.write("\nSome files are written by multiple tests:\n")
            file.write("    " + f + " (" + str(files_written_inverted[f]) + ")\n")
    if foundProblem:
        file.write("\n")

    # -----

    if len(files_written_not_removed) > 0:
        file.write("\n")
        file.write("\nSome files written but not removed:\n")
        for t in sorted(files_written_not_removed.keys()):
            for f in files_written_not_removed[t]:
                file.write("    " + t + ": " + f + "\n")
        file.write("\n")

    # -----

    if len(bad_file_usages) > 0:
        file.write("\n")
        file.write("\nSome bad file usages:\n")
        for t in sorted(bad_file_usages.keys()):
            for f in bad_file_usages[t]:
                file.write("    " + t + ": " + f + "\n")
        file.write("\n")
2046
2047 # -----------------------------------------------------------------------------
2048 # checking if ghostscript is available for checking the output of hp2ps
2049
def genGSCmd(psfile):
    # Shell command to run ghostscript non-interactively over 'psfile'.
    return ' '.join([config.gs,
                     '-dNODISPLAY -dBATCH -dQUIET -dNOPAUSE',
                     psfile])
2052
def gsNotWorking():
    # Announce that hp2ps output cannot be validated.  The caller leaves
    # gs_working at 0; the previous 'global gs_working' declaration here
    # was a no-op because this function never assigns the variable.
    print("GhostScript not available for hp2ps tests")
2056
# Probe whether ghostscript can be used to validate hp2ps output:
# gs_working is 1 iff 'gs' accepts a known-good .ps file AND rejects a
# known-bad one.  When profiling is not configured we stay silent.
# (The former module-level 'global gs_working' statement was a no-op —
# 'global' only has meaning inside a function — and has been dropped,
# along with stray semicolons.)
gs_working = 0
if config.have_profiling:
    if config.gs != '':
        resultGood = runCmdExitCode(genGSCmd(config.confdir + '/good.ps'))
        if resultGood == 0:
            resultBad = runCmdExitCode(genGSCmd(config.confdir + '/bad.ps') +
                                       ' >/dev/null 2>&1')
            if resultBad != 0:
                print("GhostScript available for hp2ps tests")
                gs_working = 1
            else:
                gsNotWorking()
        else:
            gsNotWorking()
    else:
        gsNotWorking()
2074
def rm_no_fail( file ):
    # Remove 'file', silently ignoring the case where it does not exist
    # (or cannot be removed).  The old 'try/finally: return' swallowed
    # *every* exception — including KeyboardInterrupt and programming
    # errors — so we now catch only OSError.
    try:
        os.remove( file )
    except OSError:
        pass
2080
def add_suffix( name, suffix ):
    # 'name' unchanged for an empty suffix, otherwise 'name.suffix'.
    return name if suffix == '' else name + '.' + suffix
2086
def add_hs_lhs_suffix(name):
    # Pick the source-file extension dictated by the current test options;
    # plain Haskell ('hs') is the fallback.
    opts = getTestOpts()
    if opts.c_src:
        ext = 'c'
    elif opts.cmm_src:
        ext = 'cmm'
    elif opts.objc_src:
        ext = 'm'
    elif opts.objcpp_src:
        ext = 'mm'
    elif opts.literate:
        ext = 'lhs'
    else:
        ext = 'hs'
    return add_suffix(name, ext)
2100
def replace_suffix( name, suffix ):
    # Swap the extension of 'name' (if any) for 'suffix'.
    root = os.path.splitext(name)[0]
    return root + '.' + suffix
2104
def in_testdir(name, suffix=''):
    # Path of 'name' (with optional suffix) inside the current test's directory.
    filename = add_suffix(name, suffix)
    return getTestOpts().testdir + '/' + filename
2107
def qualify( name, suff ):
    # Filename 'name.suff' qualified with the current test directory.
    suffixed = add_suffix(name, suff)
    return in_testdir(suffixed)
2110
2111
2112 # Finding the sample output. The filename is of the form
2113 #
2114 # <test>.stdout[-ws-<wordsize>][-<platform>]
2115 #
# and we pick the most specific version available: a file with a
# platform (or OS) suffix is preferred over a generic one, and within
# each of those a matching wordsize suffix is preferred.  (The filename
# carries no compiler-version component; for version-specific control
# use compiler_lt() in the test options instead.)
2119 #
def find_expected_file(name, suff):
    # Find the most specific sample-output file for test 'name' with
    # suffix 'suff'.  Candidates are tried in order: platform-specific,
    # then OS-specific, then generic — each first with and then without
    # a wordsize component.  Returns (is_platform_specific, filename);
    # the plain basename is returned if nothing more specific exists.
    basename = add_suffix(name, suff)
    basepath = in_testdir(basename)

    candidates = []
    for (platformSpecific, plat) in [(1, '-' + config.platform),
                                     (1, '-' + config.os),
                                     (0, '')]:
        for ws in ['-ws-' + config.wordsize, '']:
            candidates.append((platformSpecific, basename + ws + plat))

    present = [normalise_slashes_(entry) for entry in glob.glob(basepath + '*')]

    for (platformSpecific, candidate) in candidates:
        if in_testdir(candidate) in present:
            return (platformSpecific, candidate)

    return (0, basename)
2138
2139 # Clean up prior to the test, so that we can't spuriously conclude
2140 # that it passed on the basis of old run outputs.
def pretest_cleanup(name):
    # Clean up prior to the test, so that we can't spuriously conclude
    # that it passed on the basis of old run outputs.
    opts = getTestOpts()
    if opts.outputdir is not None:
        odir = in_testdir(opts.outputdir)
        try:
            shutil.rmtree(odir)
        except OSError:
            # The directory may simply not exist yet; the bare 'except:'
            # this replaces would also have hidden unrelated errors.
            pass
        os.mkdir(odir)

    # Remove all per-test output files from any previous run.
    for suff in ['interp.stderr', 'interp.stdout',
                 'comp.stderr', 'comp.stdout',
                 'run.stderr', 'run.stdout',
                 'tix', 'exe.tix']:
        rm_no_fail(qualify(name, suff))
    # simple_build zaps the following:
    #   rm_nofail(qualify("o"))
    #   rm_nofail(qualify(""))
    # not interested in the return code
2162
2163 # -----------------------------------------------------------------------------
2164 # Return a list of all the files ending in '.T' below directories roots.
2165
def findTFiles(roots):
    # Collect every '.T' file below the given root directories.
    # (os.walk would give backslashes on Windows, which trip the
    # testsuite later, so we recurse by hand via findTFiles_.)
    result = []
    for root in roots:
        result.extend(findTFiles_(root))
    return result
2171
def findTFiles_(path):
    # One path: recurse into directories, keep '.T' files, drop the rest.
    if os.path.isdir(path):
        return findTFiles([path + '/' + entry for entry in os.listdir(path)])
    if path.endswith('.T'):
        return [path]
    return []
2180
2181 # -----------------------------------------------------------------------------
2182 # Output a test summary to the specified file object
2183
def summary(t, file, short=False):
    # Write a human-readable summary of test run 't' to file object
    # 'file'.  With short=True only the list of tests with unexpected
    # results is printed.

    file.write('\n')
    printUnexpectedTests(file, [t.unexpected_passes,
                                t.unexpected_failures,
                                t.unexpected_stat_failures])

    if short:
        # Only print the list of unexpected tests above.
        return

    elapsed = str(datetime.timedelta(seconds=
                  round(time.time() - time.mktime(t.start_time))))

    pieces = [
        'OVERALL SUMMARY for test run started at ',
        time.strftime("%c %Z", t.start_time), '\n',
        elapsed.rjust(8), ' spent to go through\n',
        repr(t.total_tests).rjust(8), ' total tests, which gave rise to\n',
        repr(t.total_test_cases).rjust(8), ' test cases, of which\n',
        repr(t.n_tests_skipped).rjust(8), ' were skipped\n',
        '\n',
        repr(t.n_missing_libs).rjust(8), ' had missing libraries\n',
        repr(t.n_expected_passes).rjust(8), ' expected passes\n',
        repr(t.n_expected_failures).rjust(8), ' expected failures\n',
        '\n',
        repr(t.n_framework_failures).rjust(8), ' caused framework failures\n',
        repr(t.n_unexpected_passes).rjust(8), ' unexpected passes\n',
        repr(t.n_unexpected_failures).rjust(8), ' unexpected failures\n',
        repr(t.n_unexpected_stat_failures).rjust(8), ' unexpected stat failures\n',
        '\n',
    ]
    file.write(''.join(pieces))

    if t.n_unexpected_passes > 0:
        file.write('Unexpected passes:\n')
        printPassingTestInfosSummary(file, t.unexpected_passes)

    if t.n_unexpected_failures > 0:
        file.write('Unexpected failures:\n')
        printFailingTestInfosSummary(file, t.unexpected_failures)

    if t.n_unexpected_stat_failures > 0:
        file.write('Unexpected stat failures:\n')
        printFailingTestInfosSummary(file, t.unexpected_stat_failures)

    if config.check_files_written:
        checkForFilesWrittenProblems(file)

    if stopping():
        file.write('WARNING: Testsuite run was terminated early\n')
2239
def printUnexpectedTests(file, testInfoss):
    # Emit a TEST="..." line naming every test with an unexpected
    # result, convenient for re-running exactly those tests.
    unexpected = [test
                  for testInfos in testInfoss
                  for directory in testInfos.keys()
                  for test in testInfos[directory].keys()]
    if unexpected:
        file.write('Unexpected results from:\n')
        file.write('TEST="' + ' '.join(unexpected) + '"\n')
        file.write('\n')
2251
def printPassingTestInfosSummary(file, testInfos):
    # One line per unexpectedly-passing test: directory (padded to
    # align), test name, and the ways it passed in.
    directories = sorted(testInfos.keys())
    maxDirLen = max(len(x) for x in directories)
    for directory in directories:
        for test in sorted(testInfos[directory].keys()):
            ways = ','.join(testInfos[directory][test])
            file.write('   ' + directory.ljust(maxDirLen + 2)
                       + test + ' (' + ways + ')\n')
    file.write('\n')
2263
def printFailingTestInfosSummary(file, testInfos):
    # One line per unexpectedly-failing test and failure reason:
    # directory (padded to align), test name, [reason], and the ways.
    directories = sorted(testInfos.keys())
    maxDirLen = max(len(d) for d in directories)
    for directory in directories:
        for test in sorted(testInfos[directory].keys()):
            for reason in testInfos[directory][test].keys():
                ways = ','.join(testInfos[directory][test][reason])
                file.write('   ' + directory.ljust(maxDirLen + 2)
                           + test + ' [' + reason + ']'
                           + ' (' + ways + ')\n')
    file.write('\n')
2278
def modify_lines(s, f):
    # Apply f to each line of s and re-join; any trailing newline is lost.
    return '\n'.join(map(f, s.splitlines()))