Testsuite: fix WAY=ghci when LOCAL=0
[ghc.git] / testsuite / driver / testlib.py
1 # coding=utf8
2 #
3 # (c) Simon Marlow 2002
4 #
5
6 from __future__ import print_function
7
8 import io
9 import shutil
10 import sys
11 import os
12 import errno
13 import string
14 import re
15 import traceback
16 import time
17 import datetime
18 import copy
19 import glob
20 from math import ceil, trunc
21 import collections
22 import subprocess
23
24 from testglobals import *
25 from testutil import *
26 from extra_files import extra_src_files
27
# Python 2/3 compatibility: 'basestring' does not exist in Python 3,
# where str and bytes are the relevant string-like types. Catch only
# NameError: a bare 'except:' would also hide unrelated errors.
try:
    basestring
except NameError: # Python 3
    basestring = (str,bytes)
32
# Threaded operation is optional; 'thread' was renamed to '_thread' in
# Python 3, so try the old name first and fall back.
if config.use_threads:
    import threading
    try:
        import thread
    except ImportError: # Python 3
        import _thread as thread
39
# Global stop flag: set when the run should wind down (e.g. after a
# keyboard interrupt) so that the main loop stops starting new tests.
global wantToStop
wantToStop = False

def stopNow():
    """Request that the test run stop as soon as possible."""
    global wantToStop
    wantToStop = True

def stopping():
    """Return True once a stop has been requested via stopNow()."""
    return wantToStop
47
# Options valid for the current test only (these get reset to
# testdir_testopts after each test).
#
# With threads each worker needs its own copy, hence threading.local;
# otherwise any object with a settable attribute will do.

global testopts_local
if config.use_threads:
    testopts_local = threading.local()
else:
    class TestOpts_Local:
        pass
    testopts_local = TestOpts_Local()
58
def getTestOpts():
    # Return the options of the test currently being run (thread-local).
    return testopts_local.x

def setLocalTestOpts(opts):
    # Install 'opts' as the current test's options for this thread.
    global testopts_local
    testopts_local.x=opts
65
def isStatsTest():
    """True if the current test checks compiler or runtime stats fields."""
    opts = getTestOpts()
    return bool(opts.compiler_stats_range_fields) or bool(opts.stats_range_fields)
69
70
# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
    # Chain f onto the directory-wide settings; executeSetups walks this
    # nested-list structure in order.
    global thisdir_settings
    thisdir_settings = [thisdir_settings, f]
76
77 # -----------------------------------------------------------------------------
78 # Canned setup functions for common cases. eg. for a test you might say
79 #
80 # test('test001', normal, compile, [''])
81 #
82 # to run it without any options, but change it to
83 #
84 # test('test001', expect_fail, compile, [''])
85 #
86 # to expect failure for this test.
87
def normal( name, opts ):
    """Default setup function: leave the test options untouched."""
    return

def skip( name, opts ):
    """Mark the test as skipped."""
    opts.skip = 1

def expect_fail( name, opts ):
    """Mark the test as expected to fail.

    The compiler, testdriver, OS or platform is missing a certain
    feature, and we don't plan to or can't fix it now or in the future.
    """
    opts.expect = 'fail'
99
def reqlib( lib ):
    """Mark the test as requiring library *lib* to be installed."""
    def setup(name, opts, l=lib):
        return _reqlib(name, opts, l)
    return setup
102
def stage1(name, opts):
    """Reject attempts to select the stage1 compiler from a .T file.

    See Note [Why is there no stage1 setup function?]
    """
    framework_fail(name, 'stage1 setup function does not exist',
                   'add your test to testsuite/tests/stage1 instead')
107
108 # Note [Why is there no stage1 setup function?]
109 #
110 # Presumably a stage1 setup function would signal that the stage1
111 # compiler should be used to compile a test.
112 #
113 # Trouble is, the path to the compiler + the `ghc --info` settings for
114 # that compiler are currently passed in from the `make` part of the
115 # testsuite driver.
116 #
117 # Switching compilers in the Python part would be entirely too late, as
118 # all ghc_with_* settings would be wrong. See config/ghc for possible
119 # consequences (for example, config.run_ways would still be
120 # based on the default compiler, quite likely causing ./validate --slow
121 # to fail).
122 #
123 # It would be possible to let the Python part of the testsuite driver
124 # make the call to `ghc --info`, but doing so would require quite some
125 # work. Care has to be taken to not affect the run_command tests for
126 # example, as they also use the `ghc --info` settings:
127 # quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
128 #
129 # If you want a test to run using the stage1 compiler, add it to the
130 # testsuite/tests/stage1 directory. Validate runs the tests in that
131 # directory with `make stage=1`.
132
# Cache the results of looking to see if we have a library or not.
# This makes quite a difference, especially on Windows.
have_lib = {}

def _reqlib( name, opts, lib ):
    """Expect 'missing-lib' unless ghc-pkg knows about *lib* (cached)."""
    if lib not in have_lib:
        cmd = strip_quotes(config.ghc_pkg)
        p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # Drain stdout and stderr so the child cannot block on a full
        # pipe buffer before exiting.
        p.communicate()
        have_lib[lib] = p.wait() == 0

    if not have_lib[lib]:
        opts.expect = 'missing-lib'
154
def req_haddock( name, opts ):
    # Expect 'missing-lib' unless a haddock binary is configured.
    if not config.haddock:
        opts.expect = 'missing-lib'

def req_profiling( name, opts ):
    '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
    if not config.have_profiling:
        opts.expect = 'fail'

def req_shared_libs( name, opts ):
    # Expect failure unless shared libraries are available.
    if not config.have_shared_libs:
        opts.expect = 'fail'

def req_interp( name, opts ):
    # Expect failure unless the GHC interpreter is available.
    if not config.have_interp:
        opts.expect = 'fail'

def req_smp( name, opts ):
    # Expect failure unless the SMP (threaded) runtime is available.
    if not config.have_smp:
        opts.expect = 'fail'
175
def ignore_output( name, opts ):
    # Do not compare the test's output against the expected files.
    opts.ignore_output = 1

def no_stdin( name, opts ):
    # Run the test without supplying anything on stdin.
    opts.no_stdin = 1

def combined_output( name, opts ):
    # Check stdout and stderr together against one expected file.
    opts.combined_output = True
184
185 # -----
186
def expect_fail_for( ways ):
    """Expect the test to fail, but only for the given ways."""
    def setup(name, opts, w=ways):
        return _expect_fail_for(name, opts, w)
    return setup

def _expect_fail_for( name, opts, ways ):
    opts.expect_fail_for = ways
192
def expect_broken( bug ):
    """Expect failure due to the indicated trac ticket number."""
    def setup(name, opts, b=bug):
        return _expect_broken(name, opts, b)
    return setup

def _expect_broken( name, opts, bug ):
    record_broken(name, opts, bug)
    opts.expect = 'fail'

def expect_broken_for( bug, ways ):
    """Expect failure (trac ticket *bug*) for the given ways only."""
    def setup(name, opts, b=bug, w=ways):
        return _expect_broken_for(name, opts, b, w)
    return setup

def _expect_broken_for( name, opts, bug, ways ):
    record_broken(name, opts, bug)
    opts.expect_fail_for = ways
208
def record_broken(name, opts, bug):
    """Remember (bug, testdir, name) so broken tests can be listed later."""
    global brokens
    entry = (bug, opts.testdir, name)
    if entry not in brokens:
        brokens.append(entry)
214
def _expect_pass(way):
    # Helper function. Not intended for use in .T files.
    # A pass is expected unless the test is marked 'fail'/'missing-lib'
    # or this particular way is listed in expect_fail_for.
    opts = getTestOpts()
    if opts.expect != 'pass':
        return False
    return way not in opts.expect_fail_for
219
220 # -----
221
def omit_ways( ways ):
    """Never run this test in any of the given ways."""
    def setup(name, opts, w=ways):
        return _omit_ways(name, opts, w)
    return setup

def _omit_ways( name, opts, ways ):
    opts.omit_ways = ways
227
228 # -----
229
def only_ways( ways ):
    """Run this test only in the given ways."""
    def setup(name, opts, w=ways):
        return _only_ways(name, opts, w)
    return setup

def _only_ways( name, opts, ways ):
    opts.only_ways = ways
235
236 # -----
237
def extra_ways( ways ):
    """Additionally run this test in the given ways."""
    def setup(name, opts, w=ways):
        return _extra_ways(name, opts, w)
    return setup

def _extra_ways( name, opts, ways ):
    opts.extra_ways = ways
243
244 # -----
245
def set_stdin( file ):
    """Feed the given file to the test program's stdin."""
    def setup(name, opts, f=file):
        return _set_stdin(name, opts, f)
    return setup

def _set_stdin( name, opts, f ):
    opts.stdin = f
251
252 # -----
253
def exit_code( val ):
    """Expect the test program to exit with code *val*."""
    def setup(name, opts, v=val):
        return _exit_code(name, opts, v)
    return setup

def _exit_code( name, opts, v ):
    opts.exit_code = v
259
def signal_exit_code( val ):
    """Expect the test program to be killed by signal *val*.

    When an application running on Linux receives a fatal error signal,
    its exit code is encoded as 128 + the signal value; see
    http://www.tldp.org/LDP/abs/html/exitcodes.html. Mac OS X builder
    behaviour suggests it does the same. Solaris reports the raw value.
    """
    if opsys('solaris2'):
        return exit_code(val)
    return exit_code(val + 128)
270
271 # -----
272
def compile_timeout_multiplier( val ):
    """Scale the compilation timeout for this test by *val*."""
    def setup(name, opts, v=val):
        return _compile_timeout_multiplier(name, opts, v)
    return setup

def _compile_timeout_multiplier( name, opts, v ):
    opts.compile_timeout_multiplier = v

def run_timeout_multiplier( val ):
    """Scale the run-time timeout for this test by *val*."""
    def setup(name, opts, v=val):
        return _run_timeout_multiplier(name, opts, v)
    return setup

def _run_timeout_multiplier( name, opts, v ):
    opts.run_timeout_multiplier = v
284
285 # -----
286
def extra_run_opts( val ):
    """Pass extra command-line options when running the test program."""
    def setup(name, opts, v=val):
        return _extra_run_opts(name, opts, v)
    return setup

def _extra_run_opts( name, opts, v ):
    opts.extra_run_opts = v

# -----

def extra_hc_opts( val ):
    """Pass extra options to the compiler for this test."""
    def setup(name, opts, v=val):
        return _extra_hc_opts(name, opts, v)
    return setup

def _extra_hc_opts( name, opts, v ):
    opts.extra_hc_opts = v
300
301 # -----
302
def extra_clean( files ):
    """No-op, kept for backwards compatibility.

    TODO. Remove all calls to extra_clean.
    """
    def setup(_name, _opts):
        return None
    return setup

def extra_files(files):
    """Declare extra source files this test depends on."""
    def setup(name, opts):
        return _extra_files(name, opts, files)
    return setup

def _extra_files(name, opts, files):
    opts.extra_files.extend(files)
312
313 # -----
314
315 def stats_num_field( field, expecteds ):
316 return lambda name, opts, f=field, e=expecteds: _stats_num_field(name, opts, f, e);
317
318 def _stats_num_field( name, opts, field, expecteds ):
319 if field in opts.stats_range_fields:
320 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
321
322 if type(expecteds) is list:
323 for (b, expected, dev) in expecteds:
324 if b:
325 opts.stats_range_fields[field] = (expected, dev)
326 return
327 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
328
329 else:
330 (expected, dev) = expecteds
331 opts.stats_range_fields[field] = (expected, dev)
332
def compiler_stats_num_field( field, expecteds ):
    """Check a compiler-statistics field of the test.

    *expecteds* is a list of (guard, expected, deviation) triples; the
    first with a true guard is used.
    """
    def setup(name, opts, f=field, e=expecteds):
        return _compiler_stats_num_field(name, opts, f, e)
    return setup

def _compiler_stats_num_field( name, opts, field, expecteds ):
    if field in opts.compiler_stats_range_fields:
        framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')

    # Compiler performance numbers change when debugging is on, making
    # the results useless and confusing. Therefore, skip if debugging
    # is on.
    if compiler_debugged():
        skip(name, opts)

    for (b, expected, dev) in expecteds:
        if b:
            opts.compiler_stats_range_fields[field] = (expected, dev)
            return

    framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
349
350 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
351
352 # -----
353
def when(b, f):
    """Apply setup function f only when b holds, else do nothing.

    When list_broken is on we want to see all expect_broken calls, so in
    that mode f is always applied.
    """
    return f if (b or config.list_broken) else normal

def unless(b, f):
    """Apply setup function f only when b does not hold."""
    return when(not b, f)
364
# Predicates over the configuration, for use with when()/unless().

def doing_ghci():
    # True when the 'ghci' way is among the ways being run.
    return 'ghci' in config.run_ways

def ghc_dynamic():
    return config.ghc_dynamic

def fast():
    # speed == 2 is the fastest testsuite speed setting.
    return config.speed == 2

def platform( plat ):
    return config.platform == plat

def opsys( os ):
    return config.os == os

def arch( arch ):
    return config.arch == arch

def wordsize( ws ):
    # config.wordsize is stored as a string.
    return config.wordsize == str(ws)

def msys( ):
    return config.msys

def cygwin( ):
    return config.cygwin

def have_vanilla( ):
    return config.have_vanilla

def have_dynamic( ):
    return config.have_dynamic

def have_profiling( ):
    return config.have_profiling

def in_tree_compiler( ):
    return config.in_tree_compiler

def unregisterised( ):
    return config.unregisterised

def compiler_profiled( ):
    return config.compiler_profiled

def compiler_debugged( ):
    return config.compiler_debugged
412
# ---

def high_memory_usage(name, opts):
    # Run this test with no other tests running concurrently.
    opts.alone = True

# If a test is for a multi-CPU race, then running the test alone
# increases the chance that we'll actually see it.
def multi_cpu_race(name, opts):
    opts.alone = True
422
# ---
def literate( name, opts ):
    # The test's source file is a literate (.lhs) file.
    opts.literate = 1;

def c_src( name, opts ):
    # The test's source file is a C file.
    opts.c_src = 1;

def objc_src( name, opts ):
    # The test's source file is an Objective-C file.
    opts.objc_src = 1;

def objcpp_src( name, opts ):
    # The test's source file is an Objective-C++ file.
    opts.objcpp_src = 1;

def cmm_src( name, opts ):
    # The test's source file is a C-- (.cmm) file.
    opts.cmm_src = 1;
438
def outputdir( odir ):
    """Place compilation output in *odir* instead of the default."""
    def setup(name, opts, d=odir):
        return _outputdir(name, opts, d)
    return setup

def _outputdir( name, opts, odir ):
    opts.outputdir = odir
444
# ----

def pre_cmd( cmd ):
    """Run *cmd* in the test directory before the test itself."""
    def setup(name, opts, c=cmd):
        return _pre_cmd(name, opts, c)
    return setup

def _pre_cmd( name, opts, cmd ):
    opts.pre_cmd = cmd
452
# ----

def clean_cmd( cmd ):
    """No-op, kept for backwards compatibility.

    TODO. Remove all calls to clean_cmd.
    """
    def setup(_name, _opts):
        return None
    return setup
458
459 # ----
460
def cmd_prefix( prefix ):
    """Prepend *prefix* (plus a space) to the test's command line."""
    def setup(name, opts, p=prefix):
        return _cmd_prefix(name, opts, p)
    return setup

def _cmd_prefix( name, opts, prefix ):
    opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd
466
467 # ----
468
def cmd_wrapper( fun ):
    """Transform the test's command line with the given function."""
    def setup(name, opts, f=fun):
        return _cmd_wrapper(name, opts, f)
    return setup

def _cmd_wrapper( name, opts, fun ):
    opts.cmd_wrapper = fun
474
475 # ----
476
def compile_cmd_prefix( prefix ):
    """Prepend *prefix* to the compilation command line."""
    def setup(name, opts, p=prefix):
        return _compile_cmd_prefix(name, opts, p)
    return setup

def _compile_cmd_prefix( name, opts, prefix ):
    opts.compile_cmd_prefix = prefix
482
483 # ----
484
def check_stdout( f ):
    """Use function *f* to validate the test's stdout."""
    def setup(name, opts, fn=f):
        return _check_stdout(name, opts, fn)
    return setup

def _check_stdout( name, opts, f ):
    opts.check_stdout = f
490
491 # ----
492
def normalise_slashes( name, opts ):
    """Normalise path separators in the test's output."""
    _normalise_fun(name, opts, normalise_slashes_)

def normalise_exe( name, opts ):
    """Normalise executable names in the test's output."""
    _normalise_fun(name, opts, normalise_exe_)

def normalise_fun( *fs ):
    """Add extra normaliser functions for the test's output."""
    def setup(name, opts):
        return _normalise_fun(name, opts, fs)
    return setup

def _normalise_fun( name, opts, *fs ):
    opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)

def normalise_errmsg_fun( *fs ):
    """Add extra normaliser functions for the test's error messages."""
    def setup(name, opts):
        return _normalise_errmsg_fun(name, opts, fs)
    return setup

def _normalise_errmsg_fun( name, opts, *fs ):
    opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)
510
def normalise_version_( *pkgs ):
    """Return a function replacing '<pkg>-<version>' with '<pkg>-<VERSION>'."""
    pattern = '(' + '|'.join(map(re.escape, pkgs)) + ')-[0-9.]+'
    def normalise_version__( str ):
        return re.sub(pattern, '\\1-<VERSION>', str)
    return normalise_version__

def normalise_version( *pkgs ):
    """Setup function: hide package versions in output and error messages."""
    def normalise_version__( name, opts ):
        _normalise_fun(name, opts, normalise_version_(*pkgs))
        _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
    return normalise_version__
522
def normalise_drive_letter(name, opts):
    # Windows only. Change D:\\ to C:\\.
    _normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))

def keep_prof_callstacks(name, opts):
    """Keep profiling callstacks.

    Use together with `only_ways(prof_ways)`.
    """
    opts.keep_prof_callstacks = True
533
def join_normalisers(*a):
    """
    Compose functions, flattening sequences.

       join_normalisers(f1,[f2,f3],f4)

    is the same as

       lambda x: f1(f2(f3(f4(x))))
    """

    # collections.Iterable was deprecated in Python 3.3 and removed in
    # Python 3.10; import from collections.abc with a fallback for
    # Python 2.
    try:
        from collections.abc import Iterable as _Iterable
    except ImportError:  # Python 2
        from collections import Iterable as _Iterable

    def flatten(l):
        """
        Taken from http://stackoverflow.com/a/2158532/946226
        """
        for el in l:
            if isinstance(el, _Iterable) and not isinstance(el, basestring):
                for sub in flatten(el):
                    yield sub
            else:
                yield el

    a = flatten(a)

    fn = lambda x:x # identity function
    for f in a:
        assert callable(f)
        fn = lambda x,f=f,fn=fn: fn(f(x))
    return fn
563
# ----
# Function for composing two opt-fns together

def executeSetups(fs, name, opts):
    """Apply a setup function, or a (nested) list of them, in order."""
    if type(fs) is list:
        # If we have a list of setups, then execute each one
        for setup in fs:
            executeSetups(setup, name, opts)
    else:
        # fs is a single function, so just apply it
        fs(name, opts)
575
# -----------------------------------------------------------------------------
# The current directory of tests

def newTestDir(tempdir, dir):
    # Install the per-directory default settings applied to every test
    # subsequently declared in this directory's .T file.

    global thisdir_settings
    # reset the options for this test directory
    def settings(name, opts, tempdir=tempdir, dir=dir):
        return _newTestDir(name, opts, tempdir, dir)
    thisdir_settings = settings
586
# Should be equal to entry in toplevel .gitignore.
testdir_suffix = '.run'

def _newTestDir(name, opts, tempdir, dir):
    # Set the test's source directory and its scratch directory under
    # tempdir, and reset the compiler flags to the configured defaults.
    opts.srcdir = os.path.join(os.getcwd(), dir)
    opts.testdir = os.path.join(tempdir, dir, name + testdir_suffix)
    opts.compiler_always_flags = config.compiler_always_flags
594
# -----------------------------------------------------------------------------
# Actually doing tests

# Tests that may run concurrently with other tests.
parallelTests = []
# Tests that must run with no other test active (opts.alone).
aloneTests = []
# Names of all tests seen so far, used to detect duplicates.
allTestNames = set([])

def runTest (opts, name, func, args):
    # Run one test, either on a fresh worker thread (threaded mode) or
    # synchronously in this thread.
    ok = 0

    if config.use_threads:
        # Wait for a free slot in the thread pool. 'ok' records whether
        # we already released the pool lock, so the except-branch can
        # release it exactly once on failure.
        t.thread_pool.acquire()
        try:
            while config.threads<(t.running_threads+1):
                t.thread_pool.wait()
            t.running_threads = t.running_threads+1
            ok=1
            t.thread_pool.release()
            thread.start_new_thread(test_common_thread, (name, opts, func, args))
        except:
            if not ok:
                t.thread_pool.release()
    else:
        test_common_work (name, opts, func, args)
619
# name :: String
# setup :: TestOpts -> IO ()
def test (name, setup, func, args):
    # Register a single test with the driver; called from .T files.
    if config.run_only_some_tests:
        if name not in config.only:
            return
        else:
            # Note [Mutating config.only]
            # config.only is initially the set of tests requested by
            # the user (via 'make TEST='). We then remove all tests that
            # we've already seen (in .T files), so that we can later
            # report on any tests we couldn't find and error out.
            config.only.remove(name)

    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
    myTestOpts = copy.deepcopy(default_testopts)

    executeSetups([thisdir_settings, setup], name, myTestOpts)

    # Queue the test; 'alone' tests run with nothing else in flight.
    thisTest = lambda : runTest(myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
    else:
        parallelTests.append(thisTest)
    allTestNames.add(name)
656
if config.use_threads:
    def test_common_thread(name, opts, func, args):
        # Worker-thread wrapper around test_common_work: hold the global
        # lock while the test runs, then return this thread's slot to
        # the pool and wake anyone waiting for a free slot.
        t.lock.acquire()
        try:
            test_common_work(name,opts,func,args)
        finally:
            t.lock.release()
            t.thread_pool.acquire()
            t.running_threads = t.running_threads - 1
            t.thread_pool.notify()
            t.thread_pool.release()
668
def get_package_cache_timestamp():
    """Return the mtime of the package db cache file, or 0.0 if unknown.

    Used to detect tests that modify the package database (the
    timestamps are compared before and after each test).
    """
    if config.package_conf_cache_file == '':
        return 0.0
    else:
        try:
            return os.stat(config.package_conf_cache_file).st_mtime
        except OSError:
            # File missing or unreadable. Only catch OSError: a bare
            # 'except:' would also swallow KeyboardInterrupt and real
            # programming errors.
            return 0.0
677
678
def test_common_work (name, opts, func, args):
    # Run 'func' for this test in every applicable way. Any unhandled
    # exception is reported as a framework failure rather than aborting
    # the whole run.
    try:
        t.total_tests = t.total_tests+1
        setLocalTestOpts(opts)

        # Snapshot the package db cache timestamp so we can detect tests
        # that modify the package database.
        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()

        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']

        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]

        t.total_test_cases = t.total_test_cases + len(all_ways)

        # A way is run unless the test is skipped, the way is omitted,
        # or it is excluded by only_ways / the command line / perf-test
        # skipping.
        ok_way = lambda way: \
            not getTestOpts().skip \
            and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and way not in getTestOpts().omit_ways

        # Which ways we are asked to skip
        do_ways = list(filter (ok_way,all_ways))

        # Only run all ways in slow mode.
        # See Note [validate and testsuite speed] in toplevel Makefile.
        if config.accept:
            # Only ever run one way
            do_ways = do_ways[:1]
        elif config.speed > 0:
            # However, if we EXPLICITLY asked for a way (with extra_ways)
            # please test it!
            explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
            other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
            do_ways = other_ways[:1] + explicit_ways

        # Find all files in the source directory that this test
        # depends on. Do this only once for all ways.
        # Generously add all filenames that start with the name of
        # the test to this set, as a convenience to test authors.
        # They will have to use the `extra_files` setup function to
        # specify all other files that their test depends on (but
        # this seems to be necessary for only about 10% of all
        # tests).
        files = set((f for f in os.listdir(opts.srcdir)
                       if f.startswith(name) and
                          not f.endswith(testdir_suffix)))
        for filename in (opts.extra_files + extra_src_files.get(name, [])):
            if filename.startswith('/'):
                framework_fail(name, 'whole-test',
                    'no absolute paths in extra_files please: ' + filename)

            elif '*' in filename:
                # Don't use wildcards in extra_files too much, as
                # globbing is slow.
                files.update((os.path.relpath(f, opts.srcdir)
                            for f in glob.iglob(in_srcdir(filename))))

            else:
                files.add(filename)

        # Run the required tests...
        for way in do_ways:
            if stopping():
                break
            do_test(name, way, func, args, files)

        for way in all_ways:
            if way not in do_ways:
                skiptest (name,way)

        if config.cleanup and do_ways:
            cleanup()

        package_conf_cache_file_end_timestamp = get_package_cache_timestamp();

        # A changed timestamp means some way of this test mutated the
        # package database, which tests must not do.
        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))

        # Record any files the test wrote but did not remove, for later
        # reporting (best effort only).
        try:
            for f in files_written[name]:
                if os.path.exists(f):
                    try:
                        if not f in files_written_not_removed[name]:
                            files_written_not_removed[name].append(f)
                    except:
                        files_written_not_removed[name] = [f]
        except:
            pass
    except Exception as e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
781
def do_test(name, way, func, args, files):
    # Run one test one way: populate a fresh test directory with the
    # test's files, run any pre-command, invoke 'func', and classify the
    # result against the test's expectations.
    opts = getTestOpts()

    full_name = name + '(' + way + ')'

    try:
        if_verbose(2, "=====> %s %d of %d %s " % \
                    (full_name, t.total_tests, len(allTestNames), \
                    [t.n_unexpected_passes, \
                     t.n_unexpected_failures, \
                     t.n_framework_failures]))

        # Clean up prior to the test, so that we can't spuriously conclude
        # that it passed on the basis of old run outputs.
        cleanup()

        # Link all source files for this test into a new directory in
        # /tmp, and run the test in that directory. This makes it
        # possible to run tests in parallel, without modification, that
        # would otherwise (accidentally) write to the same output file.
        # It also makes it easier to keep the testsuite clean.

        for extra_file in files:
            src = in_srcdir(extra_file)
            if extra_file.startswith('..'):
                # In case the extra_file is a file in an ancestor
                # directory (e.g. extra_files(['../shell.hs'])), make
                # sure it is copied to the test directory
                # (testdir/shell.hs), instead of ending up somewhere
                # else in the tree (testdir/../shell.hs)
                filename = os.path.basename(extra_file)
            else:
                filename = extra_file
            assert not '..' in filename # no funny stuff (foo/../../bar)
            dst = in_testdir(filename)

            if os.path.isfile(src):
                dirname = os.path.dirname(dst)
                if dirname:
                    mkdirp(dirname)
                try:
                    link_or_copy_file(src, dst)
                except OSError as e:
                    if e.errno == errno.EEXIST and os.path.isfile(dst):
                        # Some tests depend on files from ancestor
                        # directories (e.g. '../shell.hs'). It is
                        # possible such a file was already copied over
                        # for another test, since cleanup() doesn't
                        # delete them.
                        pass
                    else:
                        raise
            elif os.path.isdir(src):
                os.makedirs(dst)
                lndir(src, dst)
            else:
                if not config.haddock and os.path.splitext(filename)[1] == '.t':
                    # When using a ghc built without haddock support, .t
                    # files are rightfully missing. Don't
                    # framework_fail. Test will be skipped later.
                    pass
                else:
                    framework_fail(name, way,
                        'extra_file does not exist: ' + extra_file)

        if not files:
            # Always create the testdir, even when no files were copied
            # (because user forgot to specify extra_files setup function), to
            # prevent the confusing error: can't cd to <testdir>.
            os.makedirs(opts.testdir)

        if func.__name__ == 'run_command' or opts.pre_cmd:
            # When running 'MAKE' make sure 'TOP' still points to the
            # root of the testsuite.
            src_makefile = in_srcdir('Makefile')
            dst_makefile = in_testdir('Makefile')
            if os.path.exists(src_makefile):
                with open(src_makefile, 'r') as src:
                    makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
                with open(dst_makefile, 'w') as dst:
                    dst.write(makefile)

        # Release the global lock while the (possibly slow) test runs;
        # it is re-acquired before the shared counters are updated.
        if config.use_threads:
            t.lock.release()

        try:
            preCmd = getTestOpts().pre_cmd
            if preCmd != None:
                result = runCmdFor(name, 'cd "{opts.testdir}" && {preCmd}'.format(**locals()))
                if result != 0:
                    framework_fail(name, way, 'pre-command failed: ' + str(result))
        except:
            framework_fail(name, way, 'pre-command exception')

        try:
            result = func(*[name,way] + args)
        finally:
            if config.use_threads:
                t.lock.acquire()

        if getTestOpts().expect != 'pass' and \
           getTestOpts().expect != 'fail' and \
           getTestOpts().expect != 'missing-lib':
            framework_fail(name, way, 'bad expected ' + getTestOpts().expect)

        try:
            passFail = result['passFail']
        except:
            passFail = 'No passFail found'

        # Classify the outcome against the expectation, updating the
        # global counters and per-test records.
        if passFail == 'pass':
            if _expect_pass(way):
                t.n_expected_passes = t.n_expected_passes + 1
                if name in t.expected_passes:
                    t.expected_passes[name].append(way)
                else:
                    t.expected_passes[name] = [way]
            else:
                if_verbose(1, '*** unexpected pass for %s' % full_name)
                t.n_unexpected_passes = t.n_unexpected_passes + 1
                addPassingTestInfo(t.unexpected_passes, getTestOpts().testdir, name, way)
        elif passFail == 'fail':
            if _expect_pass(way):
                reason = result['reason']
                tag = result.get('tag')
                if tag == 'stat':
                    if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
                    t.n_unexpected_stat_failures = t.n_unexpected_stat_failures + 1
                    addFailingTestInfo(t.unexpected_stat_failures, getTestOpts().testdir, name, reason, way)
                else:
                    if_verbose(1, '*** unexpected failure for %s' % full_name)
                    t.n_unexpected_failures = t.n_unexpected_failures + 1
                    addFailingTestInfo(t.unexpected_failures, getTestOpts().testdir, name, reason, way)
            else:
                if getTestOpts().expect == 'missing-lib':
                    t.n_missing_libs = t.n_missing_libs + 1
                    if name in t.missing_libs:
                        t.missing_libs[name].append(way)
                    else:
                        t.missing_libs[name] = [way]
                else:
                    t.n_expected_failures = t.n_expected_failures + 1
                    if name in t.expected_failures:
                        t.expected_failures[name].append(way)
                    else:
                        t.expected_failures[name] = [way]
        else:
            framework_fail(name, way, 'bad result ' + passFail)
    except KeyboardInterrupt:
        stopNow()
    except:
        framework_fail(name, way, 'do_test exception')
        traceback.print_exc()
935
def addPassingTestInfo (testInfos, directory, name, way):
    """Record an unexpected pass under testInfos[directory][name]."""
    # Strip a leading './' or '.\' from the directory.
    directory = re.sub('^\\.[/\\\\]', '', directory)
    testInfos.setdefault(directory, {}).setdefault(name, []).append(way)
946
def addFailingTestInfo (testInfos, directory, name, reason, way):
    """Record an unexpected failure under testInfos[directory][name][reason]."""
    # Strip a leading './' or '.\' from the directory.
    directory = re.sub('^\\.[/\\\\]', '', directory)
    byName = testInfos.setdefault(directory, {})
    byReason = byName.setdefault(name, {})
    byReason.setdefault(reason, []).append(way)
960
def skiptest (name, way):
    """Record that test 'name' was skipped for 'way'."""
    t.n_tests_skipped = t.n_tests_skipped + 1
    t.tests_skipped.setdefault(name, []).append(way)
968
def framework_fail( name, way, reason ):
    """Record a failure of the test framework itself (not of the test)."""
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
    t.n_framework_failures = t.n_framework_failures + 1
    t.framework_failures.setdefault(name, []).append(way)
977
def badResult(result):
    """True unless *result* is a dict whose 'passFail' entry is 'pass'.

    Narrow the exception types: the original bare 'except:' would also
    swallow KeyboardInterrupt and hide programming errors. A missing key
    raises KeyError; a non-dict result raises TypeError.
    """
    try:
        return result['passFail'] != 'pass'
    except (KeyError, TypeError):
        return True
985
def passed():
    """Result value for a successful test."""
    return {'passFail': 'pass'}

def failBecause(reason, tag=None):
    """Result value for a failed test, carrying a reason and optional tag."""
    return {'passFail': 'fail', 'reason': reason, 'tag': tag}
991
# -----------------------------------------------------------------------------
# Generic command tests

# A generic command test is expected to run and exit successfully.
#
# The expected exit code can be changed via exit_code() as normal, and
# the expected stdout/stderr are stored in <testname>.stdout and
# <testname>.stderr. The output of the command can be ignored
# altogether by using run_command_ignore_output instead of
# run_command.

def run_command( name, way, cmd ):
    # Run 'cmd' with no way string and no extra run options.
    return simple_run( name, '', cmd, '' )
1005
# -----------------------------------------------------------------------------
# GHCi tests

def ghci_script( name, way, script, override_flags = None ):
    # Run a GHCi test: start GHCi with the way's flags and feed it
    # 'script' on stdin.
    # filter out -fforce-recomp from compiler_always_flags, because we're
    # actually testing the recompilation behaviour in the GHCi tests.
    flags = ' '.join(get_compiler_flags(override_flags, noforce=True))

    way_flags = ' '.join(config.way_flags(name)[way])

    # We pass HC and HC_OPTS as environment variables, so that the
    # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
    cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {flags} {way_flags}'
          ).format(flags=flags, way_flags=way_flags)

    getTestOpts().stdin = script
    return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1023
1024 # -----------------------------------------------------------------------------
1025 # Compile-only tests
1026
def compile_override_default_flags(overrides):
    # Compile expecting success, with the default flags replaced by
    # 'overrides'.
    def apply(name, way, extra_opts):
        return do_compile(name, way, 0, '', [], extra_opts, overrides)

    return apply

def compile_fail_override_default_flags(overrides):
    # Compile expecting failure, with the default flags replaced by
    # 'overrides'.
    def apply(name, way, extra_opts):
        return do_compile(name, way, 1, '', [], extra_opts, overrides)

    return apply

def compile_without_flag(flag):
    # Compile expecting success, with 'flag' removed from the default
    # compiler flags.
    def apply(name, way, extra_opts):
        overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
        return compile_override_default_flags(overrides)(name, way, extra_opts)

    return apply
1045
def compile_fail_without_flag(flag):
    """Compile expecting failure, with *flag* removed from the default flags.

    Bug fix: the original read 'getTestOpts.compiler_always_flags' — an
    attribute lookup on the function object, which raises AttributeError
    at runtime. It must call getTestOpts(), as compile_without_flag does.
    """
    def apply(name, way, extra_opts):
        overrides = [f for f in getTestOpts().compiler_always_flags if f != flag]
        return compile_fail_override_default_flags(overrides)(name, way, extra_opts)

    return apply
1052
def compile( name, way, extra_hc_opts ):
    """Compile the test's single module, expecting success."""
    return do_compile( name, way, 0, '', [], extra_hc_opts )
1055
def compile_fail( name, way, extra_hc_opts ):
    """Compile the test's single module, expecting failure."""
    return do_compile( name, way, 1, '', [], extra_hc_opts )
1058
def multimod_compile( name, way, top_mod, extra_hc_opts ):
    """Compile a multi-module program (--make on 'top_mod'), expecting success."""
    return do_compile( name, way, 0, top_mod, [], extra_hc_opts )
1061
def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
    """Compile a multi-module program (--make on 'top_mod'), expecting failure."""
    return do_compile( name, way, 1, top_mod, [], extra_hc_opts )
1064
def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
    """Compile 'extra_mods' first, then --make 'top_mod', expecting success."""
    return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)
1067
def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
    """Compile 'extra_mods' first, then --make 'top_mod', expecting failure."""
    return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
1070
def do_compile( name, way, should_fail, top_mod, extra_mods, extra_hc_opts, override_flags = None ):
    """Compile-only driver shared by all the compile* setup functions.

    Builds 'extra_mods' first, then compiles 'name' (or 'top_mod' with
    --make), expecting success when should_fail == 0 and failure otherwise.
    In both cases the compiler's stderr is compared against the expected
    stderr file (successful compilations may produce warnings).
    """
    # print 'Compile only, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    # Recompilation must not be skipped when extra modules were built.
    force = 0
    if extra_mods:
        force = 1
    result = simple_build( name, way, extra_hc_opts, should_fail, top_mod, 0, 1, force, override_flags )

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    expected_stderr_file = find_expected_file(name, 'stderr')
    actual_stderr_file = add_suffix(name, 'comp.stderr')

    if not compare_outputs(way, 'stderr',
                           join_normalisers(getTestOpts().extra_errmsg_normaliser,
                                            normalise_errmsg),
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=normalise_whitespace):
        return failBecause('stderr mismatch')

    # no problems found, this test passed
    return passed()
1103
def compile_cmp_asm( name, way, extra_hc_opts ):
    """Compile a .cmm file with -keep-s-files and compare the generated
    assembly against the expected '.asm' sample."""
    # This used to be an unconditional debugging print (compare the
    # commented-out equivalent in do_compile); only emit it when verbose.
    if_verbose(3, 'Compile only, extra args = ' + extra_hc_opts)
    result = simple_build( name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0, 0)

    if badResult(result):
        return result

    # Compare the generated assembly (normalised down to mnemonics and
    # call targets) against the sample; stderr is not checked here.
    expected_asm_file = find_expected_file(name, 'asm')
    actual_asm_file = add_suffix(name, 's')

    if not compare_outputs(way, 'asm',
                           join_normalisers(normalise_errmsg, normalise_asm),
                           expected_asm_file, actual_asm_file):
        return failBecause('asm mismatch')

    # no problems found, this test passed
    return passed()
1125
1126 # -----------------------------------------------------------------------------
1127 # Compile-and-run tests
1128
def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts ):
    """Compile a program and run it, checking the program's output.

    For the ghci ways the program is loaded into the interpreter instead
    of being compiled; the compiler's stderr is not checked either way.
    """
    # print 'Compile and run, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    if way.startswith('ghci'): # interpreted...
        return interpreter_run( name, way, extra_hc_opts, 0, top_mod )
    else: # compiled...
        # Recompilation must not be skipped when extra modules were built.
        force = 0
        if extra_mods:
            force = 1

        result = simple_build( name, way, extra_hc_opts, 0, top_mod, 1, 1, force)
        if badResult(result):
            return result

        cmd = './' + name;

        # we don't check the compiler's stderr for a compile-and-run test
        return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1152
def compile_and_run( name, way, extra_hc_opts ):
    """Compile the test's single module and run it."""
    return compile_and_run__( name, way, '', [], extra_hc_opts)
1155
def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
    """Compile a multi-module program (--make on 'top_mod') and run it."""
    return compile_and_run__( name, way, top_mod, [], extra_hc_opts)
1158
def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
    """Compile 'extra_mods', then --make 'top_mod', then run the program."""
    return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
1161
def stats( name, way, stats_file ):
    """Check a previously produced runtime stats file against the test's
    configured stats_range_fields."""
    opts = getTestOpts()
    return checkStats(name, way, stats_file, opts.stats_range_fields)
1165
1166 # -----------------------------------------------------------------------------
1167 # Check -t stats info
1168
def checkStats(name, way, stats_file, range_fields):
    """Check the fields of a '+RTS -t --machine-readable' stats file.

    range_fields maps a field name to (expected value, allowed deviation
    in percent). Returns passed() when every requested field is present
    and within bounds, otherwise a failBecause(...) result describing the
    last problem found (all fields are still reported).
    """
    full_name = name + '(' + way + ')'

    result = passed()
    if len(range_fields) > 0:
        try:
            f = open(in_testdir(stats_file))
        except IOError as e:
            return failBecause(str(e))
        contents = f.read()
        f.close()

        for (field, (expected, dev)) in range_fields.items():
            m = re.search('\("' + field + '", "([0-9]+)"\)', contents)
            if m == None:
                print('Failed to find field: ', field)
                result = failBecause('no such stats field')
                # Bug fix: skip to the next field. Previously we fell
                # through and crashed on m.group(1) (AttributeError on
                # None) whenever a field was missing.
                continue
            val = int(m.group(1))

            lowerBound = trunc(           expected * ((100 - float(dev))/100))
            upperBound = trunc(0.5 + ceil(expected * ((100 + float(dev))/100)))

            deviation = round(((float(val) * 100)/ expected) - 100, 1)

            if val < lowerBound:
                print(field, 'value is too low:')
                print('(If this is because you have improved GHC, please')
                print('update the test so that GHC doesn\'t regress again)')
                result = failBecause('stat too good', tag='stat')
            if val > upperBound:
                print(field, 'value is too high:')
                result = failBecause('stat not good enough', tag='stat')

            # Print the numbers when out of range, or always at -v4.
            # (Removed four unused locals: valStr/valLen/expectedStr/expectedLen.)
            if val < lowerBound or val > upperBound or config.verbose >= 4:
                length = max(len(str(x)) for x in [expected, lowerBound, upperBound, val])

                def display(descr, val, extra):
                    print(descr, str(val).rjust(length), extra)

                display('    Expected    ' + full_name + ' ' + field + ':', expected, '+/-' + str(dev) + '%')
                display('    Lower bound ' + full_name + ' ' + field + ':', lowerBound, '')
                display('    Upper bound ' + full_name + ' ' + field + ':', upperBound, '')
                display('    Actual      ' + full_name + ' ' + field + ':', val, '')
                if val != expected:
                    display('    Deviation   ' + full_name + ' ' + field + ':', deviation, '%')

    return result
1220
1221 # -----------------------------------------------------------------------------
1222 # Build a single-module program
1223
def extras_build( way, extra_mods, extra_hc_opts ):
    """Compile the extra modules a test depends on.

    extra_mods is a list of (module, options) pairs. Returns a result
    dict whose 'hc_opts' entry holds extra_hc_opts extended with the
    object files of any non-Haskell extras (so they get linked in).
    """
    for modopts in extra_mods:
        mod, opts = modopts
        result = simple_build( mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0, 0)
        if not (mod.endswith('.hs') or mod.endswith('.lhs')):
            # Non-Haskell sources leave behind an object file that the
            # main program has to link against.
            extra_hc_opts += ' ' + replace_suffix(mod, 'o')
        if badResult(result):
            return result

    return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1234
1235
def simple_build( name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, noforce, override_flags = None ):
    """Run the compiler once over a test and judge the exit code.

    name           -- the test name (usually also the source file base)
    way            -- the way being run (selects way-specific flags)
    extra_hc_opts  -- extra flags appended to the command line
    should_fail    -- non-zero if the compilation is expected to fail
    top_mod        -- when non-empty, compile this module with --make
    link           -- non-zero to link an executable named 'name'
    addsuf         -- non-zero to append the .hs/.lhs suffix to 'name'
    noforce        -- non-zero to drop -fforce-recomp from the flags
    override_flags -- replaces compiler_always_flags when not None

    Returns passed()/failBecause(...), also taking into account any
    compiler stats ranges configured for the test.
    """
    opts = getTestOpts()
    errname = add_suffix(name, 'comp.stderr')

    # Which source file to hand to the compiler.
    if top_mod != '':
        srcname = top_mod
        base, suf = os.path.splitext(top_mod)
    elif addsuf:
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = name

    # Which mode to run the compiler in (--make / link / -C / -c).
    to_do = ''
    if top_mod != '':
        to_do = '--make '
        if link:
            to_do = to_do + '-o ' + name
    elif link:
        to_do = '-o ' + name
    elif opts.compile_to_hc:
        to_do = '-C'
    else:
        to_do = '-c' # just compile

    # Collect compiler stats when the test asked for them.
    stats_file = name + '.comp.stats'
    if len(opts.compiler_stats_range_fields) > 0:
        extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'

    # Required by GHC 7.3+, harmless for earlier versions:
    if (getTestOpts().c_src or
        getTestOpts().objc_src or
        getTestOpts().objcpp_src or
        getTestOpts().cmm_src):
        extra_hc_opts += ' -no-hs-main '

    if getTestOpts().compile_cmd_prefix == '':
        cmd_prefix = ''
    else:
        cmd_prefix = getTestOpts().compile_cmd_prefix + ' '

    flags = ' '.join(get_compiler_flags(override_flags, noforce) +
                     config.way_flags(name)[way])

    # '{compiler}' is left for runCmdFor to substitute from config.
    cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
           '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts} '
           '> {errname} 2>&1'
          ).format(**locals())

    result = runCmdFor(name, cmd, timeout_multiplier=opts.compile_timeout_multiplier)

    if result != 0 and not should_fail:
        if config.verbose >= 1 and _expect_pass(way):
            print('Compile failed (status ' + repr(result) + ') errors were:')
            actual_stderr_path = in_testdir(name, 'comp.stderr')
            if_verbose_dump(1, actual_stderr_path)

    # ToDo: if the sub-shell was killed by ^C, then exit

    statsResult = checkStats(name, way, stats_file, opts.compiler_stats_range_fields)

    if badResult(statsResult):
        return statsResult

    if should_fail:
        if result == 0:
            return failBecause('exit code 0')
    else:
        if result != 0:
            return failBecause('exit code non-0')

    return passed()
1307
1308 # -----------------------------------------------------------------------------
1309 # Run a program and check its output
1310 #
1311 # If testname.stdin exists, route input from that, else
1312 # from /dev/null. Route output to testname.run.stdout and
1313 # testname.run.stderr. Returns the exit code of the run.
1314
def simple_run(name, way, prog, extra_run_opts):
    """Run 'prog' and check everything observable about the run.

    stdin is routed from opts.stdin or <name>.stdin (else /dev/null);
    stdout/stderr are captured to <name>.run.{stdout,stderr}. Checks, in
    order: exit code, stderr, stdout, heap profile (when -h was in the
    RTS flags), time profile (when -p was), and finally any runtime
    stats ranges.
    """
    opts = getTestOpts()

    # figure out what to use for stdin
    if opts.stdin != '':
        use_stdin = opts.stdin
    else:
        stdin_file = add_suffix(name, 'stdin')
        if os.path.exists(in_testdir(stdin_file)):
            use_stdin = stdin_file
        else:
            use_stdin = '/dev/null'

    run_stdout = add_suffix(name,'run.stdout')
    run_stderr = add_suffix(name,'run.stderr')

    my_rts_flags = rts_flags(way)

    # Collect runtime stats when the test asked for them.
    stats_file = name + '.stats'
    if len(opts.stats_range_fields) > 0:
        stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    else:
        stats_args = ''

    if opts.no_stdin:
        stdin_comes_from = ''
    else:
        stdin_comes_from = ' <' + use_stdin

    if opts.combined_output:
        redirection = ' > {0} 2>&1'.format(run_stdout)
        redirection_append = ' >> {0} 2>&1'.format(run_stdout)
    else:
        redirection = ' > {0} 2> {1}'.format(run_stdout, run_stderr)
        redirection_append = ' >> {0} 2>> {1}'.format(run_stdout, run_stderr)

    # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
    cmd = prog + stats_args + ' ' \
        + my_rts_flags + ' ' \
        + extra_run_opts + ' ' \
        + stdin_comes_from \
        + redirection

    if opts.cmd_wrapper != None:
        cmd = opts.cmd_wrapper(cmd) + redirection_append

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    # run the command
    result = runCmdFor(name, cmd, timeout_multiplier=opts.run_timeout_multiplier)

    # runCmdFor packs the status os.system-style: exit code in the high byte.
    exit_code = result >> 8
    signal = result & 0xff

    # check the exit code
    if exit_code != opts.exit_code:
        if config.verbose >= 1 and _expect_pass(way):
            print('Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')')
            dump_stdout(name)
            dump_stderr(name)
        return failBecause('bad exit code')

    check_hp = my_rts_flags.find("-h") != -1
    check_prof = my_rts_flags.find("-p") != -1

    if not opts.ignore_output:
        bad_stderr = not opts.combined_output and not check_stderr_ok(name, way)
        bad_stdout = not check_stdout_ok(name, way)
        if bad_stderr:
            return failBecause('bad stderr')
        if bad_stdout:
            return failBecause('bad stdout')
        # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
        if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
            return failBecause('bad heap profile')
        if check_prof and not check_prof_ok(name, way):
            return failBecause('bad profile')

    return checkStats(name, way, stats_file, opts.stats_range_fields)
1394
def rts_flags(way):
    """Return a '+RTS ... -RTS' argument group carrying the RTS flags
    configured for 'way', or the empty string when there are none."""
    if way == '':
        return ''
    args = config.way_rts_flags[way]
    if not args:
        return ''
    return '+RTS ' + ' '.join(args) + ' -RTS'
1405
1406 # -----------------------------------------------------------------------------
1407 # Run a program in the interpreter and check its output
1408
def interpreter_run( name, way, extra_hc_opts, compile_only, top_mod ):
    """Run a test inside GHCi and check its output.

    A script is generated that mimics the compiled environment (program
    name, args, line buffering), echoes delimiter lines so GHCi's own
    output can later be split from the program's, and runs Main.main
    (unless compile_only is set). Any stdin for the test is appended to
    the script. Output is split into comp.* and run.* files and checked
    like a normal run.
    """
    opts = getTestOpts()

    outname = add_suffix(name, 'interp.stdout')
    errname = add_suffix(name, 'interp.stderr')

    if (top_mod == ''):
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = top_mod

    scriptname = add_suffix(name, 'genscript')
    qscriptname = in_testdir(scriptname)

    delimiter = '===== program output begins here\n'

    script = open(qscriptname, 'w')
    if not compile_only:
        # set the prog name and command-line args to match the compiled
        # environment.
        script.write(':set prog ' + name + '\n')
        script.write(':set args ' + getTestOpts().extra_run_opts + '\n')
        # Add marker lines to the stdout and stderr output files, so we
        # can separate GHCi's output from the program's.
        script.write(':! echo ' + delimiter)
        script.write(':! echo 1>&2 ' + delimiter)
        # Set stdout to be line-buffered to match the compiled environment.
        script.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
        # wrapping in GHC.TopHandler.runIO ensures we get the same output
        # in the event of an exception as for the compiled program.
        script.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
    script.close()

    # figure out what to use for stdin
    if getTestOpts().stdin != '':
        stdin_file = in_testdir(opts.stdin)
    else:
        stdin_file = in_testdir(name, 'stdin')

    if os.path.exists(stdin_file):
        os.system('cat "{0}" >> "{1}"'.format(stdin_file, qscriptname))

    flags = ' '.join(get_compiler_flags(override_flags=None, noforce=False) +
                     config.way_flags(name)[way])

    if getTestOpts().combined_output:
        redirection = ' > {0} 2>&1'.format(outname)
        redirection_append = ' >> {0} 2>&1'.format(outname)
    else:
        redirection = ' > {0} 2> {1}'.format(outname, errname)
        redirection_append = ' >> {0} 2>> {1}'.format(outname, errname)

    cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts} '
           '< {scriptname} {redirection}'
          ).format(**locals())

    if getTestOpts().cmd_wrapper != None:
        cmd = getTestOpts().cmd_wrapper(cmd) + redirection_append;

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    result = runCmdFor(name, cmd, timeout_multiplier=opts.run_timeout_multiplier)

    # runCmdFor packs the status os.system-style: exit code in the high byte.
    exit_code = result >> 8
    signal = result & 0xff

    # split the stdout into compilation/program output
    split_file(in_testdir(outname), delimiter,
               in_testdir(name, 'comp.stdout'),
               in_testdir(name, 'run.stdout'))
    split_file(in_testdir(errname), delimiter,
               in_testdir(name, 'comp.stderr'),
               in_testdir(name, 'run.stderr'))

    # check the exit code
    if exit_code != getTestOpts().exit_code:
        print('Wrong exit code (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
        dump_stdout(name)
        dump_stderr(name)
        return failBecause('bad exit code')

    # ToDo: if the sub-shell was killed by ^C, then exit

    if getTestOpts().ignore_output or (check_stderr_ok(name, way) and
                                       check_stdout_ok(name, way)):
        return passed()
    else:
        return failBecause('bad stdout or stderr')
1497
1498
def split_file(in_fn, delimiter, out1_fn, out2_fn):
    """Split 'in_fn' into two files at the first line equal to 'delimiter'
    (after stripping leading whitespace). Lines before the delimiter go
    to 'out1_fn', lines after it to 'out2_fn'; the delimiter line itself
    is dropped.
    """
    # See Note [Universal newlines].
    # Bug fix: use context managers so all three handles are closed even
    # on error -- the previous version never closed 'infile' at all.
    with io.open(in_fn, 'r', encoding='utf8', errors='replace', newline=None) as infile:
        with io.open(out1_fn, 'w', encoding='utf8', newline='') as out1:
            line = infile.readline()
            while re.sub(r'^\s*', '', line) != delimiter and line != '':
                out1.write(line)
                line = infile.readline()

        with io.open(out2_fn, 'w', encoding='utf8', newline='') as out2:
            line = infile.readline()
            while line != '':
                out2.write(line)
                line = infile.readline()
1516
1517 # -----------------------------------------------------------------------------
1518 # Utils
def get_compiler_flags(override_flags, noforce):
    """Assemble the flag list for a compiler invocation.

    Starts from 'override_flags' when given, else from the test's
    compiler_always_flags; optionally drops -fforce-recomp (noforce),
    then appends the test's extra options and -outputdir setting.
    """
    opts = getTestOpts()

    base_flags = opts.compiler_always_flags if override_flags is None else override_flags
    flags = list(base_flags)

    if noforce:
        flags = [flag for flag in flags if flag != '-fforce-recomp']

    flags.append(opts.extra_hc_opts)

    if opts.outputdir is not None:
        flags.extend(["-outputdir", opts.outputdir])

    return flags
1536
def check_stdout_ok(name, way):
    """Compare the test's captured stdout with the expected stdout file,
    honouring any custom check_stdout hook the test installed."""
    actual_file = add_suffix(name, 'run.stdout')
    expected_file = find_expected_file(name, 'stdout')

    normaliser = join_normalisers(normalise_output, getTestOpts().extra_normaliser)

    custom_check = getTestOpts().check_stdout
    if custom_check:
        # The test supplied its own checking function.
        return custom_check(in_testdir(actual_file), normaliser)

    return compare_outputs(way, 'stdout', normaliser,
                           expected_file, actual_file)
1550
def dump_stdout( name ):
    """Echo the test's captured stdout (run.stdout) for diagnostics."""
    stdout_path = in_testdir(name, 'run.stdout')
    print('Stdout:')
    print(read_no_crs(stdout_path))
1554
def check_stderr_ok(name, way):
    """Compare the test's captured stderr with the expected stderr file,
    normalising error messages and whitespace first."""
    actual_file = add_suffix(name, 'run.stderr')
    expected_file = find_expected_file(name, 'stderr')

    normaliser = join_normalisers(normalise_errmsg,
                                  getTestOpts().extra_errmsg_normaliser)

    return compare_outputs(way, 'stderr', normaliser,
                           expected_file, actual_file,
                           whitespace_normaliser=normalise_whitespace)
1563
def dump_stderr( name ):
    """Echo the test's captured stderr (run.stderr) for diagnostics."""
    stderr_path = in_testdir(name, 'run.stderr')
    print("Stderr:")
    print(read_no_crs(stderr_path))
1567
def read_no_crs(file):
    """Read a text file, translating Windows line endings to '\\n' (see
    Note [Universal newlines]). Returns '' if the file cannot be read."""
    contents = ''
    try:
        # See Note [Universal newlines].
        # Bug fix: the old code said 'h.close' (no parentheses), so the
        # handle was never explicitly closed; 'with' closes it reliably.
        with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
            contents = h.read()
    except Exception:
        # On Windows, if the program fails very early, it seems the
        # files stdout/stderr are redirected to may not get created
        pass
    return contents
1580
def write_file(file, str):
    """Write 'str' to 'file' without translating '\\n' to '\\r\\n' (see
    Note [Universal newlines])."""
    # Bug fix: the old code said 'h.close' (no parentheses), leaving the
    # flush/close to garbage collection; 'with' closes deterministically.
    with io.open(file, 'w', encoding='utf8', newline='') as h:
        h.write(str)
1586
1587 # Note [Universal newlines]
1588 #
1589 # We don't want to write any Windows style line endings ever, because
1590 # it would mean that `make accept` would touch every line of the file
1591 # when switching between Linux and Windows.
1592 #
1593 # Furthermore, when reading a file, it is convenient to translate all
1594 # Windows style endings to '\n', as it simplifies searching or massaging
1595 # the content.
1596 #
1597 # Solution: use `io.open` instead of `open`
1598 # * when reading: use newline=None to translate '\r\n' to '\n'
1599 # * when writing: use newline='' to not translate '\n' to '\r\n'
1600 #
1601 # See https://docs.python.org/2/library/io.html#io.open.
1602 #
# This should work with both python2 and python3, and with both mingw*
# and msys2 style Python.
1605 #
1606 # Do note that io.open returns unicode strings. So we have to specify
1607 # the expected encoding. But there is at least one file which is not
1608 # valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
1609 # Another solution would be to open files in binary mode always, and
1610 # operate on bytes.
1611
def check_hp_ok(name):
    """Run hp2ps on the test's heap profile and, when ghostscript is
    available, validate the generated PostScript. Returns True on
    success (PostScript is assumed valid without ghostscript)."""
    opts = getTestOpts()

    # do not qualify for hp2ps because we should be in the right directory
    hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())

    hp2psResult = runCmdExitCode(hp2psCmd)

    actual_ps_path = in_testdir(name, 'ps')

    # Guard clauses instead of the old deeply nested conditionals.
    if hp2psResult != 0:
        print("hp2ps error when processing heap profile for " + name)
        return False

    if not os.path.exists(actual_ps_path):
        print("hp2ps did not generate PostScript for " + name)
        return False

    if not gs_working:
        return True # assume postscript is valid without ghostscript

    gsResult = runCmdExitCode(genGSCmd(actual_ps_path))
    if gsResult == 0:
        return True

    # Bug fix: the old message had no space between the test name and
    # "is not valid PostScript".
    print("hp2ps output for " + name + " is not valid PostScript")
    return False
1637
def check_prof_ok(name, way):
    """Compare the generated .prof file against the expected sample, if a
    sample exists. Returns True when they match (or there is nothing to
    compare against)."""
    expected_file = find_expected_file(name, 'prof.sample')

    # No sample profile means nothing to check: vacuously ok.
    if not os.path.exists(in_testdir(expected_file)):
        return True

    actual_file = add_suffix(name, 'prof')
    actual_path = in_testdir(actual_file)

    if not os.path.exists(actual_path):
        print(actual_path + " does not exist")
        return False

    if os.path.getsize(actual_path) == 0:
        print(actual_path + " is empty")
        return False

    return compare_outputs(way, 'prof', normalise_prof,
                           expected_file, actual_file,
                           whitespace_normaliser=normalise_whitespace)
1661
def compare_outputs(way, kind, normaliser, expected_file, actual_file,
                    whitespace_normaliser=lambda x:x):
    """Compare expected output to actual output, and optionally accept
    the new output (when the driver runs with --accept).

    Returns 1 (truthy) if the output matched or was accepted, 0
    otherwise. See Note [Output comparison] for the meaning of the
    'normaliser' and 'whitespace_normaliser' parameters.
    """
    expected_path = in_srcdir(expected_file)
    actual_path = in_testdir(actual_file)

    if os.path.exists(expected_path):
        expected_str = normaliser(read_no_crs(expected_path))
        # Create the .normalised file in the testdir, not in the srcdir.
        expected_normalised_file = add_suffix(expected_file, 'normalised')
        expected_normalised_path = in_testdir(expected_normalised_file)
    else:
        # A missing expected file is treated as expecting empty output.
        expected_str = ''
        expected_normalised_path = '/dev/null'

    actual_raw = read_no_crs(actual_path)
    actual_str = normaliser(actual_raw)

    # See Note [Output comparison].
    if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
        return 1
    else:
        if config.verbose >= 1 and _expect_pass(way):
            print('Actual ' + kind + ' output differs from expected:')

        if expected_normalised_path != '/dev/null':
            write_file(expected_normalised_path, expected_str)

        actual_normalised_path = add_suffix(actual_path, 'normalised')
        write_file(actual_normalised_path, actual_str)

        if config.verbose >= 1 and _expect_pass(way):
            # See Note [Output comparison].
            r = os.system('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
                                                        actual_normalised_path))

            # If for some reason there were no non-whitespace differences,
            # then do a full diff
            if r == 0:
                r = os.system('diff -u "{0}" "{1}"'.format(expected_normalised_path,
                                                           actual_normalised_path))

        if config.accept and (getTestOpts().expect == 'fail' or
                              way in getTestOpts().expect_fail_for):
            if_verbose(1, 'Test is expected to fail. Not accepting new output.')
            return 0
        elif config.accept and actual_raw:
            if_verbose(1, 'Accepting new output.')
            write_file(expected_path, actual_raw)
            return 1
        elif config.accept:
            # --accept with empty actual output: the expected file is obsolete.
            if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
            os.remove(expected_path)
            return 1
        else:
            return 0
1722
1723 # Note [Output comparison]
1724 #
1725 # We do two types of output comparison:
1726 #
1727 # 1. To decide whether a test has failed. We apply a `normaliser` and an
1728 # optional `whitespace_normaliser` to the expected and the actual
1729 # output, before comparing the two.
1730 #
1731 # 2. To show as a diff to the user when the test indeed failed. We apply
1732 # the same `normaliser` function to the outputs, to make the diff as
1733 # small as possible (only showing the actual problem). But we don't
1734 # apply the `whitespace_normaliser` here, because it might completely
1735 # squash all whitespace, making the diff unreadable. Instead we rely
1736 # on the `diff` program to ignore whitespace changes as much as
1737 # possible (#10152).
1738
def normalise_whitespace( str ):
    """Collapse every run of whitespace to a single space and trim the ends."""
    return u' '.join(str.split())
1742
# Matches the location suffix GHC appends to call-stack entries,
# e.g. ", called at Foo.hs:3:7 in pkg-1.0:Foo:".
callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')

def normalise_callstacks(s):
    """Normalise GHC call-stack output: blur line/column numbers and
    package ids (#10834), and drop -prof call stacks unless the test
    asked to keep them."""
    opts = getTestOpts()
    def repl(matches):
        location = matches.group(1)
        location = normalise_slashes_(location)
        return ', called at {0}:<line>:<column> in <package-id>:'.format(location)
    # Ignore line number differences in call stacks (#10834).
    s = re.sub(callSite_re, repl, s)
    # Ignore the change in how we identify implicit call-stacks
    s = s.replace('from ImplicitParams', 'from HasCallStack')
    if not opts.keep_prof_callstacks:
        # Don't output prof callstacks. Test output should be
        # independent from the WAY we run the test.
        s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
    return s
1760
# Matches the two fingerprint words in a Typeable TyCon representation.
tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)

def normalise_type_reps(str):
    """Normalise out fingerprints from Typeable TyCon representations."""
    return tyCon_re.sub('TyCon FINGERPRINT FINGERPRINT ', str)
1766
def normalise_errmsg( str ):
    """Normalise error-messages emitted via stderr so that incidental
    platform/configuration differences don't fail the comparison."""
    # IBM AIX's `ld` is a bit chatty
    if opsys('aix'):
        str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    str = normalise_callstacks(str)
    str = normalise_type_reps(str)

    # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
    # the colon is there because it appears in error messages; this
    # hacky solution is used in place of more sophisticated filename
    # mangling
    str = re.sub('([^\\s])\\.exe', '\\1', str)
    # normalise slashes, minimise Windows/Unix filename differences
    str = re.sub('\\\\', '/', str)
    # The inplace ghc's are called ghc-stage[123] to avoid filename
    # collisions, so we need to normalise that to just "ghc"
    str = re.sub('ghc-stage[123]', 'ghc', str)
    # Error messages sometimes contain the name of the integer
    # implementation package (integer-gmp or integer-simple); blur it out.
    str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
    # Also filter out bullet characters. This is because bullets are used to
    # separate error sections, and tests shouldn't be sensitive to how the
    # division happens.
    bullet = u'•'.encode('utf8') if isinstance(str, bytes) else u'•'
    str = str.replace(bullet, '')
    return str
1797
def normalise_prof (str):
    """Normalise a .prof file so it can be reasonably compared against a
    sample.

    None of the actual profiling data survives; only the shape of the
    profile does: cost-centre name, module, source location, and number
    of entries.
    """
    # Drop, in order: the header (everything up to and including the
    # "COST CENTRE" column line), the CAF/IDLE rows (which change
    # unpredictably), and the main/Main row (which sometimes appears
    # under CAF and sometimes under MAIN).
    for noise in ['^(.*\n)*COST CENTRE[^\n]*\n',
                  '[ \t]*(CAF|IDLE).*\n',
                  '[ \t]*main[ \t]+Main.*\n']:
        str = re.sub(noise, '', str)

    # Each remaining row has 9 whitespace-separated columns, e.g.
    #   readPrec Main Main_1.hs:7:13-16 109 1 0.0 0.6 0.0 0.6
    # Keep only columns 1 (cost-centre), 2 (module), 3 (src) and
    # 5 (entries). SCC names can't contain whitespace, so splitting on
    # whitespace like this is safe.
    str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
                 '\\1 \\2 \\3 \\5\n', str)
    return str
1846
def normalise_slashes_( str ):
    """Turn backslashes into forward slashes (Windows/Unix path blur)."""
    return str.replace('\\', '/')
1850
def normalise_exe_( str ):
    """Strip any literal '.exe' occurrences (Windows executables)."""
    return str.replace('.exe', '')
1854
def normalise_output( str ):
    """Normalise a program's stdout/stderr output before comparison."""
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    # Remove a .exe extension (for Windows)
    # This can occur in error messages generated by the program.
    str = re.sub('([^\\s])\\.exe', '\\1', str)
    str = normalise_callstacks(str)
    str = normalise_type_reps(str)
    return str
1866
def normalise_asm( str ):
    """Reduce assembly output to instruction mnemonics (plus the target
    for call instructions), dropping assembler metadata directives,
    operands, '@plt' markers and blank lines."""
    directive_re = re.compile('^[ \t]*\\..*$')
    kept = []
    for raw_line in str.split('\n'):
        # Drop metadata directives (e.g. ".type")
        if directive_re.match(raw_line):
            continue
        tokens = raw_line.replace('@plt', '').lstrip().split()
        # Drop empty lines.
        if not tokens:
            continue
        # Drop operands, except for call instructions.
        if tokens[0] == 'call':
            kept.append(tokens[0] + ' ' + tokens[1])
        else:
            kept.append(tokens[0])
    return u'\n'.join(kept)
1887
def if_verbose( n, s ):
    """Print 's' if the configured verbosity is at least 'n'."""
    if config.verbose >= n:
        print(s)
1891
def if_verbose_dump( n, f ):
    """At verbosity >= n, print the contents of file 'f'; print a blank
    line if the file cannot be read."""
    if config.verbose >= n:
        try:
            # Bug fix: close the handle ('with') and narrow the bare
            # 'except:', which used to swallow even KeyboardInterrupt.
            with open(f) as h:
                print(h.read())
        except Exception:
            print('')
1898
def rawSystem(cmd_and_args):
    """Run a command given as an argument list (no shell) and return its
    exit code.

    We prefer subprocess.call to os.spawnv as the latter
    seems to send its arguments through a shell or something
    with the Windows (non-cygwin) python. An argument "a b c"
    turns into three arguments ["a", "b", "c"].
    """
    cmd = cmd_and_args[0]
    return subprocess.call([strip_quotes(cmd)] + cmd_and_args[1:])
1907
# Note that this doesn't handle the timeout itself; it is just used for
# commands that have timeout handling built-in.
def rawSystemWithTimeout(cmd_and_args):
    """Run a command via the timeout wrapper program, reacting to the
    wrapper's special exit codes (98 = ^C pressed, 99 = timed out)."""
    r = rawSystem(cmd_and_args)
    if r == 98:
        # The python timeout program uses 98 to signal that ^C was pressed
        stopNow()
    if r == 99 and getTestOpts().exit_code != 99:
        # Only print a message when timeout killed the process unexpectedly.
        cmd = cmd_and_args[-1]
        if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
    return r
1920
1921 # cmd is a complex command in Bourne-shell syntax
# e.g. (cd . && 'c:/users/simonpj/darcs/HEAD/compiler/stage1/ghc-inplace' ...etc)
1923 # Hence it must ultimately be run by a Bourne shell
1924 #
1925 # Mostly it invokes the command wrapped in 'timeout' thus
1926 # timeout 300 'cd . && ...blah blah'
1927 # so it's timeout's job to invoke the Bourne shell
1928 #
1929 # But watch out for the case when there is no timeout program!
1930 # Then, when using the native Python, os.system will invoke the cmd shell
1931
def runCmd( cmd ):
    # Run cmd (a Bourne-shell command line, with '{...}' placeholders
    # expanded from config) under the configured default timeout, and
    # return the exit status shifted left 8 bits, os.system-style.
    #
    # The body previously duplicated runCmdFor verbatim; delegate to it
    # instead (its first argument is unused and the default multiplier of
    # 1.0 leaves the timeout unchanged — config.timeout is integral, so
    # int(ceil(t * 1.0)) == t).
    return runCmdFor('runCmd', cmd)
1947
def runCmdFor( name, cmd, timeout_multiplier=1.0 ):
    # Run cmd (Bourne-shell syntax) with the configured timeout scaled by
    # timeout_multiplier.  Returns the exit status shifted left 8 bits,
    # mirroring os.system.  (The name argument is not used.)

    # Format cmd using config. Example: cmd='{hpc} report A.tix'
    cmd = cmd.format(**config.__dict__)
    if_verbose( 3, cmd )

    if config.os == 'mingw32':
        # On MinGW, we will always have timeout
        assert config.timeout_prog!=''
    scaled_timeout = int(ceil(config.timeout * timeout_multiplier))

    if config.timeout_prog != '':
        status = rawSystemWithTimeout(
            [config.timeout_prog, str(scaled_timeout), cmd])
    else:
        status = os.system(cmd)
    return status << 8
1964
def runCmdExitCode( cmd ):
    # Undo the '<< 8' applied by runCmd to recover the plain exit code.
    return runCmd(cmd) >> 8
1967
1968 # -----------------------------------------------------------------------------
1969 # checking if ghostscript is available for checking the output of hp2ps
1970
def genGSCmd(psfile):
    # Build a ghostscript command line that merely parses psfile without
    # displaying anything; the '{gs}' placeholder is later substituted
    # with the configured ghostscript binary by runCmd.
    return '{gs} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "%s"' % (psfile,)
1973
def gsNotWorking():
    # Report that ghostscript cannot be used to validate hp2ps output.
    # The previous 'global gs_working' declaration was a no-op (nothing
    # is assigned here; the flag is set at module level), so it is gone.
    print("GhostScript not available for hp2ps tests")
1977
# Probe, once at startup, whether ghostscript can validate hp2ps output:
# it must accept a known-good PostScript file AND reject a known-bad one.
global gs_working
gs_working = 0
if config.have_profiling and config.gs != '':
    good_result = runCmdExitCode(genGSCmd(config.confdir + '/good.ps'))
    if good_result == 0:
        bad_result = runCmdExitCode(genGSCmd(config.confdir + '/bad.ps')
                                    + ' >/dev/null 2>&1')
        if bad_result != 0:
            print("GhostScript available for hp2ps tests")
            gs_working = 1
        else:
            gsNotWorking()
    else:
        gsNotWorking()
else:
    gsNotWorking()
1995
def add_suffix( name, suffix ):
    # Append '.suffix' to name; an empty suffix leaves name unchanged.
    return name if suffix == '' else name + '.' + suffix
2001
def add_hs_lhs_suffix(name):
    # Append the source-file extension implied by the current test
    # options; plain Haskell ('hs') is the default.
    opts = getTestOpts()
    for flag, ext in [(opts.c_src,      'c'),
                      (opts.cmm_src,    'cmm'),
                      (opts.objc_src,   'm'),
                      (opts.objcpp_src, 'mm'),
                      (opts.literate,   'lhs')]:
        if flag:
            return add_suffix(name, ext)
    return add_suffix(name, 'hs')
2015
def replace_suffix( name, suffix ):
    # Swap name's extension (if any) for the given suffix.
    return os.path.splitext(name)[0] + '.' + suffix
2019
def in_testdir(name, suffix=''):
    # Path of name (with optional suffix) inside the test's working dir.
    testdir = getTestOpts().testdir
    return os.path.join(testdir, add_suffix(name, suffix))
2022
def in_srcdir(name, suffix=''):
    # Path of name (with optional suffix) inside the test's source dir.
    srcdir = getTestOpts().srcdir
    return os.path.join(srcdir, add_suffix(name, suffix))
2025
2026 # Finding the sample output. The filename is of the form
2027 #
2028 # <test>.stdout[-ws-<wordsize>][-<platform>]
2029 #
def find_expected_file(name, suff):
    # Return the most specific sample-output file that exists in the
    # source dir, preferring platform over OS over neither, and within
    # each of those a wordsize-specific file over a generic one.  Falls
    # back on the plain '<name>.<suff>' when nothing more specific exists.
    basename = add_suffix(name, suff)

    for plat_suffix in ['-' + config.platform, '-' + config.os, '']:
        for ws_suffix in ['-ws-' + config.wordsize, '']:
            candidate = basename + ws_suffix + plat_suffix
            if os.path.exists(in_srcdir(candidate)):
                return candidate

    return basename
2042
def cleanup():
    # Remove the test's working directory; a missing dir is not an error.
    testdir = getTestOpts().testdir
    shutil.rmtree(testdir, ignore_errors=True)
2045
2046
2047 # -----------------------------------------------------------------------------
2048 # Return a list of all the files ending in '.T' below directories roots.
2049
def findTFiles(roots):
    # It would be better to use os.walk, but that
    # gives backslashes on Windows, which trip the
    # testsuite later :-(
    found = []
    for root in roots:
        found.extend(findTFiles_(root))
    return found
2055
def findTFiles_(path):
    # Helper for findTFiles: recurse into directories, keep '.T' files.
    if os.path.isdir(path):
        children = [os.path.join(path, entry) for entry in os.listdir(path)]
        return findTFiles(children)
    if path.endswith('.T'):
        return [path]
    return []
2064
2065 # -----------------------------------------------------------------------------
2066 # Output a test summary to the specified file object
2067
def summary(t, file, short=False):
    # Write a human-readable summary of the test run recorded in t to the
    # given file object.  When short=True, only the list of unexpected
    # tests (a re-runnable TEST="..." line) is emitted.

    file.write('\n')
    printUnexpectedTests(file, [t.unexpected_passes, t.unexpected_failures, t.unexpected_stat_failures])

    if short:
        # Only print the list of unexpected tests above.
        return

    # Overall counts, each right-justified in an 8-character column.
    file.write('SUMMARY for test run started at '
               + time.strftime("%c %Z", t.start_time) + '\n'
               + str(datetime.timedelta(seconds=
                    round(time.time() - time.mktime(t.start_time)))).rjust(8)
               + ' spent to go through\n'
               + repr(t.total_tests).rjust(8)
               + ' total tests, which gave rise to\n'
               + repr(t.total_test_cases).rjust(8)
               + ' test cases, of which\n'
               + repr(t.n_tests_skipped).rjust(8)
               + ' were skipped\n'
               + '\n'
               + repr(t.n_missing_libs).rjust(8)
               + ' had missing libraries\n'
               + repr(t.n_expected_passes).rjust(8)
               + ' expected passes\n'
               + repr(t.n_expected_failures).rjust(8)
               + ' expected failures\n'
               + '\n'
               + repr(t.n_framework_failures).rjust(8)
               + ' caused framework failures\n'
               + repr(t.n_unexpected_passes).rjust(8)
               + ' unexpected passes\n'
               + repr(t.n_unexpected_failures).rjust(8)
               + ' unexpected failures\n'
               + repr(t.n_unexpected_stat_failures).rjust(8)
               + ' unexpected stat failures\n'
               + '\n')

    # Per-category detail, only for the categories that are non-empty.
    if t.n_unexpected_passes > 0:
        file.write('Unexpected passes:\n')
        printPassingTestInfosSummary(file, t.unexpected_passes)

    if t.n_unexpected_failures > 0:
        file.write('Unexpected failures:\n')
        printFailingTestInfosSummary(file, t.unexpected_failures)

    if t.n_unexpected_stat_failures > 0:
        file.write('Unexpected stat failures:\n')
        printFailingTestInfosSummary(file, t.unexpected_stat_failures)

    if t.n_framework_failures > 0:
        file.write('Test framework failures:\n')
        printFrameworkFailureSummary(file, t.framework_failures)

    if stopping():
        # stopNow() was called (e.g. ^C via the timeout program's 98 exit).
        file.write('WARNING: Testsuite run was terminated early\n')
2124
def printUnexpectedTests(file, testInfoss):
    # Emit a single TEST="..." line naming every test with an unexpected
    # result, so the user can re-run exactly those tests.  testInfoss is
    # a list of {directory: {test_name: ...}} mappings; nothing is
    # written when all of them are empty.
    unexpected = []
    for testInfos in testInfoss:
        for directory in testInfos.keys():
            unexpected.extend(testInfos[directory].keys())
    if unexpected != []:
        file.write('Unexpected results from:\n')
        file.write('TEST="' + ' '.join(unexpected) + '"\n')
        file.write('\n')
2136
2137 def printPassingTestInfosSummary(file, testInfos):
2138 directories = list(testInfos.keys())
2139 directories.sort()
2140 maxDirLen = max(len(x) for x in directories)
2141 for directory in directories:
2142 tests = list(testInfos[directory].keys())
2143 tests.sort()
2144 for test in tests:
2145 file.write(' ' + directory.ljust(maxDirLen + 2) + test + \
2146 ' (' + ','.join(testInfos[directory][test]) + ')\n')
2147 file.write('\n')
2148
2149 def printFailingTestInfosSummary(file, testInfos):
2150 directories = list(testInfos.keys())
2151 directories.sort()
2152 maxDirLen = max(len(d) for d in directories)
2153 for directory in directories:
2154 tests = list(testInfos[directory].keys())
2155 tests.sort()
2156 for test in tests:
2157 reasons = testInfos[directory][test].keys()
2158 for reason in reasons:
2159 file.write(' ' + directory.ljust(maxDirLen + 2) + test + \
2160 ' [' + reason + ']' + \
2161 ' (' + ','.join(testInfos[directory][test][reason]) + ')\n')
2162 file.write('\n')
2163
2164 def printFrameworkFailureSummary(file, testInfos):
2165 names = list(testInfos.keys())
2166 names.sort()
2167 maxNameLen = max(len(n) for n in names)
2168 for name in names:
2169 ways = testInfos[name]
2170 file.write(' ' + name.ljust(maxNameLen + 2) + \
2171 ' (' + ','.join(ways) + ')\n')
2172 file.write('\n')
2173
def modify_lines(s, f):
    # Apply f to each line of s and re-join.  A trailing newline is
    # (re)added so diff won't emit '\ No newline at end of file'.
    result = u'\n'.join(map(f, s.splitlines()))
    if result and not result.endswith('\n'):
        result += '\n'
    return result