Testsuite: cleanup printing of summary
[ghc.git] / testsuite / driver / testlib.py
1 # coding=utf8
2 #
3 # (c) Simon Marlow 2002
4 #
5
6 from __future__ import print_function
7
8 import io
9 import shutil
10 import os
11 import errno
12 import string
13 import re
14 import traceback
15 import time
16 import datetime
17 import copy
18 import glob
19 from math import ceil, trunc
20 import collections
21 import subprocess
22
23 from testglobals import *
24 from testutil import *
25 from extra_files import extra_src_files
26
# Python 2/3 compatibility: 'basestring' only exists on Python 2.
# On Python 3, treat str and bytes as the "string-like" types.
# Catch only NameError: a bare 'except:' would also swallow
# KeyboardInterrupt/SystemExit and hide genuine errors.
try:
    basestring
except NameError: # Python 3
    basestring = (str,bytes)
31
# Threading support is only needed when the driver is configured to run
# tests concurrently.  The low-level 'thread' module was renamed to
# '_thread' in Python 3, hence the fallback import.
if config.use_threads:
    import threading
    try:
        import thread
    except ImportError: # Python 3
        import _thread as thread
38
# Flag polled between tests so a run can be aborted (e.g. on Ctrl-C).
wantToStop = False

def stopNow():
    """Request that the driver stop running further tests."""
    global wantToStop
    wantToStop = True

def stopping():
    """Return True once a stop has been requested via stopNow()."""
    return wantToStop
46
# Options valid for the current test only (these get reset to
# testdir_testopts after each test).

global testopts_local
if config.use_threads:
    # Threaded runs: each worker thread sees its own current-test options.
    testopts_local = threading.local()
else:
    # Single-threaded runs: any plain attribute holder will do.
    class TestOpts_Local:
        pass
    testopts_local = TestOpts_Local()
57
def getTestOpts():
    # Return the options of the test currently running (thread-local
    # when threading is enabled).
    return testopts_local.x

def setLocalTestOpts(opts):
    global testopts_local
    # Install `opts` as the current test's options for this thread.
    testopts_local.x=opts
64
def isStatsTest():
    # True if the current test checks any runtime or compiler
    # performance-stats fields.
    opts = getTestOpts()
    return bool(opts.compiler_stats_range_fields or opts.stats_range_fields)
68
69
# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
    global thisdir_settings
    # Chain the new setup function onto whatever this directory already
    # had; executeSetups flattens nested lists of setups.
    thisdir_settings = [thisdir_settings, f]
75
76 # -----------------------------------------------------------------------------
77 # Canned setup functions for common cases. eg. for a test you might say
78 #
79 # test('test001', normal, compile, [''])
80 #
81 # to run it without any options, but change it to
82 #
83 # test('test001', expect_fail, compile, [''])
84 #
85 # to expect failure for this test.
86
def normal( name, opts ):
    """Default setup: leave the test options untouched."""
    return

def skip( name, opts ):
    """Skip this test entirely."""
    opts.skip = 1

def expect_fail( name, opts ):
    # The compiler, testdriver, OS or platform is missing a certain
    # feature, and we don't plan to or can't fix it now or in the
    # future.
    opts.expect = 'fail'
98
def reqlib( lib ):
    # Require the given package to be installed (probed via ghc-pkg in
    # _reqlib); otherwise the test is marked 'missing-lib'.
    return lambda name, opts, l=lib: _reqlib (name, opts, l )
101
def stage1(name, opts):
    # See Note [Why is there no stage1 setup function?]
    # Deliberately fails: stage1 tests live in testsuite/tests/stage1.
    framework_fail(name, 'stage1 setup function does not exist',
                   'add your test to testsuite/tests/stage1 instead')
106
107 # Note [Why is there no stage1 setup function?]
108 #
109 # Presumably a stage1 setup function would signal that the stage1
110 # compiler should be used to compile a test.
111 #
112 # Trouble is, the path to the compiler + the `ghc --info` settings for
113 # that compiler are currently passed in from the `make` part of the
114 # testsuite driver.
115 #
116 # Switching compilers in the Python part would be entirely too late, as
117 # all ghc_with_* settings would be wrong. See config/ghc for possible
118 # consequences (for example, config.run_ways would still be
119 # based on the default compiler, quite likely causing ./validate --slow
120 # to fail).
121 #
122 # It would be possible to let the Python part of the testsuite driver
123 # make the call to `ghc --info`, but doing so would require quite some
124 # work. Care has to be taken to not affect the run_command tests for
125 # example, as they also use the `ghc --info` settings:
126 # quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
127 #
128 # If you want a test to run using the stage1 compiler, add it to the
129 # testsuite/tests/stage1 directory. Validate runs the tests in that
130 # directory with `make stage=1`.
131
# Cache the results of looking to see if we have a library or not.
# This makes quite a difference, especially on Windows.
have_lib = {}   # maps library name -> bool (is it installed?)
135
def _reqlib( name, opts, lib ):
    """Mark the test 'missing-lib' unless package `lib` is installed.

    The ghc-pkg probe is cached in `have_lib` across tests.
    """
    if lib not in have_lib:
        ghc_pkg = strip_quotes(config.ghc_pkg)
        proc = subprocess.Popen([ghc_pkg, '--no-user-package-db', 'describe', lib],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        # Drain stdout and stderr so the child cannot block on a full pipe.
        proc.communicate()
        have_lib[lib] = proc.wait() == 0

    if not have_lib[lib]:
        opts.expect = 'missing-lib'
153
def req_haddock( name, opts ):
    # Needs a haddock binary; otherwise treated as a missing library.
    if not config.haddock:
        opts.expect = 'missing-lib'

def req_profiling( name, opts ):
    '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
    if not config.have_profiling:
        opts.expect = 'fail'

def req_shared_libs( name, opts ):
    # Needs a GHC build with shared-library support.
    if not config.have_shared_libs:
        opts.expect = 'fail'

def req_interp( name, opts ):
    # Needs the GHC interpreter.
    if not config.have_interp:
        opts.expect = 'fail'

def req_smp( name, opts ):
    # Needs an RTS with SMP support.
    if not config.have_smp:
        opts.expect = 'fail'
174
def ignore_output( name, opts ):
    # Don't check the test's output against the expected output files.
    opts.ignore_output = 1

def combined_output( name, opts ):
    # Check stdout and stderr as one combined stream (flag consumed by
    # the test runner).
    opts.combined_output = True
180
181 # -----
182
def expect_fail_for( ways ):
    """Expect this test to fail, but only in the given ways."""
    return lambda name, opts: _expect_fail_for(name, opts, ways)

def _expect_fail_for( name, opts, ways ):
    opts.expect_fail_for = ways
188
def expect_broken( bug ):
    # This test is a expected not to work due to the indicated trac bug
    # number.
    return lambda name, opts, b=bug: _expect_broken (name, opts, b )

def _expect_broken( name, opts, bug ):
    # Record the ticket and expect failure in every way.
    record_broken(name, opts, bug)
    opts.expect = 'fail';

def expect_broken_for( bug, ways ):
    # As expect_broken, but expect failure only in the given ways.
    return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )

def _expect_broken_for( name, opts, bug, ways ):
    record_broken(name, opts, bug)
    opts.expect_fail_for = ways
204
def record_broken(name, opts, bug):
    """Remember that this test is known-broken (ticket `bug`) so the
    summary can report each broken test exactly once."""
    global brokens
    me = (bug, opts.testdir, name)
    # 'me not in' is the idiomatic spelling of 'not me in'.
    if me not in brokens:
        brokens.append(me)
210
def _expect_pass(way):
    # Helper function. Not intended for use in .T files.
    opts = getTestOpts()
    # A test is expected to pass unless marked otherwise, and unless
    # this particular way appears in its expect_fail_for list.
    return opts.expect == 'pass' and way not in opts.expect_fail_for
215
216 # -----
217
def omit_ways( ways ):
    """Do not run this test in the given ways."""
    return lambda name, opts: _omit_ways(name, opts, ways)

def _omit_ways( name, opts, ways ):
    opts.omit_ways = ways

# -----

def only_ways( ways ):
    """Run this test only in the given ways."""
    return lambda name, opts: _only_ways(name, opts, ways)

def _only_ways( name, opts, ways ):
    opts.only_ways = ways

# -----

def extra_ways( ways ):
    """Run this test in the given ways in addition to the defaults."""
    return lambda name, opts: _extra_ways(name, opts, ways)

def _extra_ways( name, opts, ways ):
    opts.extra_ways = ways
239
240 # -----
241
def set_stdin( file ):
    """Feed the given file to the test program's stdin."""
    return lambda name, opts: _set_stdin(name, opts, file)

def _set_stdin( name, opts, f ):
    opts.stdin = f

# -----

def exit_code( val ):
    """Expect the test program to exit with the given code."""
    return lambda name, opts: _exit_code(name, opts, val)

def _exit_code( name, opts, v ):
    opts.exit_code = v
255
def signal_exit_code( val ):
    # Expect the program to die with the given fatal signal number.
    if opsys('solaris2'):
        # On Solaris the raw signal number is the exit code.
        return exit_code( val );
    else:
        # When application running on Linux receives fatal error
        # signal, then its exit code is encoded as 128 + signal
        # value. See http://www.tldp.org/LDP/abs/html/exitcodes.html
        # I assume that Mac OS X behaves in the same way at least Mac
        # OS X builder behavior suggests this.
        return exit_code( val+128 );
266
267 # -----
268
def compile_timeout_multiplier( val ):
    """Scale the compile-time timeout for this test by `val`."""
    return lambda name, opts: _compile_timeout_multiplier(name, opts, val)

def _compile_timeout_multiplier( name, opts, v ):
    opts.compile_timeout_multiplier = v

def run_timeout_multiplier( val ):
    """Scale the run-time timeout for this test by `val`."""
    return lambda name, opts: _run_timeout_multiplier(name, opts, val)

def _run_timeout_multiplier( name, opts, v ):
    opts.run_timeout_multiplier = v

# -----

def extra_run_opts( val ):
    """Pass extra command-line arguments when running the test program."""
    return lambda name, opts: _extra_run_opts(name, opts, val)

def _extra_run_opts( name, opts, v ):
    opts.extra_run_opts = v

# -----

def extra_hc_opts( val ):
    """Pass extra flags to the compiler when building this test."""
    return lambda name, opts: _extra_hc_opts(name, opts, val)

def _extra_hc_opts( name, opts, v ):
    opts.extra_hc_opts = v
296
297 # -----
298
def extra_clean( files ):
    """No-op setup, kept so existing .T files still load.

    TODO. Remove all calls to extra_clean.
    """
    return lambda _name, _opts: None

def extra_files(files):
    """Declare additional source files this test depends on."""
    return lambda name, opts: _extra_files(name, opts, files)

def _extra_files(name, opts, files):
    opts.extra_files.extend(files)
308
309 # -----
310
def stats_num_field( field, expecteds ):
    """Check a runtime-stats field against an expected value.

    `expecteds` is either a single (expected, deviation%) pair, or a
    list of (guard, expected, deviation%) triples of which the first
    entry with a true guard is used.
    """
    return lambda name, opts: _stats_num_field(name, opts, field, expecteds)

def _stats_num_field( name, opts, field, expecteds ):
    if field in opts.stats_range_fields:
        framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')

    if type(expecteds) is list:
        # Pick the first entry whose guard holds.
        for (applies, expected, dev) in expecteds:
            if applies:
                opts.stats_range_fields[field] = (expected, dev)
                return
        framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
    else:
        (expected, dev) = expecteds
        opts.stats_range_fields[field] = (expected, dev)
328
def compiler_stats_num_field( field, expecteds ):
    """Check a compiler-perf stats field against expected values.

    `expecteds` is a list of (guard, expected, deviation%) triples; the
    first entry whose guard holds is used.
    """
    return lambda name, opts: _compiler_stats_num_field(name, opts, field, expecteds)

def _compiler_stats_num_field( name, opts, field, expecteds ):
    if field in opts.compiler_stats_range_fields:
        framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')

    # Compiler performance numbers change when debugging is on, making the results
    # useless and confusing. Therefore, skip if debugging is on.
    if compiler_debugged():
        skip(name, opts)

    # Use the first entry whose guard holds.
    for (applies, expected, dev) in expecteds:
        if applies:
            opts.compiler_stats_range_fields[field] = (expected, dev)
            return

    framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
347
348 # -----
349
def when(b, f):
    """Apply setup `f` only if condition `b` holds.

    When --list-broken is in effect we always apply `f`, so that every
    expect_broken call is visible.
    """
    if b or config.list_broken:
        return f
    return normal

def unless(b, f):
    """Apply setup `f` only if condition `b` does not hold."""
    return when(not b, f)
360
# Simple predicates over the build/test configuration, for use with
# when()/unless() in .T files.

def doing_ghci():
    return 'ghci' in config.run_ways

def ghc_dynamic():
    return config.ghc_dynamic

def fast():
    return config.speed == 2

def platform( plat ):
    return config.platform == plat

def opsys( os ):
    return config.os == os

def arch( arch ):
    return config.arch == arch

def wordsize( ws ):
    return config.wordsize == str(ws)

def msys( ):
    return config.msys

def cygwin( ):
    return config.cygwin

def have_vanilla( ):
    return config.have_vanilla

def have_dynamic( ):
    return config.have_dynamic

def have_profiling( ):
    return config.have_profiling

def in_tree_compiler( ):
    return config.in_tree_compiler

def unregisterised( ):
    return config.unregisterised

def compiler_profiled( ):
    return config.compiler_profiled

def compiler_debugged( ):
    return config.compiler_debugged
408
409 # ---
410
def high_memory_usage(name, opts):
    # Run this test with no other tests in parallel (it needs the memory).
    opts.alone = True

# If a test is for a multi-CPU race, then running the test alone
# increases the chance that we'll actually see it.
def multi_cpu_race(name, opts):
    opts.alone = True
418
419 # ---
def literate( name, opts ):
    """Mark the test source as literate."""
    opts.literate = 1

def c_src( name, opts ):
    """Mark the test source as C."""
    opts.c_src = 1

def objc_src( name, opts ):
    """Mark the test source as Objective-C."""
    opts.objc_src = 1

def objcpp_src( name, opts ):
    """Mark the test source as Objective-C++."""
    opts.objcpp_src = 1

def cmm_src( name, opts ):
    """Mark the test source as C-- (.cmm)."""
    opts.cmm_src = 1

def outputdir( odir ):
    """Place compilation output in the given directory."""
    return lambda name, opts: _outputdir(name, opts, odir)

def _outputdir( name, opts, odir ):
    opts.outputdir = odir
440
441 # ----
442
def pre_cmd( cmd ):
    """Run the given shell command in the test directory before the test."""
    return lambda name, opts: _pre_cmd(name, opts, cmd)

def _pre_cmd( name, opts, cmd ):
    opts.pre_cmd = cmd
448
449 # ----
450
def clean_cmd( cmd ):
    """No-op setup, kept so existing .T files still load.

    TODO. Remove all calls to clean_cmd.
    """
    return lambda _name, _opts: None
454
455 # ----
456
def cmd_prefix( prefix ):
    """Prepend `prefix` (plus a space) to the command that runs the test."""
    return lambda name, opts: _cmd_prefix(name, opts, prefix)

def _cmd_prefix( name, opts, prefix ):
    opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd

# ----

def cmd_wrapper( fun ):
    """Transform the command that runs the test with `fun`."""
    return lambda name, opts: _cmd_wrapper(name, opts, fun)

def _cmd_wrapper( name, opts, fun ):
    opts.cmd_wrapper = fun
470
471 # ----
472
def compile_cmd_prefix( prefix ):
    """Prepend `prefix` to the compilation command for this test."""
    return lambda name, opts: _compile_cmd_prefix(name, opts, prefix)

def _compile_cmd_prefix( name, opts, prefix ):
    opts.compile_cmd_prefix = prefix
478
479 # ----
480
def check_stdout( f ):
    """Record `f` as this test's custom stdout checker."""
    return lambda name, opts: _check_stdout(name, opts, f)

def _check_stdout( name, opts, f ):
    opts.check_stdout = f
486
487 # ----
488
def normalise_slashes( name, opts ):
    # Normalise path separators in the test's output.
    _normalise_fun(name, opts, normalise_slashes_)

def normalise_exe( name, opts ):
    # Normalise executable names in the test's output.
    _normalise_fun(name, opts, normalise_exe_)

def normalise_fun( *fs ):
    # Add extra normalisation functions for the test's output.
    return lambda name, opts: _normalise_fun(name, opts, fs)

def _normalise_fun( name, opts, *fs ):
    opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)

def normalise_errmsg_fun( *fs ):
    # Add extra normalisation functions for compiler error messages.
    return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)

def _normalise_errmsg_fun( name, opts, *fs ):
    opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)
506
def normalise_version_( *pkgs ):
    """Return a normaliser that rewrites '<pkg>-<version>' to
    '<pkg>-<VERSION>' for each of the given package names."""
    # Build the alternation once, and don't shadow the builtin 'str'
    # (the original used 'str' as the parameter name).
    pattern = '(' + '|'.join(map(re.escape, pkgs)) + ')-[0-9.]+'
    def normalise_version__( s ):
        return re.sub(pattern, '\\1-<VERSION>', s)
    return normalise_version__
512
def normalise_version( *pkgs ):
    # Normalise the versions of the given packages in both the test's
    # output and in compiler error messages.
    def normalise_version__( name, opts ):
        _normalise_fun(name, opts, normalise_version_(*pkgs))
        _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
    return normalise_version__
518
def normalise_drive_letter(name, opts):
    # Windows only. Change D:\\ to C:\\.
    # (So output does not depend on which drive the testsuite runs from.)
    _normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))
522
def keep_prof_callstacks(name, opts):
    """Keep profiling callstacks.

    Use together with `only_ways(prof_ways)`.
    """
    # Flag consumed by the output normalisation code.
    opts.keep_prof_callstacks = True
529
def join_normalisers(*a):
    """
    Compose functions, flattening sequences.

    join_normalisers(f1,[f2,f3],f4)

    is the same as

    lambda x: f1(f2(f3(f4(x))))
    """
    # collections.Iterable moved to collections.abc in Python 3.3 and
    # was removed from the collections namespace in Python 3.10, so
    # import it in a version-agnostic way.
    try:
        from collections.abc import Iterable
    except ImportError:  # Python 2
        from collections import Iterable

    def flatten(l):
        """
        Taken from http://stackoverflow.com/a/2158532/946226
        """
        for el in l:
            # Strings are iterable but must be treated as atoms here.
            if isinstance(el, Iterable) and not isinstance(el, basestring):
                for sub in flatten(el):
                    yield sub
            else:
                yield el

    a = flatten(a)

    fn = lambda x: x  # identity function
    for f in a:
        assert callable(f)
        fn = lambda x, f=f, fn=fn: fn(f(x))
    return fn
559
560 # ----
561 # Function for composing two opt-fns together
562
def executeSetups(fs, name, opts):
    """Apply a setup function — or, recursively, a list of them — to
    the given test name and options."""
    if type(fs) is list:
        for setup in fs:
            executeSetups(setup, name, opts)
    else:
        # A single setup function: apply it directly.
        fs(name, opts)
571
572 # -----------------------------------------------------------------------------
573 # The current directory of tests
574
def newTestDir(tempdir, dir):
    # Called once per test directory: installs the per-directory default
    # settings applied to every test defined afterwards.

    global thisdir_settings
    # reset the options for this test directory
    def settings(name, opts, tempdir=tempdir, dir=dir):
        return _newTestDir(name, opts, tempdir, dir)
    thisdir_settings = settings
582
# Should be equal to entry in toplevel .gitignore.
testdir_suffix = '.run'

def _newTestDir(name, opts, tempdir, dir):
    # Record where the test's sources live and where it will be run
    # (a fresh '<name>.run' directory under tempdir).
    opts.srcdir = os.path.join(os.getcwd(), dir)
    opts.testdir = os.path.join(tempdir, dir, name + testdir_suffix)
    opts.compiler_always_flags = config.compiler_always_flags
590
591 # -----------------------------------------------------------------------------
592 # Actually doing tests
593
# Tests are partitioned into those that must run alone and those that
# may run in parallel; allTestNames is used to reject duplicate names.
parallelTests = []
aloneTests = []
allTestNames = set([])
597
def runTest (opts, name, func, args):
    # Run one test: spawn a worker thread when threading is enabled,
    # otherwise run it synchronously in this thread.
    ok = 0

    if config.use_threads:
        # Wait for a free thread slot; 'ok' records whether we took a
        # slot so the except handler knows whether to release the lock.
        t.thread_pool.acquire()
        try:
            while config.threads<(t.running_threads+1):
                t.thread_pool.wait()
            t.running_threads = t.running_threads+1
            ok=1
            t.thread_pool.release()
            thread.start_new_thread(test_common_thread, (name, opts, func, args))
        except:
            if not ok:
                t.thread_pool.release()
    else:
        test_common_work (name, opts, func, args)
615
# name :: String
# setup :: TestOpts -> IO ()
def test (name, setup, func, args):
    # Register one test: validate its name, apply the directory and
    # per-test setups, and queue it for (possibly parallel) execution.
    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    if config.run_only_some_tests:
        if name not in config.only:
            return
        else:
            # Note [Mutating config.only]
            # config.only is initiallly the set of tests requested by
            # the user (via 'make TEST='). We then remove all tests that
            # we've already seen (in .T files), so that we can later
            # report on any tests we couldn't find and error out.
            config.only.remove(name)

    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
    myTestOpts = copy.deepcopy(default_testopts)

    executeSetups([thisdir_settings, setup], name, myTestOpts)

    thisTest = lambda : runTest(myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
    else:
        parallelTests.append(thisTest)
    allTestNames.add(name)
652
if config.use_threads:
    def test_common_thread(name, opts, func, args):
        # Worker-thread entry point: run the test under the global lock,
        # then return this thread's slot to the pool and wake a waiter.
        t.lock.acquire()
        try:
            test_common_work(name,opts,func,args)
        finally:
            t.lock.release()
            t.thread_pool.acquire()
            t.running_threads = t.running_threads - 1
            t.thread_pool.notify()
            t.thread_pool.release()
664
def get_package_cache_timestamp():
    """Return the mtime of the package-db cache file, or 0.0 if there is
    no cache file configured or it cannot be stat'ed."""
    if config.package_conf_cache_file == '':
        return 0.0
    else:
        try:
            return os.stat(config.package_conf_cache_file).st_mtime
        except OSError:
            # A missing/unreadable cache file counts the same as no cache
            # file.  (Was a bare 'except:', which would also swallow
            # KeyboardInterrupt and programming errors.)
            return 0.0
673
# Build products that must never be copied/linked into the test
# directory (stale interface/object files would confuse the test).
do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o') # 12112
675
def test_common_work (name, opts, func, args):
    # Run one test in all applicable ways: determine the ways, collect
    # the files the test depends on, then run each way via do_test.
    try:
        t.total_tests += 1
        setLocalTestOpts(opts)

        # Used below to detect tests that corrupt the shared package db.
        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()

        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']

        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]

        t.total_test_cases += len(all_ways)

        ok_way = lambda way: \
            not getTestOpts().skip \
            and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and way not in getTestOpts().omit_ways

        # Which ways we are asked to skip
        do_ways = list(filter (ok_way,all_ways))

        # Only run all ways in slow mode.
        # See Note [validate and testsuite speed] in toplevel Makefile.
        if config.accept:
            # Only ever run one way
            do_ways = do_ways[:1]
        elif config.speed > 0:
            # However, if we EXPLICITLY asked for a way (with extra_ways)
            # please test it!
            explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
            other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
            do_ways = other_ways[:1] + explicit_ways

        # Find all files in the source directory that this test
        # depends on. Do this only once for all ways.
        # Generously add all filenames that start with the name of
        # the test to this set, as a convenience to test authors.
        # They will have to use the `extra_files` setup function to
        # specify all other files that their test depends on (but
        # this seems to be necessary for only about 10% of all
        # tests).
        files = set(f for f in os.listdir(opts.srcdir)
                       if f.startswith(name) and not f == name and
                          not f.endswith(testdir_suffix) and
                          not os.path.splitext(f)[1] in do_not_copy)
        for filename in (opts.extra_files + extra_src_files.get(name, [])):
            if filename.startswith('/'):
                framework_fail(name, 'whole-test',
                    'no absolute paths in extra_files please: ' + filename)

            elif '*' in filename:
                # Don't use wildcards in extra_files too much, as
                # globbing is slow.
                files.update((os.path.relpath(f, opts.srcdir)
                            for f in glob.iglob(in_srcdir(filename))))

            elif filename:
                files.add(filename)

            else:
                framework_fail(name, 'whole-test', 'extra_file is empty string')

        # Run the required tests...
        for way in do_ways:
            if stopping():
                break
            try:
                do_test(name, way, func, args, files)
            except KeyboardInterrupt:
                stopNow()
            except Exception as e:
                framework_fail(name, way, str(e))
                traceback.print_exc()

        t.n_tests_skipped += len(set(all_ways) - set(do_ways))

        if config.cleanup and do_ways:
            cleanup()

        package_conf_cache_file_end_timestamp = get_package_cache_timestamp();

        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))

    except Exception as e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
776
def do_test(name, way, func, args, files):
    # Run one test one way: set up a fresh test directory, run any
    # pre_cmd, invoke the test function, and classify the result.
    opts = getTestOpts()

    full_name = name + '(' + way + ')'

    if_verbose(2, "=====> {0} {1} of {2} {3}".format(
        full_name, t.total_tests, len(allTestNames),
        [len(t.unexpected_passes),
         len(t.unexpected_failures),
         len(t.framework_failures)]))

    # Clean up prior to the test, so that we can't spuriously conclude
    # that it passed on the basis of old run outputs.
    cleanup()
    os.makedirs(opts.testdir)

    # Link all source files for this test into a new directory in
    # /tmp, and run the test in that directory. This makes it
    # possible to run tests in parallel, without modification, that
    # would otherwise (accidentally) write to the same output file.
    # It also makes it easier to keep the testsuite clean.

    for extra_file in files:
        src = in_srcdir(extra_file)
        dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
        if os.path.isfile(src):
            link_or_copy_file(src, dst)
        elif os.path.isdir(src):
            os.mkdir(dst)
            lndir(src, dst)
        else:
            if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
                # When using a ghc built without haddock support, .t
                # files are rightfully missing. Don't
                # framework_fail. Test will be skipped later.
                pass
            else:
                framework_fail(name, way,
                    'extra_file does not exist: ' + extra_file)

    if func.__name__ == 'run_command' or opts.pre_cmd:
        # When running 'MAKE' make sure 'TOP' still points to the
        # root of the testsuite.
        src_makefile = in_srcdir('Makefile')
        dst_makefile = in_testdir('Makefile')
        if os.path.exists(src_makefile):
            with open(src_makefile, 'r') as src:
                makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
            with open(dst_makefile, 'w') as dst:
                dst.write(makefile)

    # Release the global lock while the (potentially slow) test runs.
    if config.use_threads:
        t.lock.release()

    if opts.pre_cmd:
        exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, opts.pre_cmd))
        if exit_code != 0:
            framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))

    try:
        result = func(*[name,way] + args)
    finally:
        # Re-take the lock even if the test function raised.
        if config.use_threads:
            t.lock.acquire()

    if opts.expect not in ['pass', 'fail', 'missing-lib']:
        framework_fail(name, way, 'bad expected ' + opts.expect)

    try:
        passFail = result['passFail']
    except:
        passFail = 'No passFail found'

    directory = re.sub('^\\.[/\\\\]', '', opts.testdir)

    # Classify the outcome against what the test said to expect.
    if passFail == 'pass':
        if _expect_pass(way):
            t.n_expected_passes += 1
        else:
            if_verbose(1, '*** unexpected pass for %s' % full_name)
            t.unexpected_passes.append((directory, name, 'unexpected', way))
    elif passFail == 'fail':
        if _expect_pass(way):
            reason = result['reason']
            tag = result.get('tag')
            if tag == 'stat':
                if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
                t.unexpected_stat_failures.append((directory, name, reason, way))
            else:
                if_verbose(1, '*** unexpected failure for %s' % full_name)
                t.unexpected_failures.append((directory, name, reason, way))
        else:
            if opts.expect == 'missing-lib':
                t.missing_libs.append((directory, name, 'missing-lib', way))
            else:
                t.n_expected_failures += 1
    else:
        framework_fail(name, way, 'bad result ' + passFail)
875
def framework_fail(name, way, reason):
    # Record a failure of the test framework itself (as opposed to a
    # failure of the test being run).
    opts = getTestOpts()
    directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
    t.framework_failures.append((directory, name, way, reason))
882
def badResult(result):
    """Return True unless `result` is a result dict that says 'pass'.

    A malformed result — not a dict (TypeError) or a dict with no
    'passFail' key (KeyError) — counts as bad.  The original used a
    bare 'except:', which would also mask unrelated errors.
    """
    try:
        return result['passFail'] != 'pass'
    except (KeyError, TypeError):
        return True
890
def passed():
    """Result dict for a passing test."""
    return {'passFail': 'pass'}

def failBecause(reason, tag=None):
    """Result dict for a failing test, carrying a human-readable reason
    and an optional tag (e.g. 'stat' for perf-number failures)."""
    return {'passFail': 'fail', 'reason': reason, 'tag': tag}
896
897 # -----------------------------------------------------------------------------
898 # Generic command tests
899
900 # A generic command test is expected to run and exit successfully.
901 #
902 # The expected exit code can be changed via exit_code() as normal, and
903 # the expected stdout/stderr are stored in <testname>.stdout and
904 # <testname>.stderr. The output of the command can be ignored
905 # altogether by using run_command_ignore_output instead of
906 # run_command.
907
def run_command( name, way, cmd ):
    # Run `cmd`; expected stdout/stderr/exit code are checked by simple_run.
    return simple_run( name, '', cmd, '' )
910
911 # -----------------------------------------------------------------------------
912 # GHCi tests
913
def ghci_script( name, way, script):
    # Run a .script file under ghci with the configured and way-specific flags.
    flags = ' '.join(get_compiler_flags())
    way_flags = ' '.join(config.way_flags[way])

    # We pass HC and HC_OPTS as environment variables, so that the
    # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
    cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {flags} {way_flags}'
          ).format(flags=flags, way_flags=way_flags)

    # The script itself becomes the interpreter's stdin.
    getTestOpts().stdin = script
    return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
925
926 # -----------------------------------------------------------------------------
927 # Compile-only tests
928
def compile( name, way, extra_hc_opts ):
    # Compile a single module; expect success.
    return do_compile( name, way, 0, '', [], extra_hc_opts )

def compile_fail( name, way, extra_hc_opts ):
    # Compile a single module; expect the compilation to fail.
    return do_compile( name, way, 1, '', [], extra_hc_opts )

def multimod_compile( name, way, top_mod, extra_hc_opts ):
    # Compile a multi-module program rooted at top_mod; expect success.
    return do_compile( name, way, 0, top_mod, [], extra_hc_opts )

def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
    # Compile a multi-module program rooted at top_mod; expect failure.
    return do_compile( name, way, 1, top_mod, [], extra_hc_opts )

def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Compile top_mod plus separately-built extra_mods; expect success.
    return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)

def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Compile top_mod plus separately-built extra_mods; expect failure.
    return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
946
def do_compile(name, way, should_fail, top_mod, extra_mods, extra_hc_opts):
    # Compile-only test driver: build any extra modules, build the test
    # itself, then compare the compiler's stderr with the expected file.
    # print 'Compile only, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    result = simple_build(name, way, extra_hc_opts, should_fail, top_mod, 0, 1)

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    expected_stderr_file = find_expected_file(name, 'stderr')
    actual_stderr_file = add_suffix(name, 'comp.stderr')

    if not compare_outputs(way, 'stderr',
                           join_normalisers(getTestOpts().extra_errmsg_normaliser,
                                            normalise_errmsg),
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=normalise_whitespace):
        return failBecause('stderr mismatch')

    # no problems found, this test passed
    return passed()
976
def compile_cmp_asm( name, way, extra_hc_opts ):
    # Compile a .cmm file keeping the generated assembly, and compare it
    # against the expected .asm file.
    print('Compile only, extra args = ', extra_hc_opts)
    result = simple_build(name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    expected_asm_file = find_expected_file(name, 'asm')
    actual_asm_file = add_suffix(name, 's')

    if not compare_outputs(way, 'asm',
                           join_normalisers(normalise_errmsg, normalise_asm),
                           expected_asm_file, actual_asm_file):
        return failBecause('asm mismatch')

    # no problems found, this test passed
    return passed()
998
999 # -----------------------------------------------------------------------------
1000 # Compile-and-run tests
1001
def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Compile-and-run test driver: build extras, then either interpret
    # (ghci ways) or compile and execute the resulting binary.
    # print 'Compile and run, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    if way.startswith('ghci'): # interpreted...
        return interpreter_run(name, way, extra_hc_opts, top_mod)
    else: # compiled...
        result = simple_build(name, way, extra_hc_opts, 0, top_mod, 1, 1)
        if badResult(result):
            return result

        cmd = './' + name;

        # we don't check the compiler's stderr for a compile-and-run test
        return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
1021
def compile_and_run( name, way, extra_hc_opts ):
    # Single-module compile-and-run test.
    return compile_and_run__( name, way, '', [], extra_hc_opts)

def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
    # Multi-module compile-and-run test rooted at top_mod.
    return compile_and_run__( name, way, top_mod, [], extra_hc_opts)

def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Compile-and-run test with separately-built extra modules.
    return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
1030
def stats( name, way, stats_file ):
    # Check a '+RTS -t' stats file against this test's expected ranges.
    opts = getTestOpts()
    return checkStats(name, way, stats_file, opts.stats_range_fields)
1034
1035 # -----------------------------------------------------------------------------
1036 # Check -t stats info
1037
def checkStats(name, way, stats_file, range_fields):
    """Check machine-readable RTS stats against the expected ranges.

    range_fields maps a stat field name to (expected_value, deviation),
    where deviation is the allowed percentage either side of expected.
    Returns a pass/fail result dict; range violations are tagged 'stat'.
    """
    full_name = name + '(' + way + ')'

    result = passed()
    if range_fields:
        try:
            f = open(in_testdir(stats_file))
        except IOError as e:
            return failBecause(str(e))
        contents = f.read()
        f.close()

        for (field, (expected, dev)) in range_fields.items():
            # Stats lines look like: ("bytes allocated", "1234")
            m = re.search(r'\("' + field + r'", "([0-9]+)"\)', contents)
            if m == None:
                print('Failed to find field: ', field)
                result = failBecause('no such stats field')
                # Bug fix: without this `continue` the code fell through to
                # m.group(1) on None and crashed with an AttributeError.
                continue
            val = int(m.group(1))

            lowerBound = trunc(           expected * ((100 - float(dev))/100))
            upperBound = trunc(0.5 + ceil(expected * ((100 + float(dev))/100)))

            deviation = round(((float(val) * 100)/ expected) - 100, 1)

            if val < lowerBound:
                print(field, 'value is too low:')
                print('(If this is because you have improved GHC, please')
                print('update the test so that GHC doesn\'t regress again)')
                result = failBecause('stat too good', tag='stat')
            if val > upperBound:
                print(field, 'value is too high:')
                result = failBecause('stat not good enough', tag='stat')

            # Show the bounds on failure, or always at high verbosity.
            if val < lowerBound or val > upperBound or config.verbose >= 4:
                length = max(len(str(x)) for x in [expected, lowerBound, upperBound, val])

                def display(descr, val, extra):
                    print(descr, str(val).rjust(length), extra)

                display(' Expected ' + full_name + ' ' + field + ':', expected, '+/-' + str(dev) + '%')
                display(' Lower bound ' + full_name + ' ' + field + ':', lowerBound, '')
                display(' Upper bound ' + full_name + ' ' + field + ':', upperBound, '')
                display(' Actual ' + full_name + ' ' + field + ':', val, '')
                if val != expected:
                    display(' Deviation ' + full_name + ' ' + field + ':', deviation, '%')

    return result
1085
1086 # -----------------------------------------------------------------------------
1087 # Build a single-module program
1088
def extras_build( way, extra_mods, extra_hc_opts ):
    """Compile the auxiliary modules listed in extra_mods.

    On success returns a result dict whose 'hc_opts' carries the
    accumulated extra compiler options (including any object files
    produced from non-Haskell sources).
    """
    for mod, mod_opts in extra_mods:
        build_res = simple_build(mod, way, mod_opts + ' ' + extra_hc_opts, 0, '', 0, 0)
        is_haskell = mod.endswith('.hs') or mod.endswith('.lhs')
        if not is_haskell:
            # Non-Haskell sources yield an object file which must be fed
            # to the main compilation.
            extra_hc_opts += ' ' + replace_suffix(mod, 'o')
        if badResult(build_res):
            return build_res

    return {'passFail': 'pass', 'hc_opts': extra_hc_opts}
1098
def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf):
    """Compile a test program.

    Args:
        name:          test name, used to derive source/output file names
        way:           the test way (selects per-way compiler flags)
        extra_hc_opts: extra options appended to the compiler command
        should_fail:   if true, a zero compiler exit code is a test failure
        top_mod:       top module for --make builds ('' for single-module)
        link:          whether to link an executable
        addsuf:        whether to append the .hs/.lhs suffix to `name`

    Returns a pass/fail result dict.

    Consistency fix: the function already binds `opts = getTestOpts()`;
    the remaining `getTestOpts()` calls now use that local uniformly.
    """
    opts = getTestOpts()

    # Redirect stdout and stderr to the same file
    stdout = in_testdir(name, 'comp.stderr')
    stderr = subprocess.STDOUT

    if top_mod != '':
        srcname = top_mod
    elif addsuf:
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = name

    if top_mod != '':
        to_do = '--make '
        if link:
            to_do = to_do + '-o ' + name
    elif link:
        to_do = '-o ' + name
    else:
        to_do = '-c' # just compile

    stats_file = name + '.comp.stats'
    if opts.compiler_stats_range_fields:
        extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'

    # Required by GHC 7.3+, harmless for earlier versions:
    if (opts.c_src or
        opts.objc_src or
        opts.objcpp_src or
        opts.cmm_src):
        extra_hc_opts += ' -no-hs-main '

    if opts.compile_cmd_prefix == '':
        cmd_prefix = ''
    else:
        cmd_prefix = opts.compile_cmd_prefix + ' '

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
           '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())

    exit_code = runCmd(cmd, None, stdout, stderr, opts.compile_timeout_multiplier)

    if exit_code != 0 and not should_fail:
        if config.verbose >= 1 and _expect_pass(way):
            print('Compile failed (exit code {0}) errors were:'.format(exit_code))
            actual_stderr_path = in_testdir(name, 'comp.stderr')
            if_verbose_dump(1, actual_stderr_path)

    # ToDo: if the sub-shell was killed by ^C, then exit

    statsResult = checkStats(name, way, stats_file, opts.compiler_stats_range_fields)

    if badResult(statsResult):
        return statsResult

    if should_fail:
        if exit_code == 0:
            return failBecause('exit code 0')
    else:
        if exit_code != 0:
            return failBecause('exit code non-0')

    return passed()
1167
1168 # -----------------------------------------------------------------------------
1169 # Run a program and check its output
1170 #
1171 # If testname.stdin exists, route input from that, else
1172 # from /dev/null. Route output to testname.run.stdout and
1173 # testname.run.stderr. Returns the exit code of the run.
1174
def simple_run(name, way, prog, extra_run_opts):
    """Run a compiled test program and check its exit code, its
    stdout/stderr, and any requested stats/profiling output."""
    opts = getTestOpts()

    # figure out what to use for stdin
    stdin = None
    if opts.stdin:
        stdin = in_testdir(opts.stdin)
    elif os.path.exists(in_testdir(name, 'stdin')):
        stdin = in_testdir(name, 'stdin')

    stdout = in_testdir(name, 'run.stdout')
    if opts.combined_output:
        stderr = subprocess.STDOUT
    else:
        stderr = in_testdir(name, 'run.stderr')

    my_rts_flags = rts_flags(way)

    stats_file = name + '.stats'
    if opts.stats_range_fields:
        stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    else:
        stats_args = ''

    # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
    cmd = prog + stats_args + ' ' + my_rts_flags + ' ' + extra_run_opts

    if opts.cmd_wrapper != None:
        cmd = opts.cmd_wrapper(cmd)

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    # run the command
    exit_code = runCmd(cmd, stdin, stdout, stderr, opts.run_timeout_multiplier)

    # check the exit code
    if exit_code != opts.exit_code:
        if config.verbose >= 1 and _expect_pass(way):
            print('Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')')
            dump_stdout(name)
            dump_stderr(name)
        return failBecause('bad exit code')

    check_hp = '-h' in my_rts_flags
    check_prof = '-p' in my_rts_flags

    if not opts.ignore_output:
        # Evaluate both checks before failing, exactly as before, so the
        # stdout comparison still runs even when stderr already differs.
        bad_stderr = not opts.combined_output and not check_stderr_ok(name, way)
        bad_stdout = not check_stdout_ok(name, way)
        if bad_stderr:
            return failBecause('bad stderr')
        if bad_stdout:
            return failBecause('bad stdout')
        # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
        if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
            return failBecause('bad heap profile')
        if check_prof and not check_prof_ok(name, way):
            return failBecause('bad profile')

    return checkStats(name, way, stats_file, opts.stats_range_fields)
1236
def rts_flags(way):
    """Wrap the way's RTS arguments in +RTS/-RTS, or return '' when the
    way has none."""
    args = config.way_rts_flags.get(way, [])
    if not args:
        return ''
    return '+RTS {} -RTS'.format(' '.join(args))
1240
1241 # -----------------------------------------------------------------------------
1242 # Run a program in the interpreter and check its output
1243
def interpreter_run(name, way, extra_hc_opts, top_mod):
    """Run a test inside GHCi.

    Generates a GHCi script that replays the compiled-program environment
    (prog name, args, buffering, top-handler), appends the test's stdin to
    it, runs GHCi, then splits the combined output into a compilation part
    and a program part at a marker line before checking it.
    """
    opts = getTestOpts()

    stdout = in_testdir(name, 'interp.stdout')
    stderr = in_testdir(name, 'interp.stderr')
    script = in_testdir(name, 'genscript')

    if opts.combined_output:
        framework_fail(name, 'unsupported',
                       'WAY=ghci and combined_output together is not supported')

    if (top_mod == ''):
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = top_mod

    # Marker separating GHCi's own output from the program's output.
    delimiter = '===== program output begins here\n'

    with open(script, 'w') as f:
        # set the prog name and command-line args to match the compiled
        # environment.
        f.write(':set prog ' + name + '\n')
        f.write(':set args ' + opts.extra_run_opts + '\n')
        # Add marker lines to the stdout and stderr output files, so we
        # can separate GHCi's output from the program's.
        f.write(':! echo ' + delimiter)
        f.write(':! echo 1>&2 ' + delimiter)
        # Set stdout to be line-buffered to match the compiled environment.
        f.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
        # wrapping in GHC.TopHandler.runIO ensures we get the same output
        # in the event of an exception as for the compiled program.
        f.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')

    # Append the test's stdin (if any) to the script, so the program part
    # of the GHCi session reads the same input the compiled program would.
    stdin = in_testdir(opts.stdin if opts.stdin else add_suffix(name, 'stdin'))
    if os.path.exists(stdin):
        os.system('cat "{0}" >> "{1}"'.format(stdin, script))

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())

    if getTestOpts().cmd_wrapper != None:
        cmd = opts.cmd_wrapper(cmd);

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    exit_code = runCmd(cmd, script, stdout, stderr, opts.run_timeout_multiplier)

    # split the stdout into compilation/program output
    split_file(stdout, delimiter,
               in_testdir(name, 'comp.stdout'),
               in_testdir(name, 'run.stdout'))
    split_file(stderr, delimiter,
               in_testdir(name, 'comp.stderr'),
               in_testdir(name, 'run.stderr'))

    # check the exit code
    if exit_code != getTestOpts().exit_code:
        print('Wrong exit code (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
        dump_stdout(name)
        dump_stderr(name)
        return failBecause('bad exit code')

    # ToDo: if the sub-shell was killed by ^C, then exit

    if getTestOpts().ignore_output or (check_stderr_ok(name, way) and
                                       check_stdout_ok(name, way)):
        return passed()
    else:
        return failBecause('bad stdout or stderr')
1315
def split_file(in_fn, delimiter, out1_fn, out2_fn):
    """Split in_fn into two files at the first line equal to `delimiter`
    (after stripping leading whitespace).

    Lines before the delimiter go to out1_fn, lines after it to out2_fn;
    the delimiter line itself is dropped.  If the delimiter never occurs,
    all lines go to out1_fn and out2_fn is left empty.

    Bug fix: the input file handle was previously never closed.
    """
    # See Note [Universal newlines].
    with io.open(in_fn, 'r', encoding='utf8', errors='replace', newline=None) as infile:
        with io.open(out1_fn, 'w', encoding='utf8', newline='') as out1:
            line = infile.readline()
            while re.sub(r'^\s*', '', line) != delimiter and line != '':
                out1.write(line)
                line = infile.readline()

        with io.open(out2_fn, 'w', encoding='utf8', newline='') as out2:
            line = infile.readline()
            while line != '':
                out2.write(line)
                line = infile.readline()
1333
1334 # -----------------------------------------------------------------------------
1335 # Utils
def get_compiler_flags():
    """Assemble the base compiler flags for the current test: the always-on
    flags, the test's extra options and, if set, an -outputdir."""
    opts = getTestOpts()

    flags = copy.copy(opts.compiler_always_flags)
    flags.append(opts.extra_hc_opts)

    if opts.outputdir != None:
        flags.extend(["-outputdir", opts.outputdir])

    return flags
1347
def check_stdout_ok(name, way):
    """Compare the program's stdout with the expected output, or hand it
    to the test's custom check_stdout hook when one is defined."""
    actual_stdout_file = add_suffix(name, 'run.stdout')
    expected_stdout_file = find_expected_file(name, 'stdout')

    extra_norm = join_normalisers(normalise_output, getTestOpts().extra_normaliser)

    custom_check = getTestOpts().check_stdout
    if custom_check:
        # The hook receives the raw path plus the normaliser to apply.
        return custom_check(in_testdir(actual_stdout_file), extra_norm)

    return compare_outputs(way, 'stdout', extra_norm,
                           expected_stdout_file, actual_stdout_file)
1361
def dump_stdout( name ):
    """Echo the test's captured run.stdout to the terminal."""
    print('Stdout:')
    print(read_no_crs(in_testdir(name, 'run.stdout')))
1365
def check_stderr_ok(name, way):
    """Compare the program's stderr against the expected output, using the
    standard error-message normalisations plus the test's own, and
    ignoring whitespace differences."""
    expected_stderr_file = find_expected_file(name, 'stderr')
    actual_stderr_file = add_suffix(name, 'run.stderr')

    normaliser = join_normalisers(normalise_errmsg,
                                  getTestOpts().extra_errmsg_normaliser)
    return compare_outputs(way, 'stderr', normaliser,
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=normalise_whitespace)
1374
def dump_stderr( name ):
    """Echo the test's captured run.stderr to the terminal."""
    print("Stderr:")
    print(read_no_crs(in_testdir(name, 'run.stderr')))
1378
def read_no_crs(file):
    """Read a text file, translating '\\r\\n' line endings to '\\n'.

    Returns '' when the file cannot be read.  Bug fixes: the original
    `h.close` was missing its parentheses so the handle was never closed,
    and the bare `except` also swallowed KeyboardInterrupt/SystemExit.
    """
    try:
        # See Note [Universal newlines].
        with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
            return h.read()
    except Exception:
        # On Windows, if the program fails very early, it seems the
        # files stdout/stderr are redirected to may not get created
        return ''
1391
def write_file(file, str):
    """Write `str` to `file` without translating '\\n' to '\\r\\n'.

    Bug fix: the original `h.close` was missing its parentheses, so the
    handle was only closed when the garbage collector got around to it.
    """
    # See Note [Universal newlines].
    with io.open(file, 'w', encoding='utf8', newline='') as h:
        h.write(str)
1397
1398 # Note [Universal newlines]
1399 #
1400 # We don't want to write any Windows style line endings ever, because
1401 # it would mean that `make accept` would touch every line of the file
1402 # when switching between Linux and Windows.
1403 #
1404 # Furthermore, when reading a file, it is convenient to translate all
1405 # Windows style endings to '\n', as it simplifies searching or massaging
1406 # the content.
1407 #
1408 # Solution: use `io.open` instead of `open`
1409 # * when reading: use newline=None to translate '\r\n' to '\n'
1410 # * when writing: use newline='' to not translate '\n' to '\r\n'
1411 #
1412 # See https://docs.python.org/2/library/io.html#io.open.
1413 #
# This should work with both python2 and python3, and with both mingw*-
# and msys2-style Python.
1416 #
1417 # Do note that io.open returns unicode strings. So we have to specify
1418 # the expected encoding. But there is at least one file which is not
1419 # valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
1420 # Another solution would be to open files in binary mode always, and
1421 # operate on bytes.
1422
def check_hp_ok(name):
    """Run hp2ps on the test's heap profile and, when GhostScript is
    available, validate the generated PostScript.  Returns True on
    success, False otherwise (callers only test truthiness, so this is
    compatible with the previous implicit-None failure path).

    Bug fix: the "not valid PostScript" message was missing the space
    before "is".
    """
    opts = getTestOpts()

    # do not qualify for hp2ps because we should be in the right directory
    hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())

    hp2psResult = runCmd(hp2psCmd)

    actual_ps_path = in_testdir(name, 'ps')

    if hp2psResult != 0:
        print("hp2ps error when processing heap profile for " + name)
        return False

    if not os.path.exists(actual_ps_path):
        print("hp2ps did not generate PostScript for " + name)
        return False

    if not gs_working:
        return True  # assume postscript is valid without ghostscript

    if runCmd(genGSCmd(actual_ps_path)) == 0:
        return True

    print("hp2ps output for " + name + " is not valid PostScript")
    return False
1448
def check_prof_ok(name, way):
    """Compare the test's .prof output with its .prof.sample, if a sample
    exists.  A missing sample means there is nothing to check (pass)."""
    expected_prof_file = find_expected_file(name, 'prof.sample')

    # Check actual prof file only if we have an expected prof file to
    # compare it with.
    if not os.path.exists(in_testdir(expected_prof_file)):
        return True

    actual_prof_file = add_suffix(name, 'prof')
    actual_prof_path = in_testdir(actual_prof_file)

    if not os.path.exists(actual_prof_path):
        print(actual_prof_path + " does not exist")
        return False

    if os.path.getsize(actual_prof_path) == 0:
        print(actual_prof_path + " is empty")
        return False

    return compare_outputs(way, 'prof', normalise_prof,
                           expected_prof_file, actual_prof_file,
                           whitespace_normaliser=normalise_whitespace)
1472
1473 # Compare expected output to actual output, and optionally accept the
1474 # new output. Returns true if output matched or was accepted, false
1475 # otherwise. See Note [Output comparison] for the meaning of the
1476 # normaliser and whitespace_normaliser parameters.
# Compare expected output to actual output, and optionally accept the
# new output. Returns true if output matched or was accepted, false
# otherwise. See Note [Output comparison] for the meaning of the
# normaliser and whitespace_normaliser parameters.
def compare_outputs(way, kind, normaliser, expected_file, actual_file,
                    whitespace_normaliser=lambda x:x):
    """Compare `expected_file` (in the srcdir) with `actual_file` (in the
    testdir) after applying `normaliser` to both.

    On mismatch, writes .normalised copies of both sides, shows a diff at
    verbosity >= 1, and — when the run was started with --accept — updates
    (or deletes) the expected file.  Returns 1 for match/accepted, else 0.
    """
    expected_path = in_srcdir(expected_file)
    actual_path = in_testdir(actual_file)

    if os.path.exists(expected_path):
        expected_str = normaliser(read_no_crs(expected_path))
        # Create the .normalised file in the testdir, not in the srcdir.
        expected_normalised_file = add_suffix(expected_file, 'normalised')
        expected_normalised_path = in_testdir(expected_normalised_file)
    else:
        # No expected file: compare against empty output.
        expected_str = ''
        expected_normalised_path = '/dev/null'

    actual_raw = read_no_crs(actual_path)
    actual_str = normaliser(actual_raw)

    # See Note [Output comparison].
    if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
        return 1
    else:
        if config.verbose >= 1 and _expect_pass(way):
            print('Actual ' + kind + ' output differs from expected:')

        if expected_normalised_path != '/dev/null':
            write_file(expected_normalised_path, expected_str)

        actual_normalised_path = add_suffix(actual_path, 'normalised')
        write_file(actual_normalised_path, actual_str)

        if config.verbose >= 1 and _expect_pass(way):
            # See Note [Output comparison].
            r = os.system('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
                                                        actual_normalised_path))

            # If for some reason there were no non-whitespace differences,
            # then do a full diff
            if r == 0:
                r = os.system('diff -u "{0}" "{1}"'.format(expected_normalised_path,
                                                           actual_normalised_path))

        # --accept handling: never accept output for a test that is
        # expected to fail; otherwise overwrite (or delete) the sample.
        if config.accept and (getTestOpts().expect == 'fail' or
                              way in getTestOpts().expect_fail_for):
            if_verbose(1, 'Test is expected to fail. Not accepting new output.')
            return 0
        elif config.accept and actual_raw:
            if_verbose(1, 'Accepting new output.')
            write_file(expected_path, actual_raw)
            return 1
        elif config.accept:
            if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
            os.remove(expected_path)
            return 1
        else:
            return 0
1533
1534 # Note [Output comparison]
1535 #
1536 # We do two types of output comparison:
1537 #
1538 # 1. To decide whether a test has failed. We apply a `normaliser` and an
1539 # optional `whitespace_normaliser` to the expected and the actual
1540 # output, before comparing the two.
1541 #
1542 # 2. To show as a diff to the user when the test indeed failed. We apply
1543 # the same `normaliser` function to the outputs, to make the diff as
1544 # small as possible (only showing the actual problem). But we don't
1545 # apply the `whitespace_normaliser` here, because it might completely
1546 # squash all whitespace, making the diff unreadable. Instead we rely
1547 # on the `diff` program to ignore whitespace changes as much as
1548 # possible (#10152).
1549
def normalise_whitespace( str ):
    """Collapse every run of whitespace (including newlines) into a single
    space and drop leading/trailing whitespace."""
    return u' '.join(str.split())
1553
callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')

def normalise_callstacks(s):
    """Normalise GHC call-stack output: blank out line/column numbers and
    package ids, and (unless the test keeps them) remove -prof call
    stacks entirely."""
    def repl(matches):
        location = normalise_slashes_(matches.group(1))
        return ', called at {0}:<line>:<column> in <package-id>:'.format(location)

    # Ignore line number differences in call stacks (#10834).
    s = re.sub(callSite_re, repl, s)
    # Ignore the change in how we identify implicit call-stacks
    s = s.replace('from ImplicitParams', 'from HasCallStack')
    if not getTestOpts().keep_prof_callstacks:
        # Don't output prof callstacks. Test output should be
        # independent from the WAY we run the test.
        s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
    return s
1571
tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)

def normalise_type_reps(str):
    """Normalise out fingerprints from Typeable TyCon representations."""
    return tyCon_re.sub('TyCon FINGERPRINT FINGERPRINT ', str)
1577
def normalise_errmsg( str ):
    """Normalise error-messages emitted via stderr."""
    # IBM AIX's `ld` is a bit chatty
    if opsys('aix'):
        str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')

    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))

    str = normalise_callstacks(str)
    str = normalise_type_reps(str)

    # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
    # the colon is there because it appears in error messages; this
    # hacky solution is used in place of more sophisticated filename
    # mangling
    str = re.sub(r'([^\s])\.exe', r'\1', str)
    # normalise slashes, minimise Windows/Unix filename differences
    str = re.sub(r'\\', '/', str)

    # The inplace ghc's are called ghc-stage[123] to avoid filename
    # collisions, so we need to normalise that to just "ghc"
    str = re.sub('ghc-stage[123]', 'ghc', str)
    # Error messages sometimes name the integer implementation package
    str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)

    # Also filter out bullet characters. This is because bullets are used
    # to separate error sections, and tests shouldn't be sensitive to how
    # the division happens.
    bullet = u'•'.encode('utf8') if isinstance(str, bytes) else u'•'
    str = str.replace(bullet, '')
    return str
1608
1609 # normalise a .prof file, so that we can reasonably compare it against
1610 # a sample. This doesn't compare any of the actual profiling data,
1611 # only the shape of the profile and the number of entries.
1612 def normalise_prof (str):
1613 # strip everything up to the line beginning "COST CENTRE"
1614 str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)
1615
1616 # strip results for CAFs, these tend to change unpredictably
1617 str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)
1618
1619 # XXX Ignore Main.main. Sometimes this appears under CAF, and
1620 # sometimes under MAIN.
1621 str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)
1622
1623 # We have somthing like this:
1624 #
1625 # MAIN MAIN <built-in> 53 0 0.0 0.2 0.0 100.0
1626 # CAF Main <entire-module> 105 0 0.0 0.3 0.0 62.5
1627 # readPrec Main Main_1.hs:7:13-16 109 1 0.0 0.6 0.0 0.6
1628 # readPrec Main Main_1.hs:4:13-16 107 1 0.0 0.6 0.0 0.6
1629 # main Main Main_1.hs:(10,1)-(20,20) 106 1 0.0 20.2 0.0 61.0
1630 # == Main Main_1.hs:7:25-26 114 1 0.0 0.0 0.0 0.0
1631 # == Main Main_1.hs:4:25-26 113 1 0.0 0.0 0.0 0.0
1632 # showsPrec Main Main_1.hs:7:19-22 112 2 0.0 1.2 0.0 1.2
1633 # showsPrec Main Main_1.hs:4:19-22 111 2 0.0 0.9 0.0 0.9
1634 # readPrec Main Main_1.hs:7:13-16 110 0 0.0 18.8 0.0 18.8
1635 # readPrec Main Main_1.hs:4:13-16 108 0 0.0 19.9 0.0 19.9
1636 #
1637 # then we remove all the specific profiling data, leaving only the cost
1638 # centre name, module, src, and entries, to end up with this: (modulo
1639 # whitespace between columns)
1640 #
1641 # MAIN MAIN <built-in> 0
1642 # readPrec Main Main_1.hs:7:13-16 1
1643 # readPrec Main Main_1.hs:4:13-16 1
1644 # == Main Main_1.hs:7:25-26 1
1645 # == Main Main_1.hs:4:25-26 1
1646 # showsPrec Main Main_1.hs:7:19-22 2
1647 # showsPrec Main Main_1.hs:4:19-22 2
1648 # readPrec Main Main_1.hs:7:13-16 0
1649 # readPrec Main Main_1.hs:4:13-16 0
1650
1651 # Split 9 whitespace-separated groups, take columns 1 (cost-centre), 2
1652 # (module), 3 (src), and 5 (entries). SCC names can't have whitespace, so
1653 # this works fine.
1654 str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
1655 '\\1 \\2 \\3 \\5\n', str)
1656 return str
1657
def normalise_slashes_( str ):
    """Turn Windows backslashes into forward slashes."""
    return re.sub(r'\\', '/', str)
1661
def normalise_exe_( str ):
    """Remove Windows '.exe' suffixes wherever they occur."""
    return re.sub(r'\.exe', '', str)
1665
def normalise_output( str ):
    """Normalise program output: scrub ' error:' markers, lower-case
    ' Warning:', drop '.exe' suffixes, and normalise call stacks and
    Typeable representations."""
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))

    # Remove a .exe extension (for Windows)
    # This can occur in error messages generated by the program.
    str = re.sub(r'([^\s])\.exe', r'\1', str)

    str = normalise_callstacks(str)
    str = normalise_type_reps(str)
    return str
1677
def normalise_asm( str ):
    """Normalise assembly output, keeping only mnemonics, labels and call
    targets; assembler metadata directives and operands are dropped."""
    # Directives such as ".type" start with optional whitespace and a dot.
    metadata = re.compile(r'^[ \t]*\..*$')
    kept = []
    for line in str.split('\n'):
        # Drop metadata directives (e.g. ".type")
        if metadata.match(line):
            continue
        tokens = re.sub('@plt', '', line).lstrip().split()
        # Drop empty lines.
        if not tokens:
            continue
        # Drop operands, except for call instructions.
        if tokens[0] == 'call':
            kept.append(tokens[0] + ' ' + tokens[1])
        else:
            kept.append(tokens[0])
    return u'\n'.join(kept)
1698
def if_verbose( n, s ):
    """Print s when the configured verbosity is at least n."""
    if config.verbose >= n:
        print(s)
1702
def if_verbose_dump( n, f ):
    """At verbosity >= n, dump the contents of file f.  Best-effort: any
    failure to read just prints an empty line.

    Bug fixes: the file handle from `open(f)` was never closed, and the
    bare `except` also swallowed KeyboardInterrupt/SystemExit.
    """
    if config.verbose >= n:
        try:
            with open(f) as h:
                print(h.read())
        except Exception:
            print('')
1709
def runCmd(cmd, stdin=None, stdout=None, stderr=None, timeout_multiplier=1.0):
    """Run `cmd` under the timeout program and return its exit code.

    Args:
        cmd:    Bourne-shell command line; '{...}' placeholders are filled
                in from the global config (e.g. '{hpc} report A.tix').
        stdin:  path to read standard input from, or None
        stdout: path to write standard output to, or None
        stderr: path for standard error, subprocess.STDOUT, or None
        timeout_multiplier: scales the configured timeout

    Bug fix: the redirection files are now closed in a `finally` block,
    so they no longer leak if subprocess.call raises.
    """
    timeout_prog = strip_quotes(config.timeout_prog)
    timeout = str(int(ceil(config.timeout * timeout_multiplier)))

    # Format cmd using config. Example: cmd='{hpc} report A.tix'
    cmd = cmd.format(**config.__dict__)
    if_verbose(3, cmd + ('< ' + os.path.basename(stdin) if stdin else ''))

    stdin_file = open(stdin, 'r') if stdin else None
    stdout_file = open(stdout, 'w') if stdout else None
    if stderr and stderr is not subprocess.STDOUT:
        stderr_file = open(stderr, 'w')
    else:
        # Pass None or subprocess.STDOUT straight through.
        stderr_file = stderr

    try:
        # cmd is a complex command in Bourne-shell syntax
        # e.g (cd . && 'C:/users/simonpj/HEAD/inplace/bin/ghc-stage2' ...etc)
        # Hence it must ultimately be run by a Bourne shell. It's timeout's job
        # to invoke the Bourne shell
        r = subprocess.call([timeout_prog, timeout, cmd],
                            stdin=stdin_file, stdout=stdout_file,
                            stderr=stderr_file)
    finally:
        if stdin_file:
            stdin_file.close()
        if stdout_file:
            stdout_file.close()
        if stderr_file and stderr_file is not subprocess.STDOUT:
            stderr_file.close()

    if r == 98:
        # The python timeout program uses 98 to signal that ^C was pressed
        stopNow()
    if r == 99 and getTestOpts().exit_code != 99:
        # Only print a message when timeout killed the process unexpectedly.
        if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
    return r
1746
1747 # -----------------------------------------------------------------------------
1748 # checking if ghostscript is available for checking the output of hp2ps
1749
def genGSCmd(psfile):
    """GhostScript invocation that merely validates a PostScript file
    ('{gs}' is filled in later by runCmd from the config)."""
    return '{{gs}} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "{0}"'.format(psfile)
1752
def gsNotWorking():
    """Report that GhostScript cannot be used to validate hp2ps output."""
    global gs_working
    print("GhostScript not available for hp2ps tests")
1756
# Probe (once, at module load) whether GhostScript can be used to check
# hp2ps output: gs must succeed on the known-good .ps file and fail on
# the known-bad one.
global gs_working
gs_working = 0
if config.have_profiling:
  if config.gs != '':
    resultGood = runCmd(genGSCmd(config.confdir + '/good.ps'));
    if resultGood == 0:
        resultBad = runCmd(genGSCmd(config.confdir + '/bad.ps') +
                                   ' >/dev/null 2>&1')
        if resultBad != 0:
            print("GhostScript available for hp2ps tests")
            gs_working = 1;
        else:
            gsNotWorking();
    else:
        gsNotWorking();
  else:
    gsNotWorking();
else:
  gsNotWorking();
1774
def add_suffix( name, suffix ):
    """Return name + '.' + suffix, or name unchanged when suffix is ''."""
    return name if suffix == '' else name + '.' + suffix
1780
def add_hs_lhs_suffix(name):
    """Append the source-file suffix implied by the test's source kind."""
    opts = getTestOpts()
    if opts.c_src:
        suffix = 'c'
    elif opts.cmm_src:
        suffix = 'cmm'
    elif opts.objc_src:
        suffix = 'm'
    elif opts.objcpp_src:
        suffix = 'mm'
    elif opts.literate:
        suffix = 'lhs'
    else:
        suffix = 'hs'
    return add_suffix(name, suffix)
1794
def replace_suffix( name, suffix ):
    """Swap the existing extension (if any) of `name` for `suffix`."""
    root = os.path.splitext(name)[0]
    return root + '.' + suffix
1798
def in_testdir(name, suffix=''):
    """Path of `name` (with optional suffix) inside the test's working dir."""
    return os.path.join(getTestOpts().testdir, add_suffix(name, suffix))
1801
def in_srcdir(name, suffix=''):
    """Path of `name` (with optional suffix) inside the test's source dir."""
    return os.path.join(getTestOpts().srcdir, add_suffix(name, suffix))
1804
1805 # Finding the sample output. The filename is of the form
1806 #
1807 # <test>.stdout[-ws-<wordsize>][-<platform>]
1808 #
1809 def find_expected_file(name, suff):
1810 basename = add_suffix(name, suff)
1811
1812 files = [basename + ws + plat
1813 for plat in ['-' + config.platform, '-' + config.os, '']
1814 for ws in ['-ws-' + config.wordsize, '']]
1815
1816 for f in files:
1817 if os.path.exists(in_srcdir(f)):
1818 return f
1819
1820 return basename
1821
def cleanup():
    """Delete the test's private working directory, ignoring any errors."""
    shutil.rmtree(getTestOpts().testdir, ignore_errors=True)
1824
1825 # -----------------------------------------------------------------------------
1826 # Return a list of all the files ending in '.T' below directories roots.
1827
def findTFiles(roots):
    """Yield every path ending in '.T' beneath the given root directories,
    visiting subdirectories in sorted order."""
    for root in roots:
        for dirpath, dirnames, filenames in os.walk(root, topdown=True):
            # Never pick up .T files in uncleaned .run directories.
            dirnames[:] = [d for d in sorted(dirnames)
                           if not d.endswith(testdir_suffix)]
            for fn in filenames:
                if fn.endswith('.T'):
                    yield os.path.join(dirpath, fn)
1837
1838 # -----------------------------------------------------------------------------
1839 # Output a test summary to the specified file object
1840
def summary(t, file, short=False):
    """Write a human-readable summary of test results `t` to `file`.

    With short=True only the repeat-friendly TEST="..." line of
    unexpected results is printed; otherwise the full counts and the
    per-category lists follow.
    """
    file.write('\n')
    printUnexpectedTests(file, [t.unexpected_passes, t.unexpected_failures, t.unexpected_stat_failures])

    if short:
        # Only print the list of unexpected tests above.
        return

    file.write('SUMMARY for test run started at '
               + time.strftime("%c %Z", t.start_time) + '\n'
               + str(datetime.timedelta(seconds=
                    round(time.time() - time.mktime(t.start_time)))).rjust(8)
               + ' spent to go through\n'
               + repr(t.total_tests).rjust(8)
               + ' total tests, which gave rise to\n'
               + repr(t.total_test_cases).rjust(8)
               + ' test cases, of which\n'
               + repr(t.n_tests_skipped).rjust(8)
               + ' were skipped\n'
               + '\n'
               + repr(len(t.missing_libs)).rjust(8)
               + ' had missing libraries\n'
               + repr(t.n_expected_passes).rjust(8)
               + ' expected passes\n'
               + repr(t.n_expected_failures).rjust(8)
               + ' expected failures\n'
               + '\n'
               + repr(len(t.framework_failures)).rjust(8)
               + ' caused framework failures\n'
               + repr(len(t.unexpected_passes)).rjust(8)
               + ' unexpected passes\n'
               + repr(len(t.unexpected_failures)).rjust(8)
               + ' unexpected failures\n'
               + repr(len(t.unexpected_stat_failures)).rjust(8)
               + ' unexpected stat failures\n'
               + '\n')

    if t.unexpected_passes:
        file.write('Unexpected passes:\n')
        printTestInfosSummary(file, t.unexpected_passes)

    if t.unexpected_failures:
        file.write('Unexpected failures:\n')
        printTestInfosSummary(file, t.unexpected_failures)

    if t.unexpected_stat_failures:
        file.write('Unexpected stat failures:\n')
        printTestInfosSummary(file, t.unexpected_stat_failures)

    if t.framework_failures:
        file.write('Framework failures:\n')
        printTestInfosSummary(file, t.framework_failures)

    if stopping():
        file.write('WARNING: Testsuite run was terminated early\n')
1897
def printUnexpectedTests(file, testInfoss):
    """Write a TEST="..." line naming every test with an unexpected
    result, handy for re-running exactly those tests.  Writes nothing
    when there are none."""
    unexpected = set()
    for testInfos in testInfoss:
        for (_, name, _, _) in testInfos:
            unexpected.add(name)
    if unexpected:
        file.write('Unexpected results from:\n')
        file.write('TEST="' + ' '.join(unexpected) + '"\n')
        file.write('\n')
1905
def printTestInfosSummary(file, testInfos):
    """Pretty-print one '   <dir> <name> [<reason>] (<way>)' line per
    test, padding the directory column to a common width."""
    width = max(len(d) for (d, _, _, _) in testInfos)
    for (d, name, reason, way) in testInfos:
        file.write('   {0} {1} [{2}] ({3})\n'.format(d.ljust(width), name, reason, way))
    file.write('\n')
1912
def modify_lines(s, f):
    """Apply f to every line of s and re-join the result.  A non-empty
    result always ends in a newline, which prevents '\\ No newline at end
    of file' warnings when diffing."""
    transformed = u'\n'.join(f(line) for line in s.splitlines())
    if transformed and not transformed.endswith('\n'):
        transformed += '\n'
    return transformed
1918 return s