af1fc66c38a959a601f64f7d7072d1240dd7a165
[ghc.git] / testsuite / driver / testlib.py
1 # coding=utf8
2 #
3 # (c) Simon Marlow 2002
4 #
5
6 from __future__ import print_function
7
8 import io
9 import shutil
10 import os
11 import errno
12 import string
13 import re
14 import traceback
15 import time
16 import datetime
17 import copy
18 import glob
19 from math import ceil, trunc
20 import collections
21 import subprocess
22
23 from testglobals import *
24 from testutil import *
25 from extra_files import extra_src_files
26
27 try:
28 basestring
29 except: # Python 3
30 basestring = (str,bytes)
31
32 if config.use_threads:
33 import threading
34 try:
35 import thread
36 except ImportError: # Python 3
37 import _thread as thread
38
# Global stop flag, used to abort the whole run cleanly (e.g. on Ctrl-C).
global wantToStop
wantToStop = False
def stopNow():
    # Ask the driver to stop scheduling further tests.
    global wantToStop
    wantToStop = True
def stopping():
    # Has a stop been requested?
    return wantToStop
46
# Options valid for the current test only (these get reset to
# testdir_testopts after each test).

global testopts_local
if config.use_threads:
    # Each worker thread gets its own copy of the current test options.
    testopts_local = threading.local()
else:
    class TestOpts_Local:
        # Plain attribute holder standing in for threading.local()
        # when running single-threaded.
        pass
    testopts_local = TestOpts_Local()
57
def getTestOpts():
    """Return the options of the test currently being set up or run."""
    return testopts_local.x

def setLocalTestOpts(opts):
    # Install `opts` as the current (thread-local) test options.
    global testopts_local
    testopts_local.x=opts

def isStatsTest():
    """True if the current test checks compiler or runtime stats fields."""
    opts = getTestOpts()
    return bool(opts.compiler_stats_range_fields or opts.stats_range_fields)
68
69
# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
    global thisdir_settings
    # Chain the new setup function onto the current per-directory settings.
    thisdir_settings = [thisdir_settings, f]
75
76 # -----------------------------------------------------------------------------
77 # Canned setup functions for common cases. eg. for a test you might say
78 #
79 # test('test001', normal, compile, [''])
80 #
81 # to run it without any options, but change it to
82 #
83 # test('test001', expect_fail, compile, [''])
84 #
85 # to expect failure for this test.
86
def normal( name, opts ):
    # Default setup: run the test with unmodified options.
    return;

def skip( name, opts ):
    # Don't run this test at all.
    opts.skip = 1

def expect_fail( name, opts ):
    # The compiler, testdriver, OS or platform is missing a certain
    # feature, and we don't plan to or can't fix it now or in the
    # future.
    opts.expect = 'fail';

def reqlib( lib ):
    # Require the given package to be installed (checked via ghc-pkg).
    return lambda name, opts, l=lib: _reqlib (name, opts, l )

def stage1(name, opts):
    # See Note [Why is there no stage1 setup function?]
    framework_fail(name, 'stage1 setup function does not exist',
                   'add your test to testsuite/tests/stage1 instead')
106
107 # Note [Why is there no stage1 setup function?]
108 #
109 # Presumably a stage1 setup function would signal that the stage1
110 # compiler should be used to compile a test.
111 #
112 # Trouble is, the path to the compiler + the `ghc --info` settings for
113 # that compiler are currently passed in from the `make` part of the
114 # testsuite driver.
115 #
116 # Switching compilers in the Python part would be entirely too late, as
117 # all ghc_with_* settings would be wrong. See config/ghc for possible
118 # consequences (for example, config.run_ways would still be
119 # based on the default compiler, quite likely causing ./validate --slow
120 # to fail).
121 #
122 # It would be possible to let the Python part of the testsuite driver
123 # make the call to `ghc --info`, but doing so would require quite some
124 # work. Care has to be taken to not affect the run_command tests for
125 # example, as they also use the `ghc --info` settings:
126 # quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
127 #
128 # If you want a test to run using the stage1 compiler, add it to the
129 # testsuite/tests/stage1 directory. Validate runs the tests in that
130 # directory with `make stage=1`.
131
# Cache the results of looking to see if we have a library or not.
# This makes quite a difference, especially on Windows.
have_lib = {}

def _reqlib( name, opts, lib ):
    """Mark the test as 'missing-lib' unless `ghc-pkg describe <lib>`
    succeeds.

    Results are cached per library name in `have_lib`, because spawning
    ghc-pkg once per test is expensive (especially on Windows).
    """
    if lib in have_lib:
        got_it = have_lib[lib]
    else:
        cmd = strip_quotes(config.ghc_pkg)
        p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # Consume stdout and stderr to avoid blocking due to the pipe
        # buffers filling.  communicate() also waits for the process to
        # terminate, so the previous separate wait() call was redundant.
        p.communicate()
        got_it = p.returncode == 0
        have_lib[lib] = got_it

    if not got_it:
        opts.expect = 'missing-lib'
153
def req_haddock( name, opts ):
    # Require a haddock binary to be available.
    if not config.haddock:
        opts.expect = 'missing-lib'

def req_profiling( name, opts ):
    '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
    if not config.have_profiling:
        opts.expect = 'fail'

def req_shared_libs( name, opts ):
    # Require support for shared libraries.
    if not config.have_shared_libs:
        opts.expect = 'fail'

def req_interp( name, opts ):
    # Require the GHC interpreter.
    if not config.have_interp:
        opts.expect = 'fail'

def req_smp( name, opts ):
    # Require an RTS with SMP support.
    if not config.have_smp:
        opts.expect = 'fail'

def ignore_output( name, opts ):
    # Don't compare the test's output against the expected output files.
    opts.ignore_output = 1

def combined_output( name, opts ):
    # Merge stderr into stdout and compare the combined stream.
    opts.combined_output = True
180
181 # -----
182
def expect_fail_for( ways ):
    # Expect failure, but only for the given ways.
    return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )

def _expect_fail_for( name, opts, ways ):
    opts.expect_fail_for = ways

def expect_broken( bug ):
    # This test is expected not to work due to the indicated trac bug
    # number.
    return lambda name, opts, b=bug: _expect_broken (name, opts, b )

def _expect_broken( name, opts, bug ):
    record_broken(name, opts, bug)
    opts.expect = 'fail';

def expect_broken_for( bug, ways ):
    # As expect_broken, but only for the given ways.
    return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )

def _expect_broken_for( name, opts, bug, ways ):
    record_broken(name, opts, bug)
    opts.expect_fail_for = ways
204
def record_broken(name, opts, bug):
    """Remember that this test is marked broken for trac ticket `bug`,
    so the driver can report all known-broken tests (see when() and
    config.list_broken)."""
    global brokens
    me = (bug, opts.testdir, name)
    # PEP 8 idiom: 'x not in xs' rather than 'not x in xs'.
    if me not in brokens:
        brokens.append(me)
210
def _expect_pass(way):
    # Helper function. Not intended for use in .T files.
    # True if the current test is expected to pass in the given way.
    opts = getTestOpts()
    return opts.expect == 'pass' and way not in opts.expect_fail_for
215
# -----

def omit_ways( ways ):
    # Skip this test for the given ways.
    return lambda name, opts, w=ways: _omit_ways( name, opts, w )

def _omit_ways( name, opts, ways ):
    opts.omit_ways = ways

# -----

def only_ways( ways ):
    # Run this test only in the given ways.
    return lambda name, opts, w=ways: _only_ways( name, opts, w )

def _only_ways( name, opts, ways ):
    opts.only_ways = ways

# -----

def extra_ways( ways ):
    # Run this test in the given ways in addition to the default ones.
    return lambda name, opts, w=ways: _extra_ways( name, opts, w )

def _extra_ways( name, opts, ways ):
    opts.extra_ways = ways

# -----

def set_stdin( file ):
    # Feed the given file to the test program's stdin.
    return lambda name, opts, f=file: _set_stdin(name, opts, f);

def _set_stdin( name, opts, f ):
    opts.stdin = f
247
# -----

def exit_code( val ):
    # The exit code the test program is expected to produce.
    return lambda name, opts, v=val: _exit_code(name, opts, v);

def _exit_code( name, opts, v ):
    opts.exit_code = v

def signal_exit_code( val ):
    # Expected exit code when the program dies from signal `val`.
    if opsys('solaris2'):
        return exit_code( val );
    else:
        # When application running on Linux receives fatal error
        # signal, then its exit code is encoded as 128 + signal
        # value. See http://www.tldp.org/LDP/abs/html/exitcodes.html
        # I assume that Mac OS X behaves in the same way at least Mac
        # OS X builder behavior suggests this.
        return exit_code( val+128 );
266
# -----

def compile_timeout_multiplier( val ):
    # Scale the compile-time timeout for this test by the given factor.
    return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)

def _compile_timeout_multiplier( name, opts, v ):
    opts.compile_timeout_multiplier = v

def run_timeout_multiplier( val ):
    # Scale the run-time timeout for this test by the given factor.
    return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)

def _run_timeout_multiplier( name, opts, v ):
    opts.run_timeout_multiplier = v

# -----

def extra_run_opts( val ):
    # Extra command-line arguments to pass to the test program.
    return lambda name, opts, v=val: _extra_run_opts(name, opts, v);

def _extra_run_opts( name, opts, v ):
    opts.extra_run_opts = v

# -----

def extra_hc_opts( val ):
    # Extra flags to pass to the compiler.
    return lambda name, opts, v=val: _extra_hc_opts(name, opts, v);

def _extra_hc_opts( name, opts, v ):
    opts.extra_hc_opts = v

# -----

def extra_clean( files ):
    # TODO. Remove all calls to extra_clean.
    return lambda _name, _opts: None

def extra_files(files):
    # Extra files (relative to the source dir) that this test depends
    # on; they are linked/copied into the test's work directory.
    return lambda name, opts: _extra_files(name, opts, files)

def _extra_files(name, opts, files):
    opts.extra_files.extend(files)
308
309 # -----
310
311 def stats_num_field( field, expecteds ):
312 return lambda name, opts, f=field, e=expecteds: _stats_num_field(name, opts, f, e);
313
314 def _stats_num_field( name, opts, field, expecteds ):
315 if field in opts.stats_range_fields:
316 framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
317
318 if type(expecteds) is list:
319 for (b, expected, dev) in expecteds:
320 if b:
321 opts.stats_range_fields[field] = (expected, dev)
322 return
323 framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
324
325 else:
326 (expected, dev) = expecteds
327 opts.stats_range_fields[field] = (expected, dev)
328
def compiler_stats_num_field( field, expecteds ):
    # Check a compiler perf stat against an expected value; `expecteds`
    # is a list of (guard, expected, deviation) triples.
    return lambda name, opts, f=field, e=expecteds: _compiler_stats_num_field(name, opts, f, e);

def _compiler_stats_num_field( name, opts, field, expecteds ):
    if field in opts.compiler_stats_range_fields:
        framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')

    # Compiler performance numbers change when debugging is on, making the results
    # useless and confusing. Therefore, skip if debugging is on.
    if compiler_debugged():
        skip(name, opts)

    # Use the first entry whose guard holds.
    for (b, expected, dev) in expecteds:
        if b:
            opts.compiler_stats_range_fields[field] = (expected, dev)
            return

    framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
347
348 # -----
349
def when(b, f):
    # Apply the setup function f only when b is True.
    # When list_brokens is on, we want to see all expect_broken calls,
    # so we always do f
    if b or config.list_broken:
        return f
    else:
        return normal

def unless(b, f):
    # Apply the setup function f only when b is False.
    return when(not b, f)
360
# Simple predicates over the test configuration, for use with
# when()/unless() above.

def doing_ghci():
    return 'ghci' in config.run_ways

def ghc_dynamic():
    return config.ghc_dynamic

def fast():
    # True when the testsuite runs at the fastest speed setting.
    return config.speed == 2

def platform( plat ):
    return config.platform == plat

def opsys( os ):
    return config.os == os

def arch( arch ):
    return config.arch == arch

def wordsize( ws ):
    return config.wordsize == str(ws)

def msys( ):
    return config.msys

def cygwin( ):
    return config.cygwin

def have_vanilla( ):
    return config.have_vanilla

def have_dynamic( ):
    return config.have_dynamic

def have_profiling( ):
    return config.have_profiling

def in_tree_compiler( ):
    return config.in_tree_compiler

def unregisterised( ):
    return config.unregisterised

def compiler_profiled( ):
    return config.compiler_profiled

def compiler_debugged( ):
    return config.compiler_debugged
408
# ---

def high_memory_usage(name, opts):
    # Run this test with no other tests in parallel.
    opts.alone = True

# If a test is for a multi-CPU race, then running the test alone
# increases the chance that we'll actually see it.
def multi_cpu_race(name, opts):
    opts.alone = True
418
# ---
def literate( name, opts ):
    # The source is a literate (.lhs) file.
    opts.literate = 1;

def c_src( name, opts ):
    # The source is a C file.
    opts.c_src = 1;

def objc_src( name, opts ):
    # The source is an Objective-C file.
    opts.objc_src = 1;

def objcpp_src( name, opts ):
    # The source is an Objective-C++ file.
    opts.objcpp_src = 1;

def cmm_src( name, opts ):
    # The source is a C-- (.cmm) file.
    opts.cmm_src = 1;

def outputdir( odir ):
    # Directory for this test's compilation output.
    return lambda name, opts, d=odir: _outputdir(name, opts, d)

def _outputdir( name, opts, odir ):
    opts.outputdir = odir;
440
# ----

def pre_cmd( cmd ):
    # A shell command to run in the test directory before the test.
    return lambda name, opts, c=cmd: _pre_cmd(name, opts, cmd)

def _pre_cmd( name, opts, cmd ):
    opts.pre_cmd = cmd

# ----

def clean_cmd( cmd ):
    # TODO. Remove all calls to clean_cmd.
    return lambda _name, _opts: None

# ----

def cmd_prefix( prefix ):
    # Prefix the command used to run the test with the given string.
    return lambda name, opts, p=prefix: _cmd_prefix(name, opts, prefix)

def _cmd_prefix( name, opts, prefix ):
    opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd;

# ----

def cmd_wrapper( fun ):
    # Transform the command used to run the test with the given function.
    return lambda name, opts, f=fun: _cmd_wrapper(name, opts, fun)

def _cmd_wrapper( name, opts, fun ):
    opts.cmd_wrapper = fun

# ----

def compile_cmd_prefix( prefix ):
    # Prefix the compilation command with the given string.
    return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, prefix)

def _compile_cmd_prefix( name, opts, prefix ):
    opts.compile_cmd_prefix = prefix

# ----

def check_stdout( f ):
    # Install a custom function used to check the test's stdout.
    return lambda name, opts, f=f: _check_stdout(name, opts, f)

def _check_stdout( name, opts, f ):
    opts.check_stdout = f
486
# ----

def normalise_slashes( name, opts ):
    # Normalise path separators in the output before comparing.
    _normalise_fun(name, opts, normalise_slashes_)

def normalise_exe( name, opts ):
    _normalise_fun(name, opts, normalise_exe_)

def normalise_fun( *fs ):
    # Add extra normaliser functions for the test's output.
    return lambda name, opts: _normalise_fun(name, opts, fs)

def _normalise_fun( name, opts, *fs ):
    opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)

def normalise_errmsg_fun( *fs ):
    # Add extra normaliser functions for compiler error messages.
    return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)

def _normalise_errmsg_fun( name, opts, *fs ):
    opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)
506
def normalise_version_( *pkgs ):
    """Return a normaliser replacing '<pkg>-<version>' with
    '<pkg>-<VERSION>' for each of the given package names."""
    def normalise_version__( s ):
        # re.escape the names: package names may contain regex
        # metacharacters (e.g. '.').  Parameter renamed from 'str',
        # which shadowed the builtin.
        return re.sub('(' + '|'.join(map(re.escape, pkgs)) + ')-[0-9.]+',
                      '\\1-<VERSION>', s)
    return normalise_version__
512
def normalise_version( *pkgs ):
    # Normalise version numbers of the given packages in both the
    # test output and the compiler error messages.
    def normalise_version__( name, opts ):
        _normalise_fun(name, opts, normalise_version_(*pkgs))
        _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
    return normalise_version__

def normalise_drive_letter(name, opts):
    # Windows only. Change D:\\ to C:\\.
    _normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))

def keep_prof_callstacks(name, opts):
    """Keep profiling callstacks.

    Use together with `only_ways(prof_ways)`.
    """
    opts.keep_prof_callstacks = True
529
def join_normalisers(*a):
    """
    Compose functions, flattening sequences.

       join_normalisers(f1,[f2,f3],f4)

    is the same as

       lambda x: f1(f2(f3(f4(x))))
    """
    # collections.Iterable has lived in collections.abc since Python
    # 3.3 and was removed from collections in 3.10.
    try:
        from collections.abc import Iterable
    except ImportError:  # Python 2
        from collections import Iterable

    def flatten(l):
        """
        Taken from http://stackoverflow.com/a/2158532/946226
        """
        for el in l:
            # Strings are iterable but must be treated as atoms here.
            if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):
                for sub in flatten(el):
                    yield sub
            else:
                yield el

    a = flatten(a)

    fn = lambda x:x # identity function
    for f in a:
        assert callable(f)
        # Wrap so that f runs first, then everything composed so far.
        fn = lambda x,f=f,fn=fn: fn(f(x))
    return fn
559
560 # ----
561 # Function for composing two opt-fns together
562
def executeSetups(fs, name, opts):
    """Apply a setup function, or an arbitrarily nested list of setup
    functions, to (name, opts) in order."""
    if type(fs) is not list:
        # A single setup function: just apply it.
        fs(name, opts)
        return
    for setup in fs:
        executeSetups(setup, name, opts)
571
# -----------------------------------------------------------------------------
# The current directory of tests

def newTestDir(tempdir, dir):
    # Install the default per-directory settings for the .T file that
    # is about to be read.

    global thisdir_settings
    # reset the options for this test directory
    def settings(name, opts, tempdir=tempdir, dir=dir):
        return _newTestDir(name, opts, tempdir, dir)
    thisdir_settings = settings

# Should be equal to entry in toplevel .gitignore.
testdir_suffix = '.run'

def _newTestDir(name, opts, tempdir, dir):
    # Point the test at its source directory and its private scratch
    # directory under tempdir.
    opts.srcdir = os.path.join(os.getcwd(), dir)
    opts.testdir = os.path.join(tempdir, dir, name + testdir_suffix)
    opts.compiler_always_flags = config.compiler_always_flags
590
# -----------------------------------------------------------------------------
# Actually doing tests

parallelTests = []      # tests that may run concurrently
aloneTests = []         # tests that must run with nothing else active
allTestNames = set([])  # every test name seen, to catch duplicates
597
def runTest (opts, name, func, args):
    # Dispatch one test: spawn a worker thread when threading is
    # enabled, otherwise run it synchronously.
    ok = 0

    if config.use_threads:
        t.thread_pool.acquire()
        try:
            # Wait until a thread slot is free.
            while config.threads<(t.running_threads+1):
                t.thread_pool.wait()
            t.running_threads = t.running_threads+1
            ok=1
            t.thread_pool.release()
            thread.start_new_thread(test_common_thread, (name, opts, func, args))
        except:
            # If anything went wrong before we released the pool lock,
            # release it now so other threads can make progress.
            if not ok:
                t.thread_pool.release()
    else:
        test_common_work (name, opts, func, args)
615
# name  :: String
# setup :: TestOpts -> IO ()
def test (name, setup, func, args):
    # Register one test: validate its name, run the setup functions
    # against a fresh copy of the default options, and queue it for
    # (possibly parallel) execution.
    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')

    if config.run_only_some_tests:
        if name not in config.only:
            return
        else:
            # Note [Mutating config.only]
            # config.only is initially the set of tests requested by
            # the user (via 'make TEST='). We then remove all tests that
            # we've already seen (in .T files), so that we can later
            # report on any tests we couldn't find and error out.
            config.only.remove(name)

    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
    myTestOpts = copy.deepcopy(default_testopts)

    executeSetups([thisdir_settings, setup], name, myTestOpts)

    thisTest = lambda : runTest(myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
    else:
        parallelTests.append(thisTest)
    allTestNames.add(name)
652
if config.use_threads:
    def test_common_thread(name, opts, func, args):
        # Worker-thread entry point: run the test while holding the
        # driver lock, then give back our slot in the thread pool.
        t.lock.acquire()
        try:
            test_common_work(name,opts,func,args)
        finally:
            t.lock.release()
            t.thread_pool.acquire()
            t.running_threads = t.running_threads - 1
            t.thread_pool.notify()
            t.thread_pool.release()
663 t.thread_pool.release()
664
def get_package_cache_timestamp():
    """Return the mtime of the package.conf cache file, or 0.0 when no
    cache file is configured or it cannot be stat'ed.

    Compared before/after each test to detect tests that accidentally
    modify the installed package database.
    """
    if config.package_conf_cache_file == '':
        return 0.0
    else:
        try:
            return os.stat(config.package_conf_cache_file).st_mtime
        except OSError:
            # Narrowed from a bare 'except:': only filesystem errors
            # (missing/unreadable file) should be treated as 'no cache'.
            return 0.0

# File extensions never copied into the test's work directory (#12112).
do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o') # 12112
675
def test_common_work (name, opts, func, args):
    """Run one registered test in all of its enabled ways, recording
    results (and framework failures) in the global tally `t`."""
    try:
        t.total_tests = t.total_tests+1
        setLocalTestOpts(opts)

        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()

        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']

        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]

        t.total_test_cases = t.total_test_cases + len(all_ways)

        ok_way = lambda way: \
            not getTestOpts().skip \
            and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and way not in getTestOpts().omit_ways

        # Which ways we are asked to skip
        do_ways = list(filter (ok_way,all_ways))

        # Only run all ways in slow mode.
        # See Note [validate and testsuite speed] in toplevel Makefile.
        if config.accept:
            # Only ever run one way
            do_ways = do_ways[:1]
        elif config.speed > 0:
            # However, if we EXPLICITLY asked for a way (with extra_ways)
            # please test it!
            explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
            other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
            do_ways = other_ways[:1] + explicit_ways

        # Find all files in the source directory that this test
        # depends on. Do this only once for all ways.
        # Generously add all filenames that start with the name of
        # the test to this set, as a convenience to test authors.
        # They will have to use the `extra_files` setup function to
        # specify all other files that their test depends on (but
        # this seems to be necessary for only about 10% of all
        # tests).
        files = set(f for f in os.listdir(opts.srcdir)
                    if f.startswith(name) and not f == name and
                       not f.endswith(testdir_suffix) and
                       not os.path.splitext(f)[1] in do_not_copy)
        for filename in (opts.extra_files + extra_src_files.get(name, [])):
            if filename.startswith('/'):
                framework_fail(name, 'whole-test',
                    'no absolute paths in extra_files please: ' + filename)

            elif '*' in filename:
                # Don't use wildcards in extra_files too much, as
                # globbing is slow.
                files.update((os.path.relpath(f, opts.srcdir)
                              for f in glob.iglob(in_srcdir(filename))))

            elif filename:
                files.add(filename)

            else:
                framework_fail(name, 'whole-test', 'extra_file is empty string')

        # Run the required tests...
        for way in do_ways:
            if stopping():
                break
            try:
                do_test(name, way, func, args, files)
            except KeyboardInterrupt:
                stopNow()
            except Exception as e:
                framework_fail(name, way, str(e))
                traceback.print_exc()

        # Everything not in do_ways counts as skipped.
        for way in all_ways:
            if way not in do_ways:
                skiptest (name,way)

        if config.cleanup and do_ways:
            cleanup()

        package_conf_cache_file_end_timestamp = get_package_cache_timestamp();

        # A test must not modify the installed package database.
        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))

    except Exception as e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
778
def do_test(name, way, func, args, files):
    """Run a single (test, way): prepare a clean scratch directory,
    populate it with the test's files, run `func`, and record the
    outcome against what the test expects."""
    opts = getTestOpts()

    full_name = name + '(' + way + ')'

    if_verbose(2, "=====> {0} {1} of {2} {3}".format(
        full_name, t.total_tests, len(allTestNames),
        [t.n_unexpected_passes, t.n_unexpected_failures, t.n_framework_failures]))

    # Clean up prior to the test, so that we can't spuriously conclude
    # that it passed on the basis of old run outputs.
    cleanup()
    os.makedirs(opts.testdir)

    # Link all source files for this test into a new directory in
    # /tmp, and run the test in that directory. This makes it
    # possible to run tests in parallel, without modification, that
    # would otherwise (accidentally) write to the same output file.
    # It also makes it easier to keep the testsuite clean.

    for extra_file in files:
        src = in_srcdir(extra_file)
        dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
        if os.path.isfile(src):
            link_or_copy_file(src, dst)
        elif os.path.isdir(src):
            os.mkdir(dst)
            lndir(src, dst)
        else:
            if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
                # When using a ghc built without haddock support, .t
                # files are rightfully missing. Don't
                # framework_fail. Test will be skipped later.
                pass
            else:
                framework_fail(name, way,
                    'extra_file does not exist: ' + extra_file)

    if func.__name__ == 'run_command' or opts.pre_cmd:
        # When running 'MAKE' make sure 'TOP' still points to the
        # root of the testsuite.
        src_makefile = in_srcdir('Makefile')
        dst_makefile = in_testdir('Makefile')
        if os.path.exists(src_makefile):
            with open(src_makefile, 'r') as src:
                makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
            with open(dst_makefile, 'w') as dst:
                dst.write(makefile)

    if config.use_threads:
        # Release the driver lock while the (possibly slow) test runs.
        t.lock.release()

    if opts.pre_cmd:
        exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, opts.pre_cmd))
        if exit_code != 0:
            framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))

    try:
        result = func(*[name,way] + args)
    finally:
        if config.use_threads:
            t.lock.acquire()

    if opts.expect not in ['pass', 'fail', 'missing-lib']:
        framework_fail(name, way, 'bad expected ' + opts.expect)

    try:
        passFail = result['passFail']
    except:
        passFail = 'No passFail found'

    # Tally the result against what the test expected.
    if passFail == 'pass':
        if _expect_pass(way):
            t.n_expected_passes = t.n_expected_passes + 1
            if name in t.expected_passes:
                t.expected_passes[name].append(way)
            else:
                t.expected_passes[name] = [way]
        else:
            if_verbose(1, '*** unexpected pass for %s' % full_name)
            t.n_unexpected_passes = t.n_unexpected_passes + 1
            addPassingTestInfo(t.unexpected_passes, opts.testdir, name, way)
    elif passFail == 'fail':
        if _expect_pass(way):
            reason = result['reason']
            tag = result.get('tag')
            if tag == 'stat':
                if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
                t.n_unexpected_stat_failures = t.n_unexpected_stat_failures + 1
                addFailingTestInfo(t.unexpected_stat_failures, opts.testdir, name, reason, way)
            else:
                if_verbose(1, '*** unexpected failure for %s' % full_name)
                t.n_unexpected_failures = t.n_unexpected_failures + 1
                addFailingTestInfo(t.unexpected_failures, opts.testdir, name, reason, way)
        else:
            if opts.expect == 'missing-lib':
                t.n_missing_libs = t.n_missing_libs + 1
                if name in t.missing_libs:
                    t.missing_libs[name].append(way)
                else:
                    t.missing_libs[name] = [way]
            else:
                t.n_expected_failures = t.n_expected_failures + 1
                if name in t.expected_failures:
                    t.expected_failures[name].append(way)
                else:
                    t.expected_failures[name] = [way]
    else:
        framework_fail(name, way, 'bad result ' + passFail)
888
def addPassingTestInfo (testInfos, directory, name, way):
    """Record an unexpectedly-passing (name, way) under
    testInfos[directory][name]."""
    # Strip a leading './' or '.\' from the directory.
    directory = re.sub('^\\.[/\\\\]', '', directory)
    testInfos.setdefault(directory, {}).setdefault(name, []).append(way)
899
def addFailingTestInfo (testInfos, directory, name, reason, way):
    """Record a failing (name, way) with its failure reason under
    testInfos[directory][name][reason]."""
    # Strip a leading './' or '.\' from the directory.
    directory = re.sub('^\\.[/\\\\]', '', directory)
    byName = testInfos.setdefault(directory, {})
    byReason = byName.setdefault(name, {})
    byReason.setdefault(reason, []).append(way)
913
def skiptest (name, way):
    # Record that (name, way) was skipped.
    t.n_tests_skipped = t.n_tests_skipped + 1
    if name in t.tests_skipped:
        t.tests_skipped[name].append(way)
    else:
        t.tests_skipped[name] = [way]
921
def framework_fail( name, way, reason ):
    # Record a failure of the test framework itself (as opposed to the
    # test failing); reported separately in the summary.
    full_name = name + '(' + way + ')'
    if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
    t.n_framework_failures = t.n_framework_failures + 1
    if name in t.framework_failures:
        t.framework_failures[name].append(way)
    else:
        t.framework_failures[name] = [way]
930
def badResult(result):
    """True unless `result` is a well-formed result dict whose
    'passFail' entry is 'pass'."""
    try:
        return result['passFail'] != 'pass'
    except (KeyError, TypeError):
        # Narrowed from a bare 'except:'.  A missing 'passFail' key or
        # a non-dict result counts as bad; anything else (e.g.
        # KeyboardInterrupt) should propagate.
        return True
938
def passed():
    # Result record for a passing test.
    return {'passFail': 'pass'}

def failBecause(reason, tag=None):
    # Result record for a failing test; `tag` marks special failure
    # kinds (do_test treats tag == 'stat' as a stat-test failure).
    return {'passFail': 'fail', 'reason': reason, 'tag': tag}
944
# -----------------------------------------------------------------------------
# Generic command tests

# A generic command test is expected to run and exit successfully.
#
# The expected exit code can be changed via exit_code() as normal, and
# the expected stdout/stderr are stored in <testname>.stdout and
# <testname>.stderr.  The output of the command can be ignored
# altogether by using run_command_ignore_output instead of
# run_command.

def run_command( name, way, cmd ):
    return simple_run( name, '', cmd, '' )
958
# -----------------------------------------------------------------------------
# GHCi tests

def ghci_script( name, way, script):
    # Run ghci with the given script file on its stdin.
    flags = ' '.join(get_compiler_flags())
    way_flags = ' '.join(config.way_flags[way])

    # We pass HC and HC_OPTS as environment variables, so that the
    # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
    cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {flags} {way_flags}'
          ).format(flags=flags, way_flags=way_flags)

    getTestOpts().stdin = script
    return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
973
# -----------------------------------------------------------------------------
# Compile-only tests

def compile( name, way, extra_hc_opts ):
    # Compile a single module; expect success.
    return do_compile( name, way, 0, '', [], extra_hc_opts )

def compile_fail( name, way, extra_hc_opts ):
    # Compile a single module; expect compilation to fail.
    return do_compile( name, way, 1, '', [], extra_hc_opts )

def multimod_compile( name, way, top_mod, extra_hc_opts ):
    # Compile a multi-module program rooted at top_mod; expect success.
    return do_compile( name, way, 0, top_mod, [], extra_hc_opts )

def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
    # Compile a multi-module program; expect compilation to fail.
    return do_compile( name, way, 1, top_mod, [], extra_hc_opts )

def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Build extra_mods first, then the program rooted at top_mod.
    return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)

def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
    return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
994
def do_compile(name, way, should_fail, top_mod, extra_mods, extra_hc_opts):
    # Compile the test and compare the compiler's stderr against the
    # expected <name>.stderr file.
    # print 'Compile only, extra args = ', extra_hc_opts

    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']

    result = simple_build(name, way, extra_hc_opts, should_fail, top_mod, 0, 1)

    if badResult(result):
        return result

    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).

    expected_stderr_file = find_expected_file(name, 'stderr')
    actual_stderr_file = add_suffix(name, 'comp.stderr')

    if not compare_outputs(way, 'stderr',
                           join_normalisers(getTestOpts().extra_errmsg_normaliser,
                                            normalise_errmsg),
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=normalise_whitespace):
        return failBecause('stderr mismatch')

    # no problems found, this test passed
    return passed()
1024
def compile_cmp_asm( name, way, extra_hc_opts ):
    """Compile a .cmm file with -keep-s-files and compare the generated
    assembly against the expected <name>.asm file."""
    # print 'Compile only, extra args = ', extra_hc_opts
    # (debug print disabled, for consistency with do_compile; it wrote
    # noise to the driver's stdout on every run)
    result = simple_build(name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)

    if badResult(result):
        return result

    # The actual assembly must match the expected .asm file, modulo
    # the usual normalisation.

    expected_asm_file = find_expected_file(name, 'asm')
    actual_asm_file = add_suffix(name, 's')

    if not compare_outputs(way, 'asm',
                           join_normalisers(normalise_errmsg, normalise_asm),
                           expected_asm_file, actual_asm_file):
        return failBecause('asm mismatch')

    # no problems found, this test passed
    return passed()
1046
1047 # -----------------------------------------------------------------------------
1048 # Compile-and-run tests
1049
def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts ):
    """Build the extra modules, then either interpret the program (ghci
    ways) or compile, link and run it."""
    extras = extras_build(way, extra_mods, extra_hc_opts)
    if badResult(extras):
        return extras
    extra_hc_opts = extras['hc_opts']

    # ghci ways interpret the program instead of compiling it.
    if way.startswith('ghci'):
        return interpreter_run(name, way, extra_hc_opts, top_mod)

    build = simple_build(name, way, extra_hc_opts, 0, top_mod, 1, 1)
    if badResult(build):
        return build

    # we don't check the compiler's stderr for a compile-and-run test
    return simple_run(name, way, './' + name, getTestOpts().extra_run_opts)
1069
def compile_and_run( name, way, extra_hc_opts ):
    # Single-module compile-and-run: no root module, no extra modules.
    return compile_and_run__( name, way, '', [], extra_hc_opts)
1072
def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
    # Multi-module compile-and-run driven by --make from top_mod.
    return compile_and_run__( name, way, top_mod, [], extra_hc_opts)
1075
def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Like multimod_compile_and_run, with separately-built extra_mods.
    return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
1078
def stats( name, way, stats_file ):
    """Check a run's +RTS -t stats file against the test's expected
    stats_range_fields."""
    return checkStats(name, way, stats_file, getTestOpts().stats_range_fields)
1082
1083 # -----------------------------------------------------------------------------
1084 # Check -t stats info
1085
def checkStats(name, way, stats_file, range_fields):
    """Check machine-readable RTS stats in stats_file.

    range_fields maps a stats field name to (expected, dev): the measured
    value must lie within dev% of expected.  Returns a pass/fail result;
    out-of-range fields are tagged 'stat' so they are reported separately.
    """
    full_name = name + '(' + way + ')'

    result = passed()
    if range_fields:
        try:
            with open(in_testdir(stats_file)) as f:
                contents = f.read()
        except IOError as e:
            return failBecause(str(e))

        for (field, (expected, dev)) in range_fields.items():
            m = re.search(r'\("' + field + r'", "([0-9]+)"\)', contents)
            if m is None:
                print('Failed to find field: ', field)
                result = failBecause('no such stats field')
                # BUG FIX: skip this field instead of falling through to
                # m.group(1), which raised AttributeError when the field
                # was missing from the stats file.
                continue
            val = int(m.group(1))

            lowerBound = trunc(expected * ((100 - float(dev))/100))
            upperBound = trunc(0.5 + ceil(expected * ((100 + float(dev))/100)))

            deviation = round(((float(val) * 100)/ expected) - 100, 1)

            if val < lowerBound:
                print(field, 'value is too low:')
                print('(If this is because you have improved GHC, please')
                print('update the test so that GHC doesn\'t regress again)')
                result = failBecause('stat too good', tag='stat')
            if val > upperBound:
                print(field, 'value is too high:')
                result = failBecause('stat not good enough', tag='stat')

            # Show the numbers on failure, or always at high verbosity.
            if val < lowerBound or val > upperBound or config.verbose >= 4:
                length = max(len(str(x)) for x in [expected, lowerBound, upperBound, val])

                def display(descr, val, extra):
                    print(descr, str(val).rjust(length), extra)

                display(' Expected ' + full_name + ' ' + field + ':', expected, '+/-' + str(dev) + '%')
                display(' Lower bound ' + full_name + ' ' + field + ':', lowerBound, '')
                display(' Upper bound ' + full_name + ' ' + field + ':', upperBound, '')
                display(' Actual ' + full_name + ' ' + field + ':', val, '')
                if val != expected:
                    display(' Deviation ' + full_name + ' ' + field + ':', deviation, '%')

    return result
1133
1134 # -----------------------------------------------------------------------------
1135 # Build a single-module program
1136
def extras_build( way, extra_mods, extra_hc_opts ):
    """Compile each (module, opts) pair in extra_mods.  For non-Haskell
    sources the resulting object file is appended to the hc options so
    the main build links it in.  Returns pass/fail plus the extended
    options."""
    for mod, mod_opts in extra_mods:
        result = simple_build(mod, way, mod_opts + ' ' + extra_hc_opts, 0, '', 0, 0)
        if not mod.endswith(('.hs', '.lhs')):
            extra_hc_opts = extra_hc_opts + ' ' + replace_suffix(mod, 'o')
        if badResult(result):
            return result

    return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
1146
def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf):
    """Run the compiler once for this test and check the exit code.

    name          -- test name, used to derive source/output file names
    way           -- the way being run (selects extra compiler flags)
    extra_hc_opts -- additional compiler options
    should_fail   -- non-zero if compilation is expected to fail
    top_mod       -- root module for a --make build, or '' for a plain
                     single-file compile
    link          -- non-zero to link an executable (-o name)
    addsuf        -- non-zero to append the source suffix to 'name'

    Returns a pass/fail result; compiler stats are checked too when
    compiler_stats_range_fields is set.
    """
    opts = getTestOpts()

    # Redirect stdout and stderr to the same file
    stdout = in_testdir(name, 'comp.stderr')
    stderr = subprocess.STDOUT

    # Work out the source file name to pass to the compiler.
    if top_mod != '':
        srcname = top_mod
    elif addsuf:
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = name

    # Work out the mode: --make for multi-module, -o to link, -c otherwise.
    if top_mod != '':
        to_do = '--make '
        if link:
            to_do = to_do + '-o ' + name
    elif link:
        to_do = '-o ' + name
    else:
        to_do = '-c' # just compile

    # Ask the compiler's RTS for machine-readable stats if the test
    # checks them (verified by checkStats below).
    stats_file = name + '.comp.stats'
    if opts.compiler_stats_range_fields:
        extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'

    # Required by GHC 7.3+, harmless for earlier versions:
    if (getTestOpts().c_src or
        getTestOpts().objc_src or
        getTestOpts().objcpp_src or
        getTestOpts().cmm_src):
        extra_hc_opts += ' -no-hs-main '

    if getTestOpts().compile_cmd_prefix == '':
        cmd_prefix = ''
    else:
        cmd_prefix = getTestOpts().compile_cmd_prefix + ' '

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    # The doubled braces around {{compiler}} survive this .format over
    # locals(); runCmd substitutes config.compiler later.
    cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
           '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())

    exit_code = runCmd(cmd, None, stdout, stderr, opts.compile_timeout_multiplier)

    # An unexpected compile failure: show the captured stderr.
    if exit_code != 0 and not should_fail:
        if config.verbose >= 1 and _expect_pass(way):
            print('Compile failed (exit code {0}) errors were:'.format(exit_code))
            actual_stderr_path = in_testdir(name, 'comp.stderr')
            if_verbose_dump(1, actual_stderr_path)

    # ToDo: if the sub-shell was killed by ^C, then exit

    statsResult = checkStats(name, way, stats_file, opts.compiler_stats_range_fields)

    if badResult(statsResult):
        return statsResult

    # The exit code must agree with should_fail.
    if should_fail:
        if exit_code == 0:
            return failBecause('exit code 0')
    else:
        if exit_code != 0:
            return failBecause('exit code non-0')

    return passed()
1215
1216 # -----------------------------------------------------------------------------
1217 # Run a program and check its output
1218 #
1219 # If testname.stdin exists, route input from that, else
1220 # from /dev/null. Route output to testname.run.stdout and
1221 # testname.run.stderr. Returns the exit code of the run.
1222
def simple_run(name, way, prog, extra_run_opts):
    """Run the compiled program and validate exit code, outputs, and
    (when the relevant RTS flags are present) heap/time profiles and
    runtime statistics.
    """
    opts = getTestOpts()

    # figure out what to use for stdin
    if opts.stdin:
        stdin = in_testdir(opts.stdin)
    elif os.path.exists(in_testdir(name, 'stdin')):
        stdin = in_testdir(name, 'stdin')
    else:
        stdin = None

    # stdout always goes to a file; stderr may be merged into it.
    stdout = in_testdir(name, 'run.stdout')
    if opts.combined_output:
        stderr = subprocess.STDOUT
    else:
        stderr = in_testdir(name, 'run.stderr')

    my_rts_flags = rts_flags(way)

    # Ask the RTS for machine-readable stats (checked by checkStats at
    # the end) when the test declares stats_range_fields.
    stats_file = name + '.stats'
    if opts.stats_range_fields:
        stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    else:
        stats_args = ''

    # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
    cmd = prog + stats_args + ' ' + my_rts_flags + ' ' + extra_run_opts

    if opts.cmd_wrapper != None:
        cmd = opts.cmd_wrapper(cmd)

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    # run the command
    exit_code = runCmd(cmd, stdin, stdout, stderr, opts.run_timeout_multiplier)

    # check the exit code
    if exit_code != opts.exit_code:
        if config.verbose >= 1 and _expect_pass(way):
            print('Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')')
            dump_stdout(name)
            dump_stderr(name)
        return failBecause('bad exit code')

    # -h / -p in the way's RTS flags imply profiles that must be checked.
    check_hp = '-h' in my_rts_flags
    check_prof = '-p' in my_rts_flags

    if not opts.ignore_output:
        bad_stderr = not opts.combined_output and not check_stderr_ok(name, way)
        bad_stdout = not check_stdout_ok(name, way)
        if bad_stderr:
            return failBecause('bad stderr')
        if bad_stdout:
            return failBecause('bad stdout')
        # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
        if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
            return failBecause('bad heap profile')
        if check_prof and not check_prof_ok(name, way):
            return failBecause('bad profile')

    return checkStats(name, way, stats_file, opts.stats_range_fields)
1284
def rts_flags(way):
    """Render the way's RTS options as a '+RTS ... -RTS' group ('' if none)."""
    args = config.way_rts_flags.get(way, [])
    if not args:
        return ''
    return '+RTS {} -RTS'.format(' '.join(args))
1288
1289 # -----------------------------------------------------------------------------
1290 # Run a program in the interpreter and check its output
1291
def interpreter_run(name, way, extra_hc_opts, top_mod):
    """Run the test inside GHCi (for the ghci ways).

    A generated script sets prog/args to match the compiled environment,
    writes delimiter lines to stdout and stderr, then runs Main.main.
    Afterwards GHCi's own output is split away from the program's output
    using the delimiter, and exit code and outputs are checked as for a
    compiled run.
    """
    opts = getTestOpts()

    stdout = in_testdir(name, 'interp.stdout')
    stderr = in_testdir(name, 'interp.stderr')
    script = in_testdir(name, 'genscript')

    if opts.combined_output:
        framework_fail(name, 'unsupported',
                       'WAY=ghci and combined_output together is not supported')

    if (top_mod == ''):
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = top_mod

    delimiter = '===== program output begins here\n'

    with open(script, 'w') as f:
        # set the prog name and command-line args to match the compiled
        # environment.
        f.write(':set prog ' + name + '\n')
        f.write(':set args ' + opts.extra_run_opts + '\n')
        # Add marker lines to the stdout and stderr output files, so we
        # can separate GHCi's output from the program's.
        f.write(':! echo ' + delimiter)
        f.write(':! echo 1>&2 ' + delimiter)
        # Set stdout to be line-buffered to match the compiled environment.
        f.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
        # wrapping in GHC.TopHandler.runIO ensures we get the same output
        # in the event of an exception as for the compiled program.
        f.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')

    # Append the test's stdin (if any) to the script: GHCi consumes the
    # whole script file as its input.
    stdin = in_testdir(opts.stdin if opts.stdin else add_suffix(name, 'stdin'))
    if os.path.exists(stdin):
        os.system('cat "{0}" >> "{1}"'.format(stdin, script))

    flags = ' '.join(get_compiler_flags() + config.way_flags[way])

    # {{compiler}} survives this .format; runCmd substitutes it later.
    cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())

    if getTestOpts().cmd_wrapper != None:
        cmd = opts.cmd_wrapper(cmd);

    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())

    exit_code = runCmd(cmd, script, stdout, stderr, opts.run_timeout_multiplier)

    # split the stdout into compilation/program output
    split_file(stdout, delimiter,
               in_testdir(name, 'comp.stdout'),
               in_testdir(name, 'run.stdout'))
    split_file(stderr, delimiter,
               in_testdir(name, 'comp.stderr'),
               in_testdir(name, 'run.stderr'))

    # check the exit code
    if exit_code != getTestOpts().exit_code:
        print('Wrong exit code (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
        dump_stdout(name)
        dump_stderr(name)
        return failBecause('bad exit code')

    # ToDo: if the sub-shell was killed by ^C, then exit

    if getTestOpts().ignore_output or (check_stderr_ok(name, way) and
                                       check_stdout_ok(name, way)):
        return passed()
    else:
        return failBecause('bad stdout or stderr')
1363
def split_file(in_fn, delimiter, out1_fn, out2_fn):
    """Split in_fn at the first line equal to delimiter (ignoring leading
    whitespace): preceding lines go to out1_fn, following lines to
    out2_fn.  The delimiter line itself is written to neither output.
    """
    # See Note [Universal newlines].
    # BUG FIX: the input handle was opened but never closed; use 'with'
    # so all three files are closed deterministically, even on error.
    with io.open(in_fn, 'r', encoding='utf8', errors='replace', newline=None) as infile:
        with io.open(out1_fn, 'w', encoding='utf8', newline='') as out1:
            line = infile.readline()
            while re.sub(r'^\s*', '', line) != delimiter and line != '':
                out1.write(line)
                line = infile.readline()

        with io.open(out2_fn, 'w', encoding='utf8', newline='') as out2:
            line = infile.readline()
            while line != '':
                out2.write(line)
                line = infile.readline()
1381
1382 # -----------------------------------------------------------------------------
1383 # Utils
def get_compiler_flags():
    """Assemble the compiler flags for the current test: the always-on
    flags, the test's extra options, and -outputdir when configured."""
    opts = getTestOpts()

    flags = list(opts.compiler_always_flags)
    flags.append(opts.extra_hc_opts)

    if opts.outputdir is not None:
        flags.extend(["-outputdir", opts.outputdir])

    return flags
1395
def check_stdout_ok(name, way):
    """Compare the program's stdout with the expected sample, or hand it
    to the test's custom check_stdout hook when one is configured."""
    actual_stdout_file = add_suffix(name, 'run.stdout')
    expected_stdout_file = find_expected_file(name, 'stdout')
    normaliser = join_normalisers(normalise_output, getTestOpts().extra_normaliser)

    custom_check = getTestOpts().check_stdout
    if custom_check:
        return custom_check(in_testdir(actual_stdout_file), normaliser)

    return compare_outputs(way, 'stdout', normaliser,
                           expected_stdout_file, actual_stdout_file)
1409
def dump_stdout( name ):
    """Echo the test's captured run.stdout to the console."""
    contents = read_no_crs(in_testdir(name, 'run.stdout'))
    print('Stdout:')
    print(contents)
1413
def check_stderr_ok(name, way):
    """Compare the program's stderr against its expected sample."""
    expected_stderr_file = find_expected_file(name, 'stderr')
    actual_stderr_file = add_suffix(name, 'run.stderr')
    normaliser = join_normalisers(normalise_errmsg,
                                  getTestOpts().extra_errmsg_normaliser)

    return compare_outputs(way, 'stderr', normaliser,
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=normalise_whitespace)
1422
def dump_stderr( name ):
    """Echo the test's captured run.stderr to the console."""
    contents = read_no_crs(in_testdir(name, 'run.stderr'))
    print("Stderr:")
    print(contents)
1426
def read_no_crs(file):
    """Read a text file, translating '\\r\\n' line endings to '\\n'.
    Returns '' when the file cannot be read.
    See Note [Universal newlines]."""
    try:
        # BUG FIX: the old code said 'h.close' without parentheses, which
        # referenced the method but never called it, leaking the handle;
        # 'with' closes it even when read() raises.  The bare 'except:'
        # is narrowed to I/O errors (decode errors can't occur thanks to
        # errors='replace').
        with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
            return h.read()
    except (IOError, OSError):
        # On Windows, if the program fails very early, it seems the
        # files stdout/stderr are redirected to may not get created
        return ''
1439
def write_file(file, str):
    """Write str to file without newline translation ('\\n' stays '\\n').
    See Note [Universal newlines]."""
    # BUG FIX: the old code said 'h.close' without parentheses, which
    # never actually closed the handle; 'with' closes it deterministically.
    with io.open(file, 'w', encoding='utf8', newline='') as h:
        h.write(str)
1445
1446 # Note [Universal newlines]
1447 #
1448 # We don't want to write any Windows style line endings ever, because
1449 # it would mean that `make accept` would touch every line of the file
1450 # when switching between Linux and Windows.
1451 #
1452 # Furthermore, when reading a file, it is convenient to translate all
1453 # Windows style endings to '\n', as it simplifies searching or massaging
1454 # the content.
1455 #
1456 # Solution: use `io.open` instead of `open`
1457 # * when reading: use newline=None to translate '\r\n' to '\n'
1458 # * when writing: use newline='' to not translate '\n' to '\r\n'
1459 #
1460 # See https://docs.python.org/2/library/io.html#io.open.
1461 #
# This should work with both python2 and python3, and with both mingw*-
# and msys2-style Python.
1464 #
1465 # Do note that io.open returns unicode strings. So we have to specify
1466 # the expected encoding. But there is at least one file which is not
1467 # valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
1468 # Another solution would be to open files in binary mode always, and
1469 # operate on bytes.
1470
def check_hp_ok(name):
    """Run hp2ps on the test's heap profile and, when GhostScript is
    available, validate the generated PostScript.  Returns True when the
    profile is acceptable."""
    opts = getTestOpts()

    # do not qualify for hp2ps because we should be in the right directory
    hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())

    if runCmd(hp2psCmd) != 0:
        print("hp2ps error when processing heap profile for " + name)
        return False

    actual_ps_path = in_testdir(name, 'ps')
    if not os.path.exists(actual_ps_path):
        print("hp2ps did not generate PostScript for " + name)
        return False

    if not gs_working:
        # assume postscript is valid without ghostscript
        return True

    if runCmd(genGSCmd(actual_ps_path)) == 0:
        return True

    # FIX: added the missing space before "is" in this message, and made
    # this path return False explicitly (it previously fell off the end,
    # returning None).
    print("hp2ps output for " + name + " is not valid PostScript")
    return False
1496
def check_prof_ok(name, way):
    """Compare the run's .prof output against prof.sample, when a sample
    exists for this platform.  Returns True when acceptable."""
    expected_prof_file = find_expected_file(name, 'prof.sample')
    expected_prof_path = in_testdir(expected_prof_file)

    # Check actual prof file only if we have an expected prof file to
    # compare it with.  NOTE(review): this existence check looks in the
    # testdir while compare_outputs reads the expected file from the
    # srcdir -- confirm that is intentional.
    if not os.path.exists(expected_prof_path):
        return True

    actual_prof_file = add_suffix(name, 'prof')
    actual_prof_path = in_testdir(actual_prof_file)

    if not os.path.exists(actual_prof_path):
        print(actual_prof_path + " does not exist")
        return False

    if os.path.getsize(actual_prof_path) == 0:
        print(actual_prof_path + " is empty")
        return False

    return compare_outputs(way, 'prof', normalise_prof,
                           expected_prof_file, actual_prof_file,
                           whitespace_normaliser=normalise_whitespace)
1520
1521 # Compare expected output to actual output, and optionally accept the
1522 # new output. Returns true if output matched or was accepted, false
1523 # otherwise. See Note [Output comparison] for the meaning of the
1524 # normaliser and whitespace_normaliser parameters.
def compare_outputs(way, kind, normaliser, expected_file, actual_file,
                    whitespace_normaliser=lambda x:x):
    """Compare expected vs actual output; optionally accept new output.

    Returns truthy (1) on match or acceptance, falsy (0) otherwise.  See
    Note [Output comparison] for normaliser/whitespace_normaliser.
    """
    # Expected samples live in the source dir; actual output in the testdir.
    expected_path = in_srcdir(expected_file)
    actual_path = in_testdir(actual_file)

    if os.path.exists(expected_path):
        expected_str = normaliser(read_no_crs(expected_path))
        # Create the .normalised file in the testdir, not in the srcdir.
        expected_normalised_file = add_suffix(expected_file, 'normalised')
        expected_normalised_path = in_testdir(expected_normalised_file)
    else:
        # A missing sample file means empty output is expected.
        expected_str = ''
        expected_normalised_path = '/dev/null'

    actual_raw = read_no_crs(actual_path)
    actual_str = normaliser(actual_raw)

    # See Note [Output comparison].
    if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
        return 1
    else:
        if config.verbose >= 1 and _expect_pass(way):
            print('Actual ' + kind + ' output differs from expected:')

        if expected_normalised_path != '/dev/null':
            write_file(expected_normalised_path, expected_str)

        actual_normalised_path = add_suffix(actual_path, 'normalised')
        write_file(actual_normalised_path, actual_str)

        if config.verbose >= 1 and _expect_pass(way):
            # See Note [Output comparison].
            r = os.system('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
                                                        actual_normalised_path))

            # If for some reason there were no non-whitespace differences,
            # then do a full diff
            if r == 0:
                r = os.system('diff -u "{0}" "{1}"'.format(expected_normalised_path,
                                                           actual_normalised_path))

        # 'make accept' handling: never overwrite the sample for a test
        # that is expected to fail for this way.
        if config.accept and (getTestOpts().expect == 'fail' or
                              way in getTestOpts().expect_fail_for):
            if_verbose(1, 'Test is expected to fail. Not accepting new output.')
            return 0
        elif config.accept and actual_raw:
            if_verbose(1, 'Accepting new output.')
            write_file(expected_path, actual_raw)
            return 1
        elif config.accept:
            # Empty actual output: the sample is no longer needed at all.
            if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
            os.remove(expected_path)
            return 1
        else:
            return 0
1581
1582 # Note [Output comparison]
1583 #
1584 # We do two types of output comparison:
1585 #
1586 # 1. To decide whether a test has failed. We apply a `normaliser` and an
1587 # optional `whitespace_normaliser` to the expected and the actual
1588 # output, before comparing the two.
1589 #
1590 # 2. To show as a diff to the user when the test indeed failed. We apply
1591 # the same `normaliser` function to the outputs, to make the diff as
1592 # small as possible (only showing the actual problem). But we don't
1593 # apply the `whitespace_normaliser` here, because it might completely
1594 # squash all whitespace, making the diff unreadable. Instead we rely
1595 # on the `diff` program to ignore whitespace changes as much as
1596 # possible (#10152).
1597
def normalise_whitespace( str ):
    """Collapse each run of whitespace to one space and trim the ends."""
    return u' '.join(str.split())
1601
# Matches the location part of a "..., called at file:line:col in pkg:" line.
callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')

def normalise_callstacks(s):
    """Make HasCallStack output stable across platforms, lines and ways."""
    opts = getTestOpts()

    def repl(match):
        location = normalise_slashes_(match.group(1))
        return ', called at {0}:<line>:<column> in <package-id>:'.format(location)

    # Ignore line number differences in call stacks (#10834).
    s = callSite_re.sub(repl, s)
    # Ignore the change in how we identify implicit call-stacks
    s = s.replace('from ImplicitParams', 'from HasCallStack')
    if not opts.keep_prof_callstacks:
        # Don't output prof callstacks. Test output should be
        # independent from the WAY we run the test.
        s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
    return s
1619
# Matches the two fingerprint words of a "TyCon <w1>## <w2>##" application.
tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)

def normalise_type_reps(str):
    """ Normalise out fingerprints from Typeable TyCon representations """
    return tyCon_re.sub('TyCon FINGERPRINT FINGERPRINT ', str)
1625
def normalise_errmsg( str ):
    """Normalise error-messages emitted via stderr"""
    # IBM AIX's `ld` is a bit chatty
    if opsys('aix'):
        str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    str = normalise_callstacks(str)
    str = normalise_type_reps(str)

    # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
    # the colon is there because it appears in error messages; this
    # hacky solution is used in place of more sophisticated filename
    # mangling
    str = re.sub('([^\\s])\\.exe', '\\1', str)
    # normalise slashes, minimise Windows/Unix filename differences
    str = re.sub('\\\\', '/', str)
    # The inplace ghc's are called ghc-stage[123] to avoid filename
    # collisions, so we need to normalise that to just "ghc"
    str = re.sub('ghc-stage[123]', 'ghc', str)
    # Error messages sometimes contain the integer implementation package;
    # hide which one (integer-gmp/integer-simple) and which version was used
    str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
    # Also filter out bullet characters. This is because bullets are used to
    # separate error sections, and tests shouldn't be sensitive to how the
    # the division happens.
    # (When str is a bytes object, the bullet must be utf8-encoded to match.)
    bullet = u'•'.encode('utf8') if isinstance(str, bytes) else u'•'
    str = str.replace(bullet, '')
    return str
1656
1657 # normalise a .prof file, so that we can reasonably compare it against
1658 # a sample. This doesn't compare any of the actual profiling data,
1659 # only the shape of the profile and the number of entries.
def normalise_prof (str):
    """Normalise a .prof file, so that we can reasonably compare it against
    a sample.  This doesn't compare any of the actual profiling data,
    only the shape of the profile and the number of entries."""
    # strip everything up to the line beginning "COST CENTRE"
    str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)

    # strip results for CAFs, these tend to change unpredictably
    str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)

    # XXX Ignore Main.main. Sometimes this appears under CAF, and
    # sometimes under MAIN.
    str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)

    # We have something like this:
    #
    # MAIN MAIN <built-in> 53 0 0.0 0.2 0.0 100.0
    # CAF Main <entire-module> 105 0 0.0 0.3 0.0 62.5
    # readPrec Main Main_1.hs:7:13-16 109 1 0.0 0.6 0.0 0.6
    # readPrec Main Main_1.hs:4:13-16 107 1 0.0 0.6 0.0 0.6
    # main Main Main_1.hs:(10,1)-(20,20) 106 1 0.0 20.2 0.0 61.0
    # == Main Main_1.hs:7:25-26 114 1 0.0 0.0 0.0 0.0
    # == Main Main_1.hs:4:25-26 113 1 0.0 0.0 0.0 0.0
    # showsPrec Main Main_1.hs:7:19-22 112 2 0.0 1.2 0.0 1.2
    # showsPrec Main Main_1.hs:4:19-22 111 2 0.0 0.9 0.0 0.9
    # readPrec Main Main_1.hs:7:13-16 110 0 0.0 18.8 0.0 18.8
    # readPrec Main Main_1.hs:4:13-16 108 0 0.0 19.9 0.0 19.9
    #
    # then we remove all the specific profiling data, leaving only the cost
    # centre name, module, src, and entries, to end up with this: (modulo
    # whitespace between columns)
    #
    # MAIN MAIN <built-in> 0
    # readPrec Main Main_1.hs:7:13-16 1
    # readPrec Main Main_1.hs:4:13-16 1
    # == Main Main_1.hs:7:25-26 1
    # == Main Main_1.hs:4:25-26 1
    # showsPrec Main Main_1.hs:7:19-22 2
    # showsPrec Main Main_1.hs:4:19-22 2
    # readPrec Main Main_1.hs:7:13-16 0
    # readPrec Main Main_1.hs:4:13-16 0

    # Split 9 whitespace-separated groups, take columns 1 (cost-centre), 2
    # (module), 3 (src), and 5 (entries). SCC names can't have whitespace, so
    # this works fine.
    str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
                 '\\1 \\2 \\3 \\5\n', str)
    return str
1705
def normalise_slashes_( str ):
    """Convert Windows backslashes to forward slashes."""
    return str.replace('\\', '/')
1709
def normalise_exe_( str ):
    """Drop every Windows '.exe' suffix occurrence."""
    return str.replace('.exe', '')
1713
def normalise_output( str ):
    """Normalisations applied to the program's own stdout/stderr output."""
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    # Remove a .exe extension (for Windows); this can occur in error
    # messages generated by the program.
    str = re.sub('([^\\s])\\.exe', '\\1', str)
    return normalise_type_reps(normalise_callstacks(str))
1725
def normalise_asm( str ):
    """Reduce assembly to a comparable skeleton: keep mnemonics (plus the
    target of call instructions), dropping assembler directives, operands
    and blank lines."""
    directive = re.compile('^[ \t]*\\..*$')
    kept = []
    for line in str.split('\n'):
        # Drop metadata directives (e.g. ".type")
        if directive.match(line):
            continue
        tokens = re.sub('@plt', '', line).lstrip().split()
        # Drop empty lines.
        if not tokens:
            continue
        if tokens[0] == 'call':
            # Keep the call target so cross-function structure is visible.
            kept.append(tokens[0] + ' ' + tokens[1])
        else:
            kept.append(tokens[0])
    return u'\n'.join(kept)
1746
def if_verbose( n, s ):
    """Print s only when the configured verbosity is at least n."""
    if n <= config.verbose:
        print(s)
1750
def if_verbose_dump( n, f ):
    """At verbosity >= n, print the contents of file f, or a blank line
    when it cannot be read."""
    if config.verbose >= n:
        try:
            # BUG FIX: the handle returned by open() was never closed;
            # 'with' closes it deterministically.
            with open(f) as h:
                print(h.read())
        except Exception:
            # Best-effort, as before: any problem (missing file, decode
            # error) just prints an empty line.
            print('')
1757
def runCmd(cmd, stdin=None, stdout=None, stderr=None, timeout_multiplier=1.0):
    """Run a Bourne-shell command line under the timeout program.

    stdin/stdout/stderr are file *names* (stderr may also be
    subprocess.STDOUT to merge it into stdout).  Returns the exit code.
    """
    timeout_prog = strip_quotes(config.timeout_prog)
    timeout = str(int(ceil(config.timeout * timeout_multiplier)))

    # Format cmd using config. Example: cmd='{hpc} report A.tix'
    cmd = cmd.format(**config.__dict__)
    if_verbose(3, cmd + ('< ' + os.path.basename(stdin) if stdin else ''))

    # Rebind the file-name arguments to open handles for subprocess.
    if stdin:
        stdin = open(stdin, 'r')
    if stdout:
        stdout = open(stdout, 'w')
    if stderr and stderr is not subprocess.STDOUT:
        stderr = open(stderr, 'w')

    # cmd is a complex command in Bourne-shell syntax
    # e.g (cd . && 'C:/users/simonpj/HEAD/inplace/bin/ghc-stage2' ...etc)
    # Hence it must ultimately be run by a Bourne shell. It's timeout's job
    # to invoke the Bourne shell
    r = subprocess.call([timeout_prog, timeout, cmd],
                        stdin=stdin, stdout=stdout, stderr=stderr)

    if stdin:
        stdin.close()
    if stdout:
        stdout.close()
    if stderr and stderr is not subprocess.STDOUT:
        stderr.close()

    if r == 98:
        # The python timeout program uses 98 to signal that ^C was pressed
        stopNow()
    if r == 99 and getTestOpts().exit_code != 99:
        # Only print a message when timeout killed the process unexpectedly.
        if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
    return r
1794
1795 # -----------------------------------------------------------------------------
1796 # checking if ghostscript is available for checking the output of hp2ps
1797
def genGSCmd(psfile):
    """Build the GhostScript command line that validates psfile."""
    return '{gs} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "%s"' % (psfile,)
1800
def gsNotWorking():
    """Report that GhostScript cannot be used to validate hp2ps output."""
    # NOTE(review): gs_working is declared global but never assigned here;
    # the module-level probe below leaves it 0 -- confirm the declaration
    # is vestigial.
    global gs_working
    print("GhostScript not available for hp2ps tests")
1804
# Probe, once at module load, whether GhostScript can be used to validate
# hp2ps output: gs must succeed on a known-good .ps file AND fail on a
# known-bad one.  The result is consulted by check_hp_ok.
# (A 'global' statement at module scope is a no-op; kept as-is.)
global gs_working
gs_working = 0
if config.have_profiling:
    if config.gs != '':
        resultGood = runCmd(genGSCmd(config.confdir + '/good.ps'));
        if resultGood == 0:
            resultBad = runCmd(genGSCmd(config.confdir + '/bad.ps') +
                                       ' >/dev/null 2>&1')
            if resultBad != 0:
                print("GhostScript available for hp2ps tests")
                gs_working = 1;
            else:
                gsNotWorking();
        else:
            gsNotWorking();
    else:
        gsNotWorking();
1822
def add_suffix( name, suffix ):
    """Return 'name.suffix', or name unchanged when suffix is empty."""
    return name + '.' + suffix if suffix != '' else name
1828
def add_hs_lhs_suffix(name):
    """Append the source-file extension implied by the test's source kind."""
    opts = getTestOpts()
    if opts.c_src:
        ext = 'c'
    elif opts.cmm_src:
        ext = 'cmm'
    elif opts.objc_src:
        ext = 'm'
    elif opts.objcpp_src:
        ext = 'mm'
    elif opts.literate:
        ext = 'lhs'
    else:
        ext = 'hs'
    return add_suffix(name, ext)
1842
def replace_suffix( name, suffix ):
    """Swap name's extension (if any) for the given suffix."""
    root = os.path.splitext(name)[0]
    return root + '.' + suffix
1846
def in_testdir(name, suffix=''):
    # Path of name(.suffix) inside the current test's working directory.
    return os.path.join(getTestOpts().testdir, add_suffix(name, suffix))
1849
def in_srcdir(name, suffix=''):
    # Path of name(.suffix) inside the test's source directory.
    return os.path.join(getTestOpts().srcdir, add_suffix(name, suffix))
1852
1853 # Finding the sample output. The filename is of the form
1854 #
1855 # <test>.stdout[-ws-<wordsize>][-<platform>]
1856 #
1857 def find_expected_file(name, suff):
1858 basename = add_suffix(name, suff)
1859
1860 files = [basename + ws + plat
1861 for plat in ['-' + config.platform, '-' + config.os, '']
1862 for ws in ['-ws-' + config.wordsize, '']]
1863
1864 for f in files:
1865 if os.path.exists(in_srcdir(f)):
1866 return f
1867
1868 return basename
1869
def cleanup():
    # Delete the test's working directory; ignore_errors because it may
    # already be gone, or files in it may still be locked (Windows).
    shutil.rmtree(getTestOpts().testdir, ignore_errors=True)
1872
1873 # -----------------------------------------------------------------------------
1874 # Return a list of all the files ending in '.T' below directories roots.
1875
def findTFiles(roots):
    """Yield every path ending in '.T' beneath the given root directories."""
    for root in roots:
        for dirpath, dirnames, filenames in os.walk(root, topdown=True):
            # Never pick up .T files in uncleaned .run directories; sort
            # for a deterministic traversal order.
            dirnames[:] = sorted(d for d in dirnames
                                 if not d.endswith(testdir_suffix))
            for filename in filenames:
                if filename.endswith('.T'):
                    yield os.path.join(dirpath, filename)
1885
1886 # -----------------------------------------------------------------------------
1887 # Output a test summary to the specified file object
1888
def summary(t, file, short=False):
    """Write a summary of the test-run results t to the file object.

    When short is True only the list of unexpected tests is printed.
    """
    file.write('\n')
    printUnexpectedTests(file, [t.unexpected_passes, t.unexpected_failures, t.unexpected_stat_failures])

    if short:
        # Only print the list of unexpected tests above.
        return

    file.write('SUMMARY for test run started at '
               + time.strftime("%c %Z", t.start_time) + '\n'
               + str(datetime.timedelta(seconds=
                    round(time.time() - time.mktime(t.start_time)))).rjust(8)
               + ' spent to go through\n'
               + repr(t.total_tests).rjust(8)
               + ' total tests, which gave rise to\n'
               + repr(t.total_test_cases).rjust(8)
               + ' test cases, of which\n'
               + repr(t.n_tests_skipped).rjust(8)
               + ' were skipped\n'
               + '\n'
               + repr(t.n_missing_libs).rjust(8)
               + ' had missing libraries\n'
               + repr(t.n_expected_passes).rjust(8)
               + ' expected passes\n'
               + repr(t.n_expected_failures).rjust(8)
               + ' expected failures\n'
               + '\n'
               + repr(t.n_framework_failures).rjust(8)
               + ' caused framework failures\n'
               + repr(t.n_unexpected_passes).rjust(8)
               + ' unexpected passes\n'
               + repr(t.n_unexpected_failures).rjust(8)
               + ' unexpected failures\n'
               + repr(t.n_unexpected_stat_failures).rjust(8)
               + ' unexpected stat failures\n'
               + '\n')

    if t.n_unexpected_passes > 0:
        file.write('Unexpected passes:\n')
        printPassingTestInfosSummary(file, t.unexpected_passes)

    if t.n_unexpected_failures > 0:
        file.write('Unexpected failures:\n')
        printFailingTestInfosSummary(file, t.unexpected_failures)

    if t.n_unexpected_stat_failures > 0:
        file.write('Unexpected stat failures:\n')
        printFailingTestInfosSummary(file, t.unexpected_stat_failures)

    if t.n_framework_failures > 0:
        file.write('Test framework failures:\n')
        printFrameworkFailureSummary(file, t.framework_failures)

    if stopping():
        file.write('WARNING: Testsuite run was terminated early\n')
1945
def printUnexpectedTests(file, testInfoss):
    """Print a single TEST="..." line naming every unexpected test.

    testInfoss is a list of directory -> test-name -> info mappings
    (passes, failures, stat failures).  The line is formatted so it can
    be pasted straight back into a testsuite invocation; nothing is
    printed when all the mappings are empty.
    """
    unexpected = []
    for testInfos in testInfoss:
        for directory in testInfos.keys():
            unexpected.extend(testInfos[directory].keys())
    if unexpected:
        file.write('Unexpected results from:\n')
        file.write('TEST="' + ' '.join(unexpected) + '"\n')
        file.write('\n')
1957
1958 def printPassingTestInfosSummary(file, testInfos):
1959 directories = list(testInfos.keys())
1960 directories.sort()
1961 maxDirLen = max(len(x) for x in directories)
1962 for directory in directories:
1963 tests = list(testInfos[directory].keys())
1964 tests.sort()
1965 for test in tests:
1966 file.write(' ' + directory.ljust(maxDirLen + 2) + test + \
1967 ' (' + ','.join(testInfos[directory][test]) + ')\n')
1968 file.write('\n')
1969
def printFailingTestInfosSummary(file, testInfos):
    """Pretty-print the unexpectedly-failing tests.

    testInfos maps directory -> test name -> failure reason -> list of
    way names.  Output is one indented line per (test, reason) pair with
    the directory column padded so test names line up, followed by a
    blank line.

    Prints nothing for an empty mapping: previously the unguarded
    max() over the directory names raised ValueError in that case.
    """
    if not testInfos:
        return
    directories = sorted(testInfos.keys())
    maxDirLen = max(len(d) for d in directories)
    for directory in directories:
        for test in sorted(testInfos[directory].keys()):
            reasons = testInfos[directory][test]
            for reason in reasons:
                file.write('   ' + directory.ljust(maxDirLen + 2) + test +
                           ' [' + reason + ']' +
                           ' (' + ','.join(reasons[reason]) + ')\n')
    file.write('\n')
1984
def printFrameworkFailureSummary(file, testInfos):
    """Pretty-print the tests that hit framework failures.

    testInfos maps test name -> iterable of way names.  Output is one
    indented line per test with the name column padded so the way lists
    line up, followed by a blank line.

    Prints nothing for an empty mapping: previously the unguarded
    max() over the names raised ValueError in that case.
    """
    if not testInfos:
        return
    names = sorted(testInfos.keys())
    maxNameLen = max(len(n) for n in names)
    for name in names:
        file.write('   ' + name.ljust(maxNameLen + 2) +
                   ' (' + ','.join(testInfos[name]) + ')\n')
    file.write('\n')
1994
def modify_lines(s, f):
    """Apply f to each line of s and rejoin the result.

    The returned string is either empty or newline-terminated; the
    trailing newline prevents '\\ No newline at end of file' warnings
    when the result is diffed.
    """
    transformed = u'\n'.join(f(line) for line in s.splitlines())
    if transformed and not transformed.endswith('\n'):
        transformed += '\n'
    return transformed