Update terminal title while running test-suite
[ghc.git] / testsuite / driver / runtests.py
1 #!/usr/bin/env python3
2
3 #
4 # (c) Simon Marlow 2002
5 #
6
7 import argparse
8 import signal
9 import sys
10 import os
11 import io
12 import shutil
13 import tempfile
14 import time
15 import re
16 import traceback
17
18 # We don't actually need subprocess in runtests.py, but:
19 # * We do need it in testlibs.py
20 # * We can't import testlibs.py until after we have imported ctypes
21 # * If we import ctypes before subprocess on cygwin, then sys.exit(0)
22 # says "Aborted" and we fail with exit code 134.
23 # So we import it here first, so that the testsuite doesn't appear to fail.
24 import subprocess
25
26 from testutil import getStdout, Watcher, str_warn, str_info
27 from testglobals import getConfig, ghc_env, getTestRun, TestOptions, brokens
28 from perf_notes import MetricChange, inside_git_repo, is_worktree_dirty
29 from junit import junit
30 import cpu_features
31
# Readline sometimes spews out ANSI escapes for some values of TERM,
# which result in test failures. Thus set TERM to a nice, simple, safe
# value.  Set it both for this process and for child GHC invocations
# (ghc_env is the environment dict used when spawning test commands).
os.environ['TERM'] = 'vt100'
ghc_env['TERM'] = 'vt100'

# The single shared testsuite configuration object; other driver modules
# obtain the same object via testglobals.
global config
config = getConfig() # get it from testglobals
40
def signal_handler(sig, frame):
    """SIGINT handler: request a graceful stop of the test run.

    Both arguments are supplied by the interpreter and are unused; the
    first parameter is named ``sig`` (not ``signal``) so it does not
    shadow the imported ``signal`` module.
    """
    stopNow()
43
# -----------------------------------------------------------------------------
# cmd-line options

parser = argparse.ArgumentParser(description="GHC's testsuite driver")
# --skip-perf-tests and --only-perf-tests are contradictory, so reject both.
perf_group = parser.add_mutually_exclusive_group()

parser.add_argument("-e", action='append', help="A string to execute from the command line.")
parser.add_argument("--config-file", action="append", help="config file")
parser.add_argument("--config", action='append', help="config field")
parser.add_argument("--rootdir", action='append', help="root of tree containing tests (default: .)")
parser.add_argument("--metrics-file", help="file in which to save (append) the performance test metrics. If omitted, git notes will be used.")
parser.add_argument("--summary-file", help="file in which to save the (human-readable) summary")
parser.add_argument("--no-print-summary", action="store_true", help="suppress the summary printed at the end of the run")
parser.add_argument("--only", action="append", help="just this test (can be given multiple --only= flags)")
parser.add_argument("--way", action="append", help="just this way")
parser.add_argument("--skipway", action="append", help="skip this way")
parser.add_argument("--threads", type=int, help="threads to run simultaneously")
parser.add_argument("--verbose", type=int, choices=[0,1,2,3,4,5], help="verbose (Values 0 through 5 accepted)")
parser.add_argument("--junit", type=argparse.FileType('wb'), help="output testsuite summary in JUnit format")
parser.add_argument("--test-env", default='local', help="Override default chosen test-env.")
perf_group.add_argument("--skip-perf-tests", action="store_true", help="skip performance tests")
perf_group.add_argument("--only-perf-tests", action="store_true", help="Only do performance tests")
66
args = parser.parse_args()

# -e STRING: execute arbitrary Python in this module's namespace.
# NOTE(review): exec of command-line text and of config files is by
# design here (the driver trusts its invoker), but it is full
# arbitrary-code execution — never feed it untrusted input.
if args.e:
    for e in args.e:
        exec(e)

# --config-file FILE: the file is Python source that mutates `config`.
if args.config_file:
    for arg in args.config_file:
        exec(open(arg).read())

# --config FIELD=VALUE: set a single attribute on the config object.
# split('=', 1) allows '=' to appear inside VALUE.
if args.config:
    for arg in args.config:
        field, value = arg.split('=', 1)
        setattr(config, field, value)
81
# Every way name the configuration knows about, used to validate
# --way/--skipway arguments below.
all_ways = config.run_ways+config.compile_ways+config.other_ways

if args.rootdir:
    config.rootdirs = args.rootdir

config.metrics_file = args.metrics_file
# Remember whether the user supplied --metrics-file; used below to decide
# between file output and git notes for perf results.
hasMetricsFile = bool(config.metrics_file)
config.summary_file = args.summary_file
config.no_print_summary = args.no_print_summary

if args.only:
    config.only = args.only
    config.run_only_some_tests = True

# --way: unknown ways only warn (they may be valid for another platform);
# known ways are added to the command-line way list, and ways that are
# normally "other" (not run by default) are enabled for run/compile too.
if args.way:
    for way in args.way:
        if way not in all_ways:
            print('WARNING: Unknown WAY %s in --way' % way)
        else:
            config.cmdline_ways += [way]
            if way in config.other_ways:
                config.run_ways += [way]
                config.compile_ways += [way]

# --skipway: remove the named ways from every way list.
if args.skipway:
    for way in args.skipway:
        if way not in all_ways:
            print('WARNING: Unknown WAY %s in --skipway' % way)

    config.other_ways = [w for w in config.other_ways if w not in args.skipway]
    config.run_ways = [w for w in config.run_ways if w not in args.skipway]
    config.compile_ways = [w for w in config.compile_ways if w not in args.skipway]

if args.threads:
    config.threads = args.threads
    config.use_threads = True

# Explicit `is not None` so --verbose 0 is honoured (0 is falsy).
if args.verbose is not None:
    config.verbose = args.verbose

# Note force skip perf tests: skip if this is not a git repo (estimated with inside_git_repo)
# and no metrics file is given. In this case there is no way to read the previous commit's
# perf test results, nor a way to store new perf test results.
forceSkipPerfTests = not hasMetricsFile and not inside_git_repo()
config.skip_perf_tests = args.skip_perf_tests or forceSkipPerfTests
config.only_perf_tests = args.only_perf_tests

if args.test_env:
    config.test_env = args.test_env
131
config.cygwin = False
config.msys = False

# NOTE(review): `windows` (and `darwin` below) are not defined in this
# file — they are presumably injected by an exec'd --config-file; confirm.
if windows:
    # Classify the Windows environment from `uname -s`.
    h = os.popen('uname -s', 'r')
    v = h.read()
    h.close()
    if v.startswith("CYGWIN"):
        config.cygwin = True
    elif v.startswith("MINGW") or v.startswith("MSYS"):
        # msys gives "MINGW32"
        # msys2 gives "MINGW_NT-6.2" or "MSYS_NT-6.3"
        config.msys = True
    else:
        raise Exception("Can't detect Windows terminal type")
147
# Try to use UTF8
if windows:
    import ctypes
    # Windows and mingw* Python provide windll, msys2 python provides cdll.
    if hasattr(ctypes, 'WinDLL'):
        mydll = ctypes.WinDLL
    else:
        mydll = ctypes.CDLL

    # This actually leaves the terminal in codepage 65001 (UTF8) even
    # after python terminates. We ought really remember the old codepage
    # and set it back.
    kernel32 = mydll('kernel32.dll')
    if kernel32.SetConsoleCP(65001) == 0:
        raise Exception("Failure calling SetConsoleCP(65001)")
    if kernel32.SetConsoleOutputCP(65001) == 0:
        raise Exception("Failure calling SetConsoleOutputCP(65001)")

    # register the interrupt handler
    signal.signal(signal.SIGINT, signal_handler)
else:
    # Try and find a utf8 locale to use
    # First see if we already have a UTF8 locale
    h = os.popen('locale | grep LC_CTYPE | grep -i utf', 'r')
    v = h.read()
    h.close()
    if v == '':
        # We don't, so now see if 'locale -a' works
        h = os.popen('locale -a | grep -F .', 'r')
        v = h.read()
        h.close()
        if v != '':
            # If it does then use the first utf8 locale that is available
            # NOTE(review): '\|' below is grep BRE alternation; in a
            # non-raw Python string it is also an invalid escape kept
            # literally (DeprecationWarning on newer Pythons) — consider
            # a raw string.
            h = os.popen('locale -a | grep -i "utf8\|utf-8" 2>/dev/null', 'r')
            v = h.readline().strip()
            h.close()
            if v != '':
                os.environ['LC_ALL'] = v
                ghc_env['LC_ALL'] = v
                print("setting LC_ALL to", v)
            else:
                print('WARNING: No UTF8 locale found.')
                print('You may get some spurious test failures.')
191
# https://stackoverflow.com/a/22254892/1308058
def supports_colors():
    """Report whether stdout looks like a colour-capable terminal.

    True only when the platform is expected to honour ANSI escapes
    (anything except Pocket PC; win32 only when running under ANSICON)
    and stdout is an actual tty.
    """
    platform_ok = sys.platform != 'Pocket PC' and (sys.platform != 'win32' or
                                                   'ANSICON' in os.environ)
    # isatty is not always implemented, #6223.
    stdout_is_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
    return platform_ok and stdout_is_tty
206
config.supports_colors = supports_colors()

# This has to come after arg parsing as the args can change the compiler.
# NOTE(review): get_compiler_info is not defined in this file — it
# presumably comes from an exec'd config file; confirm.
get_compiler_info()

# Can't import this earlier as we need to know if threading will be
# enabled or not
from testlib import *
215
def format_path(path):
    """Normalise *path* for use in $PATH on Windows pythons.

    Rewrites drive-letter syntax for msys2/cygwin and converts
    backslashes to forward slashes.  On non-Windows the path is
    returned untouched.
    """
    if not windows:
        return path
    if os.pathsep == ':':
        # If using msys2 python instead of mingw we have to change the drive
        # letter representation. Otherwise it thinks we're adding two env
        # variables E and /Foo when we add E:/Foo.
        path = re.sub(r'([a-zA-Z]):', r'/\1', path)
    if config.cygwin:
        # On cygwin we can't put "c:\foo" in $PATH, as : is a
        # field separator. So convert to /cygdrive/c/foo instead.
        # Other pythons use ; as the separator, so no problem.
        path = re.sub(r'([a-zA-Z]):', r'/cygdrive/\1', path)
    path = re.sub(r'\\', '/', path)
    return path
230
# On Windows we need to set $PATH to include the paths to all the DLLs
# in order for the dynamic library tests to work.
if windows or darwin:
    pkginfo = getStdout([config.ghc_pkg, 'dump'])
    topdir = config.libdir
    if windows:
        # Compilers also need the bundled mingw toolchain on PATH.
        mingw = os.path.abspath(os.path.join(topdir, '../mingw/bin'))
        mingw = format_path(mingw)
        ghc_env['PATH'] = os.pathsep.join([ghc_env.get("PATH", ""), mingw])
    # Scan `ghc-pkg dump` output for every package's library-dirs and
    # prepend each one to the dynamic-linker search path.
    for line in pkginfo.split('\n'):
        if line.startswith('library-dirs:'):
            path = line.rstrip()
            path = re.sub('^library-dirs: ', '', path)
            # Use string.replace instead of re.sub, because re.sub
            # interprets backslashes in the replacement string as
            # escape sequences.
            path = path.replace('$topdir', topdir)
            # ghc-pkg quotes paths containing spaces; strip the quotes
            # and unescape backslash escapes.
            if path.startswith('"'):
                path = re.sub('^"(.*)"$', '\\1', path)
                path = re.sub('\\\\(.)', '\\1', path)
            if windows:
                path = format_path(path)
                ghc_env['PATH'] = os.pathsep.join([path, ghc_env.get("PATH", "")])
            else:
                # darwin
                ghc_env['DYLD_LIBRARY_PATH'] = os.pathsep.join([path, ghc_env.get("DYLD_LIBRARY_PATH", "")])
257
# Thread-local default test options (testopts_local comes from testlib).
testopts_local.x = TestOptions()

# if timeout == -1 then we try to calculate a sensible value
# from the calibration output produced by the timeout program build.
if config.timeout == -1:
    config.timeout = int(read_no_crs(config.top + '/timeout/calibrate.out'))

print('Timeout is ' + str(config.timeout))
print('Known ways: ' + ', '.join(config.other_ways))
print('Run ways: ' + ', '.join(config.run_ways))
print('Compile ways: ' + ', '.join(config.compile_ways))

# Try to get allowed performance changes from the git commit message.
# Best-effort: a failing `git` invocation merely disables the feature.
try:
    config.allowed_perf_changes = Perf.get_allowed_perf_changes()
except subprocess.CalledProcessError:
    print('Failed to get allowed metric changes from the HEAD git commit message.')

print('Allowing performance changes in: ' + ', '.join(config.allowed_perf_changes.keys()))
276
# -----------------------------------------------------------------------------
# The main dude

if config.rootdirs == []:
    config.rootdirs = ['.']

# Collect every .T test-definition file under the root directories.
t_files = list(findTFiles(config.rootdirs))

print('Found', len(t_files), '.T files...')

t = getTestRun()

# Avoid cmd.exe built-in 'date' command on Windows
t.start_time = time.localtime()

print('Beginning test run at', time.strftime("%c %Z",t.start_time))

# For reference
try:
    print('Detected CPU features: ', cpu_features.get_cpu_features())
except Exception as e:
    print('Failed to detect CPU features: ', e)

sys.stdout.flush()
# we output text, which cannot be unbuffered
sys.stdout = os.fdopen(sys.__stdout__.fileno(), "w")
303
# LOCAL=1 runs tests next to the sources; otherwise use a fresh /tmp dir.
if config.local:
    tempdir = ''
else:
    # See note [Running tests in /tmp]
    tempdir = tempfile.mkdtemp('', 'ghctest-')

# opts.testdir should be quoted when used, to make sure the testsuite
# keeps working when it contains backward slashes, for example from
# using os.path.join. Windows native and mingw* python
# (/mingw64/bin/python) set `os.path.sep = '\\'`, while msys2 python
# (/bin/python, /usr/bin/python or /usr/local/bin/python) sets
# `os.path.sep = '/'`.
# To catch usage of unquoted opts.testdir early, insert some spaces into
# tempdir.
tempdir = os.path.join(tempdir, 'test spaces')
319
def cleanup_and_exit(exitcode):
    """Remove the scratch directory (when cleanup is enabled) and exit.

    :param exitcode: process exit status to terminate with.

    Uses ``sys.exit`` rather than the ``site``-provided ``exit()``
    builtin, which is intended for interactive use and is not
    guaranteed to exist (e.g. under ``python -S``).
    """
    if config.cleanup and tempdir:
        shutil.rmtree(tempdir, ignore_errors=True)
    sys.exit(exitcode)
324
# First collect all the tests to be run.  Each .T file is Python source
# whose test(...) calls (from testlib) register work; exec'ing it is how
# tests get queued.
t_files_ok = True
for file in t_files:
    if_verbose(2, '====> Scanning %s' % file)
    newTestDir(tempdir, os.path.dirname(file))
    try:
        with io.open(file, encoding='utf8') as f:
            src = f.read()

        exec(src)
    except Exception as e:
        # A broken .T file is a framework failure, not a test failure.
        traceback.print_exc()
        framework_fail(file, '', str(e))
        t_files_ok = False

# Any names still left in config.only were never matched by a .T file.
for name in config.only:
    if t_files_ok:
        # See Note [Mutating config.only]
        framework_fail(name, '', 'test not found')
    else:
        # Let user fix .T file errors before reporting on unfound tests.
        # The reason the test can not be found is likely because of those
        # .T file errors.
        pass
349
# --list-broken mode: just report tests marked broken, don't run anything.
if config.list_broken:
    print('')
    print('Broken tests:')
    # brokens entries are (ticket number, directory, name) triples.
    print(' '.join(map (lambda bdn: '#' + str(bdn[0]) + '(' + bdn[1] + '/' + bdn[2] + ')', brokens)))
    print('')

    if t.framework_failures:
        print('WARNING:', len(t.framework_failures), 'framework failures!')
        print('')
else:
    # completion watcher
    watcher = Watcher(len(parallelTests))

    # Now run all the tests
    try:
        for oneTest in parallelTests:
            if stopping():
                break
            oneTest(watcher)

        # wait for parallel tests to finish
        if not stopping():
            watcher.wait()

        # Run the following tests purely sequential
        config.use_threads = False
        for oneTest in aloneTests:
            if stopping():
                break
            oneTest(watcher)
    except KeyboardInterrupt:
        # Ctrl-C: fall through and report whatever finished so far.
        pass
382
# flush everything before we continue
sys.stdout.flush()

# Warn if had to force skip perf tests (see Note force skip perf tests).
# `spacing` is the indentation prefix for continuation lines of warnings.
spacing = " "
if forceSkipPerfTests and not args.skip_perf_tests:
    print()
    print(str_warn('Skipping All Performance Tests') + ' `git` exited with non-zero exit code.')
    print(spacing + 'Git is required because performance test results are compared with ancestor git commits\' results (stored with git notes).')
    print(spacing + 'You can still run the tests without git by specifying an output file with --metrics-file FILE.')

# Warn of new metrics.
new_metrics = [metric for (change, metric) in t.metrics if change == MetricChange.NewMetric]
if any(new_metrics):
    if inside_git_repo():
        reason = 'a baseline (expected value) cannot be recovered from' + \
            ' previous git commits. This may be due to HEAD having' + \
            ' new tests or having expected changes, the presence of' + \
            ' expected changes since the last run of the tests, and/or' + \
            ' the latest test run being too old.'
        fix = 'If the tests exist on the previous' + \
            ' commit (And are configured to run with the same ways),' + \
            ' then check out that commit and run the tests to generate' + \
            ' the missing metrics. Alternatively, a baseline may be' + \
            ' recovered from ci results once fetched:\n\n' + \
            spacing + 'git fetch ' + \
            'https://gitlab.haskell.org/ghc/ghc-performance-notes.git' + \
            ' refs/notes/perf:refs/notes/' + Perf.CiNamespace
    else:
        reason = "this is not a git repo so the previous git commit's" + \
            " metrics cannot be loaded from git notes:"
        fix = ""
    print()
    print(str_warn('Missing Baseline Metrics') + \
        ' these metrics trivially pass because ' + reason)
    print(spacing + (' ').join(set([metric.test for metric in new_metrics])))
    if fix != "":
        print()
        print(fix)

# Inform of how to accept metric changes.
if (len(t.unexpected_stat_failures) > 0):
    print()
    print(str_info("Some stats have changed") + " If this is expected, " + \
        "allow changes by appending the git commit message with this:")
    print('-' * 25)
    print(Perf.allow_changes_string(t.metrics))
    print('-' * 25)
431
summary(t, sys.stdout, config.no_print_summary, config.supports_colors)

# Write perf stats if any exist or if a metrics file is specified.
stats = [stat for (_, stat) in t.metrics]
if hasMetricsFile:
    print('Appending ' + str(len(stats)) + ' stats to file: ' + config.metrics_file)
    with open(config.metrics_file, 'a') as file:
        file.write("\n" + Perf.format_perf_stat(stats))
elif inside_git_repo() and any(stats):
    if is_worktree_dirty():
        # Refuse to attach perf notes to a dirty tree: the metrics would
        # not correspond to any commit.
        print()
        print(str_warn('Performance Metrics NOT Saved') + \
            ' working tree is dirty. Commit changes or use ' + \
            '--metrics-file to save metrics to a file.')
    else:
        Perf.append_perf_stat(stats)

# Write summary
if config.summary_file:
    with open(config.summary_file, 'w') as file:
        summary(t, file)

if args.junit:
    junit(t).write(args.junit)

# Exit non-zero iff anything unexpected happened.
if len(t.unexpected_failures) > 0 or \
   len(t.unexpected_stat_failures) > 0 or \
   len(t.unexpected_passes) > 0 or \
   len(t.framework_failures) > 0:
    exitcode = 1
else:
    exitcode = 0

cleanup_and_exit(exitcode)
466
467 # Note [Running tests in /tmp]
468 #
469 # Use LOCAL=0 to run tests in /tmp, to catch tests that use files from
470 # the source directory without copying them to the test directory first.
471 #
472 # As an example, take a run_command test with a Makefile containing
473 # `$(TEST_HC) ../Foo.hs`. GHC will now create the output files Foo.o and
474 # Foo.hi in the source directory. There are 2 problems with this:
475 # * Output files in the source directory won't get cleaned up automatically.
476 # * Two tests might (over)write the same output file.
477 #
478 # Tests that only fail when run concurrently with other tests are the
479 # worst, so we try to catch them early by enabling LOCAL=0 in validate.
480 #
481 # Adding -outputdir='.' to TEST_HC_OPTS would help a bit, but it requires
482 # making changes to quite a few tests. The problem is that
483 # `$(TEST_HC) ../Foo.hs -outputdir=.` with Foo.hs containing
484 # `module Main where` does not produce Foo.o, as it would without
485 # -outputdir, but Main.o. See [1].
486 #
487 # Using -outputdir='.' is not foolproof anyway, since it does not change
488 # the destination of the final executable (Foo.exe).
489 #
490 # Another hardening method that could be tried is to `chmod -w` the
491 # source directory.
492 #
493 # By default we set LOCAL=1, because it makes it easier to inspect the
494 # test directory while working on a new test.
495 #
496 # [1]
497 # https://downloads.haskell.org/~ghc/8.0.1/docs/html/users_guide/separate_compilation.html#output-files