# testglobals.py — global configuration and result-tracking structures for the
# GHC testsuite driver (testsuite/driver/testglobals.py).
#
# (c) Simon Marlow 2002
#

# -----------------------------------------------------------------------------
# Configuration info

# There is a single global instance of this structure, stored in the
# variable config below. The fields of the structure are filled in by
# the appropriate config script(s) for this compiler/platform, in
# ../config.
#
# Bits of the structure may also be filled in from the command line,
# via the build system, using the '-e' option to runtests.
class TestConfig:
    """Global testsuite configuration.

    A single instance of this class is stored in the module-level
    variable ``config`` below.  Its fields are filled in by the
    appropriate config script(s) for this compiler/platform (in
    ../config), and may also be overridden from the command line via
    the '-e' option to runtests.
    """

    def __init__(self):

        # Where the testsuite root is
        self.top = ''

        # Directories below which to look for test description files (foo.T)
        self.rootdirs = []

        # Run these tests only (run all tests if empty)
        self.run_only_some_tests = False
        self.only = set()

        # Accept new output which differs from the sample?
        self.accept = False
        self.accept_platform = False
        self.accept_os = False

        # File in which to save the performance metrics.
        self.metrics_file = ''

        # File in which to save the summary
        self.summary_file = ''

        # Should we print the summary?
        # Disabling this is useful for Phabricator/Harbormaster
        # logfiles, which are truncated to 30 lines. TODO. Revise if
        # this is still true.
        # Note that we have a separate flag for this, instead of
        # overloading --verbose, as you might want to see the summary
        # with --verbose=0.
        self.no_print_summary = False

        # What platform are we running on?
        self.platform = ''
        self.os = ''
        self.arch = ''
        self.msys = False
        self.cygwin = False

        # What is the wordsize (in bits) of this platform?
        # NOTE(review): initialised to '' here but named as a bit count —
        # presumably replaced with an int/str by the config scripts; confirm.
        self.wordsize = ''

        # Verbosity level
        self.verbose = 3

        # See Note [validate and testsuite speed] in toplevel Makefile.
        self.speed = 1

        # List the tests marked broken instead of running them?
        self.list_broken = False

        # Path to the compiler (stage2 by default)
        self.compiler = ''
        # and ghc-pkg
        self.ghc_pkg = ''

        # Is self.compiler a stage 1, 2 or 3 compiler?
        self.stage = 2

        # Flags we always give to this compiler
        self.compiler_always_flags = []

        # Which ways to run tests (when compiling and running respectively)
        # Other ways are added from the command line if we have the appropriate
        # libraries.
        self.compile_ways = []
        self.run_ways = []
        self.other_ways = []

        # The ways selected via the command line.
        self.cmdline_ways = []

        # Lists of flags for each way
        self.way_flags = {}
        self.way_rts_flags = {}

        # Do we have vanilla libraries?
        self.have_vanilla = False

        # Do we have dynamic libraries?
        self.have_dynamic = False

        # Do we have profiling support?
        self.have_profiling = False

        # Do we have interpreter support?
        self.have_interp = False

        # Do we have shared libraries?
        self.have_shared_libs = False

        # Do we have SMP support?
        self.have_smp = False

        # Is gdb available?
        self.have_gdb = False

        # Is readelf available?
        self.have_readelf = False

        # Are we testing an in-tree compiler?
        self.in_tree_compiler = True

        # the timeout program
        self.timeout_prog = ''
        self.timeout = 300

        # threads
        self.threads = 1
        self.use_threads = False

        # Should we skip performance tests
        self.skip_perf_tests = False

        # Only do performance tests
        self.only_perf_tests = False

        # Allowed performance changes (see perf_notes.get_allowed_perf_changes())
        self.allowed_perf_changes = {}

        # The test environment.
        self.test_env = 'local'

        # terminal supports colors
        self.supports_colors = False

# The single global TestConfig instance.  (The 'global' statement is a
# no-op at module scope; it is kept to document that this is a module
# global, matching the style used elsewhere in this file.)
global config
config = TestConfig()

def getConfig():
    """Return the global TestConfig instance."""
    return config

import os
# Hold our modified GHC test-running environment so we don't poison the
# current python's environment.
global ghc_env
ghc_env = os.environ.copy()

# -----------------------------------------------------------------------------
# Information about the current test run

class TestResult:
    """
    A result from the execution of a test. These live in the expected_passes,
    framework_failures, framework_warnings, unexpected_passes,
    unexpected_failures, unexpected_stat_failures lists of TestRun.
    """
    # Many TestResults are created per run; __slots__ avoids a per-instance
    # __dict__ and keeps memory use down.
    __slots__ = 'directory', 'testname', 'reason', 'way', 'stderr'

    def __init__(self, directory, testname, reason, way, stderr=None):
        # Directory containing the test
        self.directory = directory
        # Name of the test
        self.testname = testname
        # Why the test landed in this result category
        self.reason = reason
        # The way the test was run (e.g. one of config.run_ways)
        self.way = way
        # Captured stderr output, or None if not captured
        self.stderr = stderr

    def __repr__(self):
        # Debug-friendly representation; stderr is omitted since it may be
        # large and is not needed to identify the result.
        return 'TestResult(directory={!r}, testname={!r}, reason={!r}, way={!r})'.format(
            self.directory, self.testname, self.reason, self.way)
class TestRun:
    """Mutable record of the results of the current testsuite run.

    A single instance is stored in the module-level variable ``t`` below
    and updated as tests complete.
    """
    def __init__(self):
        # Start time of the run; None until set
        self.start_time = None
        # Total counts of tests and of test cases run
        self.total_tests = 0
        self.total_test_cases = 0

        # Counts per result category
        self.n_tests_skipped = 0
        self.n_expected_passes = 0
        self.n_expected_failures = 0

        # The following lists hold TestResult objects
        # type: List[TestResult]
        self.missing_libs = []
        self.framework_failures = []
        self.framework_warnings = []

        self.expected_passes = []
        self.unexpected_passes = []
        self.unexpected_failures = []
        self.unexpected_stat_failures = []

        # List of all metrics measured in this test run.
        # [(change, PerfStat)] where change is one of the MetricChange
        # constants: NewMetric, NoChange, Increase, Decrease.
        # NewMetric happens when the previous git commit has no metric recorded.
        self.metrics = []

# The single global TestRun instance, recording the results of this run.
global t
t = TestRun()

def getTestRun():
    """Return the global TestRun instance."""
    return t

# -----------------------------------------------------------------------------
# Information about the current test

class TestOptions:
    """Options controlling how a single test is compiled and run.

    The default values live in the module-level ``default_testopts``
    instance created below.
    """
    def __init__(self):
        # skip this test?
        self.skip = False

        # skip these ways
        self.omit_ways = []

        # skip all ways except these (None == do all ways)
        self.only_ways = None

        # add these ways to the default set
        self.extra_ways = []

        # the result we normally expect for this test
        self.expect = 'pass'

        # override the expected result for certain ways
        self.expect_fail_for = []

        # the stdin file that this test will use (empty for <name>.stdin)
        self.stdin = ''

        # Set the expected stderr/stdout. '' means infer from test name.
        self.use_specs = {}

        # don't compare output
        self.ignore_stdout = False
        self.ignore_stderr = False

        # Backpack test
        self.compile_backpack = False

        # We sometimes want to modify the compiler_always_flags, so
        # they are copied from config.compiler_always_flags when we
        # make a new instance of TestOptions.
        self.compiler_always_flags = []

        # extra compiler opts for this test
        self.extra_hc_opts = ''

        # extra run opts for this test
        self.extra_run_opts = ''

        # expected exit code
        self.exit_code = 0

        # extra files to clean afterward
        self.clean_files = []

        # extra files to copy to the testdir
        self.extra_files = []

        # Map from metric to (function from way and commit to baseline value,
        # allowed percentage deviation), e.g.
        # { 'bytes allocated': (
        #       lambda way, commit:
        #           ...
        #           if way1: return None ...
        #           elif way2: return 9300000000 ...
        #           ...
        #       , 10) }
        # This means no baseline is available for way1. For way 2, allow a 10%
        # deviation from 9300000000.
        self.stats_range_fields = {}

        # Is the test testing performance?
        self.is_stats_test = False

        # Does this test the compiler's performance as opposed to the generated code.
        self.is_compiler_stats_test = False

        # should we run this test alone, i.e. not run it in parallel with
        # any other threads
        self.alone = False

        # Does this test use a literate (.lhs) file?
        self.literate = False

        # Does this test use a .c, .m or .mm file?
        self.c_src = False
        self.objc_src = False
        self.objcpp_src = False

        # Does this test use a .cmm file?
        self.cmm_src = False

        # Should we put .hi/.o files in a subdirectory?
        self.outputdir = None

        # Command to run before the test
        self.pre_cmd = None

        # Command wrapper: a function to apply to the command before running it
        self.cmd_wrapper = None

        # Prefix to put on the command before compiling it
        self.compile_cmd_prefix = ''

        # Extra output normalisation
        self.extra_normaliser = lambda x: x

        # Custom output checker, otherwise do a comparison with expected
        # stdout file. Accepts two arguments: filename of actual stdout
        # output, and a normaliser function given other test options
        self.check_stdout = None

        # Check .hp file when profiling libraries are available?
        self.check_hp = True

        # Extra normalisation for compiler error messages
        self.extra_errmsg_normaliser = lambda x: x

        # Keep profiling callstacks.
        self.keep_prof_callstacks = False

        # The directory the test is in
        self.testdir = '.'

        # Should we redirect stdout and stderr to a single file?
        self.combined_output = False

        # How should the timeout be adjusted on this test?
        self.compile_timeout_multiplier = 1.0
        self.run_timeout_multiplier = 1.0

        # Clean up after the test? (exact scope of cleanup is implemented
        # by the driver)
        self.cleanup = True

        # Should we run tests in a local subdirectory (<testname>-run) or
        # in temporary directory in /tmp? See Note [Running tests in /tmp].
        self.local = True

# The default set of options
global default_testopts
default_testopts = TestOptions()

# (bug, directory, name) of tests marked broken
global brokens
brokens = []