# verify.py: diff between revisions 13137:a3750228268f and 13139:78d9cd67bbdf
1#!/usr/bin/env python2
2#
3# Copyright 2018 Google, Inc.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Gabe Black
29
30from __future__ import print_function
31
32import argparse
33import collections
34import difflib
35import functools
36import inspect
37import itertools
38import json
39import multiprocessing.pool
40import os
41import re
42import subprocess
43import sys
44
# Locations derived from this script's own path: the gem5 config script to
# run, and where the tests and their json manifest live in the build tree.
script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
config_path = os.path.join(script_dir, 'config.py')

systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')
52
53
54
def scons(*args):
    """Run scons with the given arguments; raise CalledProcessError if
    the build fails."""
    subprocess.check_call(['scons'] + list(args))
58
59
60
class Test(object):
    """A single test target plus the properties loaded from tests.json.

    Each json property is exposed both as an attribute and through
    self.props, so the properties can be serialized back out later
    (e.g. into a results file).
    """

    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        self.suffix = suffix
        self.build_dir = build_dir
        self.props = {}

        # items() works under both python 2 and python 3;
        # iteritems() is python 2 only.
        for key, val in props.items():
            self.set_prop(key, val)

    def set_prop(self, key, val):
        """Record a property both as an attribute and in self.props."""
        setattr(self, key, val)
        self.props[key] = val

    def dir(self):
        """Directory this test's build products live in."""
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        """Directory this test's sources live in."""
        return os.path.join(script_dir, self.path)

    def expected_returncode_file(self):
        """File holding the expected simulator return code, if any."""
        return os.path.join(self.src_dir(), 'expected_returncode')

    def golden_dir(self):
        """Directory holding this test's reference ("golden") outputs."""
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        """Name of the test binary, e.g. 'test1.opt'."""
        return '.'.join([self.name, self.suffix])

    def full_path(self):
        """Path of the test binary within the build directory."""
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        """Per-test, per-flavor m5out output directory."""
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        """File the run phase writes the simulator's return code into."""
        return os.path.join(self.m5out_dir(), 'returncode')
98
99
100
# Registry mapping phase names (as used with --phase) to their classes.
test_phase_classes = {}

class TestPhaseMeta(type):
    """Metaclass which registers every concrete phase class by name.

    Classes marked abstract = True are skipped; all others are recorded
    in test_phase_classes under their 'name' attribute.
    """
    def __init__(cls, name, bases, d):
        is_abstract = d.pop('abstract', False)
        if not is_abstract:
            test_phase_classes[d['name']] = cls
        super(TestPhaseMeta, cls).__init__(name, bases, d)
109
class TestPhaseBase(object):
    """Base class for the compile/execute/verify phases.

    Concrete subclasses provide 'name' (used with --phase on the command
    line) and 'number', which fixes the order the phases run in.
    """
    # NOTE(review): __metaclass__ is only honored by python 2; under
    # python 3 this registration hook would silently never run.
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        # main_args: arguments shared by all phases.
        # args: this phase's own extra command line arguments.
        self.main_args = main_args
        self.args = args

    def __lt__(self, other):
        # Phases sort by their fixed 'number' so they run in order.
        return self.number < other.number
120
class CompilePhase(TestPhaseBase):
    """Phase which builds all the test binaries with one scons run."""
    name = 'compile'
    number = 1

    def run(self, tests):
        # Hand every requested binary to a single scons invocation.
        targets = [test.full_path() for test in tests]
        scons(*([ 'USE_SYSTEMC=1' ] + list(self.args) + targets))
129
class RunPhase(TestPhaseBase):
    """Phase which runs the compiled tests, optionally in parallel and
    with a per-test time limit enforced by the 'timeout' utility."""
    name = 'execute'
    number = 2

    def run(self, tests):
        parser = argparse.ArgumentParser()
        parser.add_argument('--timeout', type=int, metavar='SECONDS',
                            help='Time limit for each run in seconds.',
                            default=0)
        parser.add_argument('-j', type=int, default=1,
                            help='How many tests to run in parallel.')
        args = parser.parse_args(self.args)

        # If a test is still alive args.timeout seconds after the
        # initial TERM signal, 'timeout' kills it outright.
        timeout_cmd = [
            'timeout',
            '--kill-after', str(args.timeout * 2),
            str(args.timeout)
        ]
        curdir = os.getcwd()
        def run_test(test):
            cmd = []
            if args.timeout:
                cmd.extend(timeout_cmd)
            cmd.extend([
                test.full_path(),
                '-red', os.path.abspath(test.m5out_dir()),
                '--listener-mode=off',
                '--quiet',
                config_path,
                '--working-dir',
                os.path.dirname(test.src_dir())
            ])
            # Ensure the output directory exists.
            if not os.path.exists(test.m5out_dir()):
                os.makedirs(test.m5out_dir())
            try:
                subprocess.check_call(cmd)
            except subprocess.CalledProcessError as error:
                # 'except X, e' is python 2 only syntax; 'as' works in
                # both python 2 and 3.
                returncode = error.returncode
            else:
                returncode = 0
            # NOTE(review): presumably the simulated run can change our
            # working directory (--working-dir); restore it before
            # writing results — confirm against config.py.
            os.chdir(curdir)
            with open(test.returncode_file(), 'w') as rc:
                rc.write('%d\n' % returncode)

        runnable = [t for t in tests if not t.compile_only]
        if args.j == 1:
            # Explicit loop, not map(): map() is lazy under python 3 and
            # would silently run nothing.
            for test in runnable:
                run_test(test)
        else:
            tp = multiprocessing.pool.ThreadPool(args.j)
            for test in runnable:
                tp.apply_async(run_test, (test,))
            tp.close()
            tp.join()
183
class Checker(object):
    """Compares a test output file against a reference ("golden") file.

    'tag' identifies which output this checker covers, for reporting.
    """
    def __init__(self, ref, test, tag):
        self.ref = ref
        self.test = test
        self.tag = tag

    def check(self):
        """Return True if the test output matches the reference exactly.

        Bug fix: this used to open self.text, an attribute which was
        never set, so every call raised AttributeError.
        """
        with open(self.test) as test_f, open(self.ref) as ref_f:
            return test_f.read() == ref_f.read()
193
def tagged_filt(tag, num):
    """Build a regex matching a gem5 report block for the given message
    tag ('Error', 'Warning', ...) and message number."""
    initial = tag[0]
    pattern = (r'^\n{}: \({}{}\) .*\n(In file: .*\n)?'
               r'(In process: [\w.]* @ .*\n)?')
    return pattern.format(tag, initial, num)
197
def error_filt(num):
    """Filter matching an Error report with message number num."""
    return tagged_filt('Error', num)

def warning_filt(num):
    """Filter matching a Warning report with message number num."""
    return tagged_filt('Warning', num)

def info_filt(num):
    """Filter matching an Info report with message number num."""
    return tagged_filt('Info', num)
206
class LogChecker(Checker):
    """Checker for simulator log output which strips out lines that
    legitimately differ from run to run before comparing."""
    def merge_filts(*filts):
        # Plain function evaluated at class definition time (not a
        # method): OR the individual filter regexes together into one
        # compiled multi-line pattern.
        filts = map(lambda f: '(' + f + ')', filts)
        filts = '|'.join(filts)
        return re.compile(filts, flags=re.MULTILINE)
212
1#!/usr/bin/env python2
2#
3# Copyright 2018 Google, Inc.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Gabe Black
29
30from __future__ import print_function
31
32import argparse
33import collections
34import difflib
35import functools
36import inspect
37import itertools
38import json
39import multiprocessing.pool
40import os
41import re
42import subprocess
43import sys
44
# Locations derived from this script's own path: the gem5 config script to
# run, and where the tests and their json manifest live in the build tree.
script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
config_path = os.path.join(script_dir, 'config.py')

systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')
52
53
54
def scons(*args):
    """Run scons with the given arguments; raise CalledProcessError if
    the build fails."""
    subprocess.check_call(['scons'] + list(args))
58
59
60
class Test(object):
    """A single test target plus the properties loaded from tests.json.

    Each json property is exposed both as an attribute and through
    self.props, so the properties can be serialized back out later
    (e.g. into a results file).
    """

    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        self.suffix = suffix
        self.build_dir = build_dir
        self.props = {}

        # items() works under both python 2 and python 3;
        # iteritems() is python 2 only.
        for key, val in props.items():
            self.set_prop(key, val)

    def set_prop(self, key, val):
        """Record a property both as an attribute and in self.props."""
        setattr(self, key, val)
        self.props[key] = val

    def dir(self):
        """Directory this test's build products live in."""
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        """Directory this test's sources live in."""
        return os.path.join(script_dir, self.path)

    def expected_returncode_file(self):
        """File holding the expected simulator return code, if any."""
        return os.path.join(self.src_dir(), 'expected_returncode')

    def golden_dir(self):
        """Directory holding this test's reference ("golden") outputs."""
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        """Name of the test binary, e.g. 'test1.opt'."""
        return '.'.join([self.name, self.suffix])

    def full_path(self):
        """Path of the test binary within the build directory."""
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        """Per-test, per-flavor m5out output directory."""
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        """File the run phase writes the simulator's return code into."""
        return os.path.join(self.m5out_dir(), 'returncode')
98
99
100
# Registry mapping phase names (as used with --phase) to their classes.
test_phase_classes = {}

class TestPhaseMeta(type):
    """Metaclass which registers every concrete phase class by name.

    Classes marked abstract = True are skipped; all others are recorded
    in test_phase_classes under their 'name' attribute.
    """
    def __init__(cls, name, bases, d):
        is_abstract = d.pop('abstract', False)
        if not is_abstract:
            test_phase_classes[d['name']] = cls
        super(TestPhaseMeta, cls).__init__(name, bases, d)
109
class TestPhaseBase(object):
    """Base class for the compile/execute/verify phases.

    Concrete subclasses provide 'name' (used with --phase on the command
    line) and 'number', which fixes the order the phases run in.
    """
    # NOTE(review): __metaclass__ is only honored by python 2; under
    # python 3 this registration hook would silently never run.
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        # main_args: arguments shared by all phases.
        # args: this phase's own extra command line arguments.
        self.main_args = main_args
        self.args = args

    def __lt__(self, other):
        # Phases sort by their fixed 'number' so they run in order.
        return self.number < other.number
120
class CompilePhase(TestPhaseBase):
    """Phase which builds all the test binaries with one scons run."""
    name = 'compile'
    number = 1

    def run(self, tests):
        # Hand every requested binary to a single scons invocation.
        targets = [test.full_path() for test in tests]
        scons(*([ 'USE_SYSTEMC=1' ] + list(self.args) + targets))
129
class RunPhase(TestPhaseBase):
    """Phase which runs the compiled tests, optionally in parallel and
    with a per-test time limit enforced by the 'timeout' utility."""
    name = 'execute'
    number = 2

    def run(self, tests):
        parser = argparse.ArgumentParser()
        parser.add_argument('--timeout', type=int, metavar='SECONDS',
                            help='Time limit for each run in seconds.',
                            default=0)
        parser.add_argument('-j', type=int, default=1,
                            help='How many tests to run in parallel.')
        args = parser.parse_args(self.args)

        # If a test is still alive args.timeout seconds after the
        # initial TERM signal, 'timeout' kills it outright.
        timeout_cmd = [
            'timeout',
            '--kill-after', str(args.timeout * 2),
            str(args.timeout)
        ]
        curdir = os.getcwd()
        def run_test(test):
            cmd = []
            if args.timeout:
                cmd.extend(timeout_cmd)
            cmd.extend([
                test.full_path(),
                '-red', os.path.abspath(test.m5out_dir()),
                '--listener-mode=off',
                '--quiet',
                config_path,
                '--working-dir',
                os.path.dirname(test.src_dir())
            ])
            # Ensure the output directory exists.
            if not os.path.exists(test.m5out_dir()):
                os.makedirs(test.m5out_dir())
            try:
                subprocess.check_call(cmd)
            except subprocess.CalledProcessError as error:
                # 'except X, e' is python 2 only syntax; 'as' works in
                # both python 2 and 3.
                returncode = error.returncode
            else:
                returncode = 0
            # NOTE(review): presumably the simulated run can change our
            # working directory (--working-dir); restore it before
            # writing results — confirm against config.py.
            os.chdir(curdir)
            with open(test.returncode_file(), 'w') as rc:
                rc.write('%d\n' % returncode)

        runnable = [t for t in tests if not t.compile_only]
        if args.j == 1:
            # Explicit loop, not map(): map() is lazy under python 3 and
            # would silently run nothing.
            for test in runnable:
                run_test(test)
        else:
            tp = multiprocessing.pool.ThreadPool(args.j)
            for test in runnable:
                tp.apply_async(run_test, (test,))
            tp.close()
            tp.join()
183
class Checker(object):
    """Compares a test output file against a reference ("golden") file.

    'tag' identifies which output this checker covers, for reporting.
    """
    def __init__(self, ref, test, tag):
        self.ref = ref
        self.test = test
        self.tag = tag

    def check(self):
        """Return True if the test output matches the reference exactly.

        Bug fix: this used to open self.text, an attribute which was
        never set, so every call raised AttributeError.
        """
        with open(self.test) as test_f, open(self.ref) as ref_f:
            return test_f.read() == ref_f.read()
193
def tagged_filt(tag, num):
    """Build a regex matching a gem5 report block for the given message
    tag ('Error', 'Warning', ...) and message number."""
    initial = tag[0]
    pattern = (r'^\n{}: \({}{}\) .*\n(In file: .*\n)?'
               r'(In process: [\w.]* @ .*\n)?')
    return pattern.format(tag, initial, num)
197
def error_filt(num):
    """Filter matching an Error report with message number num."""
    return tagged_filt('Error', num)

def warning_filt(num):
    """Filter matching a Warning report with message number num."""
    return tagged_filt('Warning', num)

def info_filt(num):
    """Filter matching an Info report with message number num."""
    return tagged_filt('Info', num)
206
class LogChecker(Checker):
    """Checker for simulator log output which strips out lines that
    legitimately differ from run to run before comparing."""
    def merge_filts(*filts):
        # Plain function evaluated at class definition time (not a
        # method): OR the individual filter regexes together into one
        # compiled multi-line pattern.
        filts = map(lambda f: '(' + f + ')', filts)
        filts = '|'.join(filts)
        return re.compile(filts, flags=re.MULTILINE)

    # The reporting mechanism will print the actual filename when running in
    # gem5, and the "golden" output will say "<removed by verify.py>". We want
    # to strip out both versions to make comparing the output sensible.
    in_file_filt = r'^In file: ((<removed by verify\.pl>)|([a-zA-Z0-9.:_/]*))$'
217
213 ref_filt = merge_filts(
214 r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n',
215 r'^SystemC Simulation\n',
216 r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: ' +
217 r'You can turn off(.*\n){7}',
218 r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: \n' +
219 r' sc_clock\(const char(.*\n){3}',
220 warning_filt(540),
221 warning_filt(569),
222 warning_filt(571),
223 error_filt(541),
224 error_filt(542),
225 error_filt(543),
226 info_filt(804),
218 ref_filt = merge_filts(
219 r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n',
220 r'^SystemC Simulation\n',
221 r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: ' +
222 r'You can turn off(.*\n){7}',
223 r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: \n' +
224 r' sc_clock\(const char(.*\n){3}',
225 warning_filt(540),
226 warning_filt(569),
227 warning_filt(571),
228 error_filt(541),
229 error_filt(542),
230 error_filt(543),
231 info_filt(804),
232 in_file_filt,
227 )
228 test_filt = merge_filts(
229 r'^Global frequency set at \d* ticks per second\n',
230 info_filt(804),
233 )
234 test_filt = merge_filts(
235 r'^Global frequency set at \d* ticks per second\n',
236 info_filt(804),
237 in_file_filt,
231 )
232
    def __init__(self, ref, test, tag, out_dir):
        # out_dir is where a ".diff" file is written when the logs differ.
        super(LogChecker, self).__init__(ref, test, tag)
        self.out_dir = out_dir
236
237 def apply_filters(self, data, filts):
238 re.sub(filt, '', data)
239
240 def check(self):
241 test_file = os.path.basename(self.test)
242 ref_file = os.path.basename(self.ref)
243 with open(self.test) as test_f, open(self.ref) as ref_f:
244 test = re.sub(self.test_filt, '', test_f.read())
245 ref = re.sub(self.ref_filt, '', ref_f.read())
246 diff_file = '.'.join([ref_file, 'diff'])
247 diff_path = os.path.join(self.out_dir, diff_file)
248 if test != ref:
249 with open(diff_path, 'w') as diff_f:
250 for line in difflib.unified_diff(
251 ref.splitlines(True), test.splitlines(True),
252 fromfile=ref_file,
253 tofile=test_file):
254 diff_f.write(line)
255 return False
256 else:
257 if os.path.exists(diff_path):
258 os.unlink(diff_path)
259 return True
260
class GoldenDir(object):
    """Wraps a test's golden output directory.

    Entries may be platform specific (suffixed with '.<platform>') or
    common. Entries are marked as they're claimed so leftover reference
    files can be reported as missing test outputs.
    """
    def __init__(self, path, platform):
        self.path = path
        self.platform = platform

        contents = os.listdir(path)

        self.entries = {}
        class Entry(object):
            # One golden file, and whether anything has claimed it yet.
            def __init__(self, e_path):
                self.used = False
                self.path = os.path.join(path, e_path)

            def use(self):
                self.used = True

        for entry in contents:
            self.entries[entry] = Entry(entry)

    def entry(self, name):
        """Return the path of the golden entry for name, preferring a
        platform specific version, marking all candidates used.
        Returns None when there is no matching entry."""
        def match(n):
            return (n == name) or n.startswith(name + '.')
        matches = { n: e for n, e in self.entries.items() if match(n) }

        for m in matches.values():
            m.use()

        platform_name = '.'.join([ name, self.platform ])
        if platform_name in matches:
            return matches[platform_name].path
        if name in matches:
            return matches[name].path
        return None

    def unused(self):
        """Return the base names of golden entries nothing has claimed.

        Bug fix for python 3 compatibility: dict.items() is not a
        sortable list there, so build a sorted list explicitly.
        """
        items = sorted(i for i in self.entries.items() if not i[1].used)
        sources = []
        i = 0
        while i < len(items):
            root = items[i][0]
            sources.append(root)
            i += 1
            # Skip entries which are suffixed variants of this root.
            while i < len(items) and items[i][0].startswith(root):
                i += 1
        return sources
314
class VerifyPhase(TestPhaseBase):
    """Phase which checks each test's outputs against its golden
    references and reports/records the results."""
    name = 'verify'
    number = 3

    def reset_status(self):
        # Tests that passed, and failed tests grouped by failure cause.
        self._passed = []
        self._failed = {}

    def passed(self, test):
        self._passed.append(test)

    def failed(self, test, cause, note=''):
        """Record a failure under the given cause, with an optional note."""
        test.set_prop('note', note)
        self._failed.setdefault(cause, []).append(test)

    def print_status(self):
        total_passed = len(self._passed)
        total_failed = sum(len(tests) for tests in self._failed.values())
        print()
        print('Passed: {passed:4} - Failed: {failed:4}'.format(
                  passed=total_passed, failed=total_failed))

    def write_result_file(self, path):
        # List comprehensions, not map(): under python 3 map objects are
        # not json serializable.
        results = {
            'passed': [t.props for t in self._passed],
            'failed': {
                cause: [t.props for t in tests] for
                    cause, tests in self._failed.items()
            }
        }
        with open(path, 'w') as rf:
            json.dump(results, rf)

    def print_results(self):
        print()
        print('Passed:')
        for path in sorted(t.path for t in self._passed):
            print('    ', path)

        print()
        print('Failed:')

        causes = []
        for cause, tests in sorted(self._failed.items()):
            block = '  ' + cause.capitalize() + ':\n'
            for test in sorted(tests, key=lambda t: t.path):
                block += '    ' + test.path
                if test.note:
                    block += ' - ' + test.note
                block += '\n'
            causes.append(block)

        print('\n'.join(causes))

    def run(self, tests):
        parser = argparse.ArgumentParser()
        result_opts = parser.add_mutually_exclusive_group()
        result_opts.add_argument('--result-file', action='store_true',
                help='Create a results.json file in the current directory.')
        result_opts.add_argument('--result-file-at', metavar='PATH',
                help='Create a results json file at the given path.')
        parser.add_argument('--print-results', action='store_true',
                help='Print a list of tests that passed or failed')
        args = parser.parse_args(self.args)

        self.reset_status()

        runnable = [t for t in tests if not t.compile_only]
        compile_only = [t for t in tests if t.compile_only]

        # A compile only test passes if its binary was built.
        for test in compile_only:
            if os.path.exists(test.full_path()):
                self.passed(test)
            else:
                self.failed(test, 'compile failed')

        for test in runnable:
            with open(test.returncode_file()) as rc:
                returncode = int(rc.read())

            expected_returncode = 0
            if os.path.exists(test.expected_returncode_file()):
                with open(test.expected_returncode_file()) as erc:
                    expected_returncode = int(erc.read())

            # 124 is what the 'timeout' utility returns on a time out.
            if returncode == 124:
                self.failed(test, 'time out')
                continue
            elif returncode != expected_returncode:
                if expected_returncode == 0:
                    self.failed(test, 'abort')
                else:
                    self.failed(test, 'missed abort')
                continue

            out_dir = test.m5out_dir()

            diffs = []

            gd = GoldenDir(test.golden_dir(), 'linux64')

            missing = []
            log_file = '.'.join([test.name, 'log'])
            log_path = gd.entry(log_file)
            simout_path = os.path.join(out_dir, 'simout')
            if not os.path.exists(simout_path):
                missing.append('log output')
            elif log_path:
                diffs.append(LogChecker(log_path, simout_path,
                                        log_file, out_dir))

            for name in gd.unused():
                test_path = os.path.join(out_dir, name)
                ref_path = gd.entry(name)
                if not os.path.exists(test_path):
                    missing.append(name)
                else:
                    diffs.append(Checker(ref_path, test_path, name))

            if missing:
                self.failed(test, 'missing output', ' '.join(missing))
                continue

            # Evaluate every checker eagerly so all diff files get
            # written, then report any that mismatched.
            failed_diffs = [d for d in diffs if not d.check()]
            if failed_diffs:
                tags = [d.tag for d in failed_diffs]
                self.failed(test, 'failed diffs', ' '.join(tags))
                continue

            self.passed(test)

        if args.print_results:
            self.print_results()

        self.print_status()

        result_path = None
        if args.result_file:
            result_path = os.path.join(os.getcwd(), 'results.json')
        elif args.result_file_at:
            result_path = args.result_file_at

        if result_path:
            self.write_result_file(result_path)
462
463
# Top-level command line interface shared by all phases.
parser = argparse.ArgumentParser(description='SystemC test utility')

parser.add_argument('build_dir', metavar='BUILD_DIR',
                    help='The build directory (ie. build/ARM).')
parser.add_argument('--update-json', action='store_true',
                    help='Update the json manifest of tests.')
parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'],
                    default='opt',
                    help='Flavor of binary to test.')
parser.add_argument('--list', action='store_true',
                    help='List the available tests')

# --filter and --filter-file are alternative ways to supply the same
# python filter expression, so only one may be given.
filter_opts = parser.add_mutually_exclusive_group()
filter_opts.add_argument('--filter', default='True',
                         help='Python expression which filters tests based '
                              'on their properties')
filter_opts.add_argument('--filter-file', default=None,
                         type=argparse.FileType('r'),
                         help='Same as --filter, but read from a file')
486
def collect_phases(args):
    """Split argv on '--phase' delimiters into the main arguments and
    per-phase argument groups; return (main_args, phases) with the
    phases sorted into execution order."""
    phase_groups = [list(g) for k, g in
            itertools.groupby(args, lambda x: x != '--phase') if k]
    main_args = parser.parse_args(phase_groups[0][1:])
    phases = []
    names = []
    for group in phase_groups[1:]:
        name = group[0]
        if name in names:
            # Bug fix: this raised the nonexistent 'RuntimeException',
            # and 'names' was never appended to, so duplicate phases
            # were never actually caught.
            raise RuntimeError('Phase %s specified more than once' % name)
        names.append(name)
        phase = test_phase_classes[name]
        phases.append(phase(main_args, *group[1:]))
    phases.sort()
    return main_args, phases
501
main_args, phases = collect_phases(sys.argv)

# With no explicit --phase arguments, run all three phases in order.
if len(phases) == 0:
    phases = [
        CompilePhase(main_args),
        RunPhase(main_args),
        VerifyPhase(main_args)
    ]

json_path = os.path.join(main_args.build_dir, json_rel_path)

if main_args.update_json:
    scons(os.path.join(json_path))

with open(json_path) as f:
    test_data = json.load(f)

    if main_args.filter_file:
        f = main_args.filter_file
        filt = compile(f.read(), f.name, 'eval')
    else:
        filt = compile(main_args.filter, '<string>', 'eval')

    # eval of a user supplied expression is intentional: the filter is
    # written by the person running the tests.
    # items() (not python 2 only iteritems()) throughout, so this also
    # works under python 3.
    filtered_tests = {
        target: props for (target, props) in
            test_data.items() if eval(filt, dict(props))
    }

    if len(filtered_tests) == 0:
        print('All tests were filtered out.')
        exit()

    if main_args.list:
        for target, props in sorted(filtered_tests.items()):
            print('%s.%s' % (target, main_args.flavor))
            for key, val in props.items():
                print('    %s: %s' % (key, val))
        print('Total tests: %d' % len(filtered_tests))
    else:
        tests_to_run = [
            Test(target, main_args.flavor, main_args.build_dir, props) for
                target, props in sorted(filtered_tests.items())
        ]

        for phase in phases:
            phase.run(tests_to_run)
238 )
239
    def __init__(self, ref, test, tag, out_dir):
        # out_dir is where a ".diff" file is written when the logs differ.
        super(LogChecker, self).__init__(ref, test, tag)
        self.out_dir = out_dir
243
244 def apply_filters(self, data, filts):
245 re.sub(filt, '', data)
246
247 def check(self):
248 test_file = os.path.basename(self.test)
249 ref_file = os.path.basename(self.ref)
250 with open(self.test) as test_f, open(self.ref) as ref_f:
251 test = re.sub(self.test_filt, '', test_f.read())
252 ref = re.sub(self.ref_filt, '', ref_f.read())
253 diff_file = '.'.join([ref_file, 'diff'])
254 diff_path = os.path.join(self.out_dir, diff_file)
255 if test != ref:
256 with open(diff_path, 'w') as diff_f:
257 for line in difflib.unified_diff(
258 ref.splitlines(True), test.splitlines(True),
259 fromfile=ref_file,
260 tofile=test_file):
261 diff_f.write(line)
262 return False
263 else:
264 if os.path.exists(diff_path):
265 os.unlink(diff_path)
266 return True
267
class GoldenDir(object):
    """Wraps a test's golden output directory.

    Entries may be platform specific (suffixed with '.<platform>') or
    common. Entries are marked as they're claimed so leftover reference
    files can be reported as missing test outputs.
    """
    def __init__(self, path, platform):
        self.path = path
        self.platform = platform

        contents = os.listdir(path)

        self.entries = {}
        class Entry(object):
            # One golden file, and whether anything has claimed it yet.
            def __init__(self, e_path):
                self.used = False
                self.path = os.path.join(path, e_path)

            def use(self):
                self.used = True

        for entry in contents:
            self.entries[entry] = Entry(entry)

    def entry(self, name):
        """Return the path of the golden entry for name, preferring a
        platform specific version, marking all candidates used.
        Returns None when there is no matching entry."""
        def match(n):
            return (n == name) or n.startswith(name + '.')
        matches = { n: e for n, e in self.entries.items() if match(n) }

        for m in matches.values():
            m.use()

        platform_name = '.'.join([ name, self.platform ])
        if platform_name in matches:
            return matches[platform_name].path
        if name in matches:
            return matches[name].path
        return None

    def unused(self):
        """Return the base names of golden entries nothing has claimed.

        Bug fix for python 3 compatibility: dict.items() is not a
        sortable list there, so build a sorted list explicitly.
        """
        items = sorted(i for i in self.entries.items() if not i[1].used)
        sources = []
        i = 0
        while i < len(items):
            root = items[i][0]
            sources.append(root)
            i += 1
            # Skip entries which are suffixed variants of this root.
            while i < len(items) and items[i][0].startswith(root):
                i += 1
        return sources
321
class VerifyPhase(TestPhaseBase):
    """Phase which checks each test's outputs against its golden
    references and reports/records the results."""
    name = 'verify'
    number = 3

    def reset_status(self):
        # Tests that passed, and failed tests grouped by failure cause.
        self._passed = []
        self._failed = {}

    def passed(self, test):
        self._passed.append(test)

    def failed(self, test, cause, note=''):
        """Record a failure under the given cause, with an optional note."""
        test.set_prop('note', note)
        self._failed.setdefault(cause, []).append(test)

    def print_status(self):
        total_passed = len(self._passed)
        total_failed = sum(len(tests) for tests in self._failed.values())
        print()
        print('Passed: {passed:4} - Failed: {failed:4}'.format(
                  passed=total_passed, failed=total_failed))

    def write_result_file(self, path):
        # List comprehensions, not map(): under python 3 map objects are
        # not json serializable.
        results = {
            'passed': [t.props for t in self._passed],
            'failed': {
                cause: [t.props for t in tests] for
                    cause, tests in self._failed.items()
            }
        }
        with open(path, 'w') as rf:
            json.dump(results, rf)

    def print_results(self):
        print()
        print('Passed:')
        for path in sorted(t.path for t in self._passed):
            print('    ', path)

        print()
        print('Failed:')

        causes = []
        for cause, tests in sorted(self._failed.items()):
            block = '  ' + cause.capitalize() + ':\n'
            for test in sorted(tests, key=lambda t: t.path):
                block += '    ' + test.path
                if test.note:
                    block += ' - ' + test.note
                block += '\n'
            causes.append(block)

        print('\n'.join(causes))

    def run(self, tests):
        parser = argparse.ArgumentParser()
        result_opts = parser.add_mutually_exclusive_group()
        result_opts.add_argument('--result-file', action='store_true',
                help='Create a results.json file in the current directory.')
        result_opts.add_argument('--result-file-at', metavar='PATH',
                help='Create a results json file at the given path.')
        parser.add_argument('--print-results', action='store_true',
                help='Print a list of tests that passed or failed')
        args = parser.parse_args(self.args)

        self.reset_status()

        runnable = [t for t in tests if not t.compile_only]
        compile_only = [t for t in tests if t.compile_only]

        # A compile only test passes if its binary was built.
        for test in compile_only:
            if os.path.exists(test.full_path()):
                self.passed(test)
            else:
                self.failed(test, 'compile failed')

        for test in runnable:
            with open(test.returncode_file()) as rc:
                returncode = int(rc.read())

            expected_returncode = 0
            if os.path.exists(test.expected_returncode_file()):
                with open(test.expected_returncode_file()) as erc:
                    expected_returncode = int(erc.read())

            # 124 is what the 'timeout' utility returns on a time out.
            if returncode == 124:
                self.failed(test, 'time out')
                continue
            elif returncode != expected_returncode:
                if expected_returncode == 0:
                    self.failed(test, 'abort')
                else:
                    self.failed(test, 'missed abort')
                continue

            out_dir = test.m5out_dir()

            diffs = []

            gd = GoldenDir(test.golden_dir(), 'linux64')

            missing = []
            log_file = '.'.join([test.name, 'log'])
            log_path = gd.entry(log_file)
            simout_path = os.path.join(out_dir, 'simout')
            if not os.path.exists(simout_path):
                missing.append('log output')
            elif log_path:
                diffs.append(LogChecker(log_path, simout_path,
                                        log_file, out_dir))

            for name in gd.unused():
                test_path = os.path.join(out_dir, name)
                ref_path = gd.entry(name)
                if not os.path.exists(test_path):
                    missing.append(name)
                else:
                    diffs.append(Checker(ref_path, test_path, name))

            if missing:
                self.failed(test, 'missing output', ' '.join(missing))
                continue

            # Evaluate every checker eagerly so all diff files get
            # written, then report any that mismatched.
            failed_diffs = [d for d in diffs if not d.check()]
            if failed_diffs:
                tags = [d.tag for d in failed_diffs]
                self.failed(test, 'failed diffs', ' '.join(tags))
                continue

            self.passed(test)

        if args.print_results:
            self.print_results()

        self.print_status()

        result_path = None
        if args.result_file:
            result_path = os.path.join(os.getcwd(), 'results.json')
        elif args.result_file_at:
            result_path = args.result_file_at

        if result_path:
            self.write_result_file(result_path)
469
470
# Top-level command line interface shared by all phases.
parser = argparse.ArgumentParser(description='SystemC test utility')

parser.add_argument('build_dir', metavar='BUILD_DIR',
                    help='The build directory (ie. build/ARM).')
parser.add_argument('--update-json', action='store_true',
                    help='Update the json manifest of tests.')
parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'],
                    default='opt',
                    help='Flavor of binary to test.')
parser.add_argument('--list', action='store_true',
                    help='List the available tests')

# --filter and --filter-file are alternative ways to supply the same
# python filter expression, so only one may be given.
filter_opts = parser.add_mutually_exclusive_group()
filter_opts.add_argument('--filter', default='True',
                         help='Python expression which filters tests based '
                              'on their properties')
filter_opts.add_argument('--filter-file', default=None,
                         type=argparse.FileType('r'),
                         help='Same as --filter, but read from a file')
493
def collect_phases(args):
    """Split argv on '--phase' delimiters into the main arguments and
    per-phase argument groups; return (main_args, phases) with the
    phases sorted into execution order."""
    phase_groups = [list(g) for k, g in
            itertools.groupby(args, lambda x: x != '--phase') if k]
    main_args = parser.parse_args(phase_groups[0][1:])
    phases = []
    names = []
    for group in phase_groups[1:]:
        name = group[0]
        if name in names:
            # Bug fix: this raised the nonexistent 'RuntimeException',
            # and 'names' was never appended to, so duplicate phases
            # were never actually caught.
            raise RuntimeError('Phase %s specified more than once' % name)
        names.append(name)
        phase = test_phase_classes[name]
        phases.append(phase(main_args, *group[1:]))
    phases.sort()
    return main_args, phases
508
main_args, phases = collect_phases(sys.argv)

# With no explicit --phase arguments, run all three phases in order.
if len(phases) == 0:
    phases = [
        CompilePhase(main_args),
        RunPhase(main_args),
        VerifyPhase(main_args)
    ]

json_path = os.path.join(main_args.build_dir, json_rel_path)

if main_args.update_json:
    scons(os.path.join(json_path))

with open(json_path) as f:
    test_data = json.load(f)

    if main_args.filter_file:
        f = main_args.filter_file
        filt = compile(f.read(), f.name, 'eval')
    else:
        filt = compile(main_args.filter, '<string>', 'eval')

    # eval of a user supplied expression is intentional: the filter is
    # written by the person running the tests.
    # items() (not python 2 only iteritems()) throughout, so this also
    # works under python 3.
    filtered_tests = {
        target: props for (target, props) in
            test_data.items() if eval(filt, dict(props))
    }

    if len(filtered_tests) == 0:
        print('All tests were filtered out.')
        exit()

    if main_args.list:
        for target, props in sorted(filtered_tests.items()):
            print('%s.%s' % (target, main_args.flavor))
            for key, val in props.items():
                print('    %s: %s' % (key, val))
        print('Total tests: %d' % len(filtered_tests))
    else:
        tests_to_run = [
            Test(target, main_args.flavor, main_args.build_dir, props) for
                target, props in sorted(filtered_tests.items())
        ]

        for phase in phases:
            phase.run(tests_to_run)