verify.py (13242:b4d52d9afc7f) verify.py (13250:4680968cc4cb)
1#!/usr/bin/env python2
2#
3# Copyright 2018 Google, Inc.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Gabe Black
29
30from __future__ import print_function
31
32import argparse
33import collections
34import difflib
35import functools
36import inspect
37import itertools
38import json
39import multiprocessing.pool
40import os
41import re
42import subprocess
43import sys
44
# Absolute location of this script; the test sources and config.py
# live beside it.
script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
config_path = os.path.join(script_dir, 'config.py')

# Paths relative to the build directory where the tests and the json
# manifest describing them are found.
systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')
52
53
54
def scons(*args):
    """Invoke scons with the given arguments, raising on failure."""
    command = ['scons']
    command.extend(args)
    subprocess.check_call(command)
58
59
60
class Test(object):
    """One SystemC test target and its filesystem locations.

    Properties from the json manifest (e.g. path, name, compile_only)
    are mirrored as attributes and also kept in self.props so they can
    be serialized back out in result files.
    """

    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        self.suffix = suffix
        self.build_dir = build_dir
        self.props = {}

        for key, val in props.iteritems():
            self.set_prop(key, val)

    def set_prop(self, key, val):
        # Record the property both as an attribute and in the dict.
        setattr(self, key, val)
        self.props[key] = val

    def dir(self):
        # Directory the test builds/runs in, under the build tree.
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        # Source directory of the test, relative to this script.
        return os.path.join(script_dir, self.path)

    def expected_returncode_file(self):
        # Optional file holding a non-zero expected exit status.
        return os.path.join(self.src_dir(), 'expected_returncode')

    def golden_dir(self):
        # Directory holding the reference ("golden") outputs.
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        # Binary name, e.g. "<name>.opt".
        return '.'.join([self.name, self.suffix])

    def full_path(self):
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        # Per-flavor output directory, e.g. "m5out.opt".
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        # Where the execute phase records the actual exit status.
        return os.path.join(self.m5out_dir(), 'returncode')
98
99
100
# Registry mapping phase names (e.g. 'compile') to their classes,
# filled in automatically by TestPhaseMeta as subclasses are defined.
test_phase_classes = {}

class TestPhaseMeta(type):
    """Metaclass which registers concrete test phase classes.

    Classes which set 'abstract = True' in their body are skipped; all
    others must define a 'name' attribute used as the registry key.
    """
    def __init__(cls, name, bases, d):
        if not d.pop('abstract', False):
            test_phase_classes[d['name']] = cls

        super(TestPhaseMeta, cls).__init__(name, bases, d)
109
class TestPhaseBase(object):
    """Base class for test phases (compile/execute/verify).

    main_args holds the script-wide options; args are the extra
    command line arguments given to this particular phase.
    """
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        self.main_args = main_args
        self.args = args

    def __lt__(self, other):
        # Phases sort by their fixed 'number' so they run in order.
        return self.number < other.number
120
class CompilePhase(TestPhaseBase):
    """Phase which builds the test binaries through scons."""
    name = 'compile'
    number = 1

    def run(self, tests):
        targets = list([test.full_path() for test in tests])

        # If this phase wasn't given an explicit -j, fall back to the
        # script-wide parallelism setting.
        parser = argparse.ArgumentParser()
        parser.add_argument('-j', type=int, default=0)
        args, leftovers = parser.parse_known_args(self.args)
        if args.j == 0:
            self.args = ('-j', str(self.main_args.j)) + self.args

        scons_args = [ 'USE_SYSTEMC=1' ] + list(self.args) + targets
        scons(*scons_args)
136
class RunPhase(TestPhaseBase):
    """Phase which runs the compiled tests, optionally in parallel."""
    name = 'execute'
    number = 2

    def run(self, tests):
        parser = argparse.ArgumentParser()
        parser.add_argument('--timeout', type=int, metavar='SECONDS',
                            help='Time limit for each run in seconds, '
                            '0 to disable.',
                            default=60)
        parser.add_argument('-j', type=int, default=0,
                            help='How many tests to run in parallel.')
        args = parser.parse_args(self.args)

        # Wrap each run in coreutils 'timeout'; escalate to SIGKILL if
        # the process outlives twice the soft limit.
        timeout_cmd = [
            'timeout',
            '--kill-after', str(args.timeout * 2),
            str(args.timeout)
        ]
        curdir = os.getcwd()
        def run_test(test):
            cmd = []
            if args.timeout:
                cmd.extend(timeout_cmd)
            cmd.extend([
                test.full_path(),
                '-rd', os.path.abspath(test.m5out_dir()),
                '--listener-mode=off',
                '--quiet',
                config_path,
                '--working-dir',
                os.path.dirname(test.src_dir())
            ])
            # Ensure the output directory exists.
            if not os.path.exists(test.m5out_dir()):
                os.makedirs(test.m5out_dir())
            try:
                subprocess.check_call(cmd)
            except subprocess.CalledProcessError, error:
                returncode = error.returncode
            else:
                returncode = 0
            # NOTE(review): nothing above changes the cwd, and chdir
            # from worker threads would race -- confirm whether this
            # restore is actually needed.
            os.chdir(curdir)
            with open(test.returncode_file(), 'w') as rc:
                rc.write('%d\n' % returncode)

        # A phase-local -j overrides the script-wide default.
        j = self.main_args.j if args.j == 0 else args.j

        runnable = filter(lambda t: not t.compile_only, tests)
        if j == 1:
            map(run_test, runnable)
        else:
            tp = multiprocessing.pool.ThreadPool(j)
            map(lambda t: tp.apply_async(run_test, (t,)), runnable)
            tp.close()
            tp.join()
193
class Checker(object):
    """Compares one test output file against its golden reference.

    The check passes when the two files are byte-for-byte identical;
    tag identifies which output this checker is responsible for.
    """

    def __init__(self, ref, test, tag):
        self.ref = ref
        self.test = test
        self.tag = tag

    def check(self):
        with open(self.test) as test_f:
            actual = test_f.read()
        with open(self.ref) as ref_f:
            expected = ref_f.read()
        return actual == expected
203
def tagged_filt(tag, num):
    """Build a regex snippet matching one tagged report message.

    Matches e.g. "\\nWarning: (W540) ..." together with the optional
    "In file:" and "In process:" follow-up lines printed with it.
    """
    prefix = tag[0]
    return (r'\n{}: \({}{}\) .*\n(In file: .*\n)?'
            r'(In process: [\w.]* @ .*\n)?').format(tag, prefix, num)

def error_filt(num):
    """Regex filter for an Error report with the given number."""
    return tagged_filt('Error', num)

def warning_filt(num):
    """Regex filter for a Warning report with the given number."""
    return tagged_filt('Warning', num)

def info_filt(num):
    """Regex filter for an Info report with the given number."""
    return tagged_filt('Info', num)
216
class DiffingChecker(Checker):
    """Checker which records a unified diff when outputs differ.

    On failure the diff is written to '<ref name>.diff' inside
    out_dir; on success any stale diff file is removed.
    """

    def __init__(self, ref, test, tag, out_dir):
        super(DiffingChecker, self).__init__(ref, test, tag)
        self.out_dir = out_dir

    def diffing_check(self, ref_lines, test_lines):
        """Compare line lists, maintaining the on-disk diff file.

        Returns True when they match, False otherwise.
        """
        test_file = os.path.basename(self.test)
        ref_file = os.path.basename(self.ref)

        diff_file = '.'.join([ref_file, 'diff'])
        diff_path = os.path.join(self.out_dir, diff_file)
        if test_lines != ref_lines:
            with open(diff_path, 'w') as diff_f:
                for line in difflib.unified_diff(
                    ref_lines, test_lines,
                    fromfile=ref_file,
                    tofile=test_file):
                    diff_f.write(line)
            return False
        else:
            # Clean up any diff left by a previous failing run.
            if os.path.exists(diff_path):
                os.unlink(diff_path)
            return True
240
241class LogChecker(DiffingChecker):
242 def merge_filts(*filts):
243 filts = map(lambda f: '(' + f + ')', filts)
244 filts = '|'.join(filts)
245 return re.compile(filts, flags=re.MULTILINE)
246
247 # The reporting mechanism will print the actual filename when running in
248 # gem5, and the "golden" output will say "<removed by verify.py>". We want
249 # to strip out both versions to make comparing the output sensible.
250 in_file_filt = r'^In file: ((<removed by verify\.pl>)|([a-zA-Z0-9.:_/]*))$'
251
252 ref_filt = merge_filts(
253 r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n',
254 r'^SystemC Simulation\n',
255 r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: ' +
256 r'You can turn off(.*\n){7}',
257 r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: \n' +
258 r' sc_clock\(const char(.*\n){3}',
259 warning_filt(540),
260 warning_filt(571),
261 info_filt(804),
1#!/usr/bin/env python2
2#
3# Copyright 2018 Google, Inc.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Gabe Black
29
30from __future__ import print_function
31
32import argparse
33import collections
34import difflib
35import functools
36import inspect
37import itertools
38import json
39import multiprocessing.pool
40import os
41import re
42import subprocess
43import sys
44
# Absolute location of this script; the test sources and config.py
# live beside it.
script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
config_path = os.path.join(script_dir, 'config.py')

# Paths relative to the build directory where the tests and the json
# manifest describing them are found.
systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')
52
53
54
def scons(*args):
    """Invoke scons with the given arguments, raising on failure."""
    command = ['scons']
    command.extend(args)
    subprocess.check_call(command)
58
59
60
class Test(object):
    """One SystemC test target and its filesystem locations.

    Properties from the json manifest (e.g. path, name, compile_only)
    are mirrored as attributes and also kept in self.props so they can
    be serialized back out in result files.
    """

    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        self.suffix = suffix
        self.build_dir = build_dir
        self.props = {}

        for key, val in props.iteritems():
            self.set_prop(key, val)

    def set_prop(self, key, val):
        # Record the property both as an attribute and in the dict.
        setattr(self, key, val)
        self.props[key] = val

    def dir(self):
        # Directory the test builds/runs in, under the build tree.
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        # Source directory of the test, relative to this script.
        return os.path.join(script_dir, self.path)

    def expected_returncode_file(self):
        # Optional file holding a non-zero expected exit status.
        return os.path.join(self.src_dir(), 'expected_returncode')

    def golden_dir(self):
        # Directory holding the reference ("golden") outputs.
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        # Binary name, e.g. "<name>.opt".
        return '.'.join([self.name, self.suffix])

    def full_path(self):
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        # Per-flavor output directory, e.g. "m5out.opt".
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        # Where the execute phase records the actual exit status.
        return os.path.join(self.m5out_dir(), 'returncode')
98
99
100
# Registry mapping phase names (e.g. 'compile') to their classes,
# filled in automatically by TestPhaseMeta as subclasses are defined.
test_phase_classes = {}

class TestPhaseMeta(type):
    """Metaclass which registers concrete test phase classes.

    Classes which set 'abstract = True' in their body are skipped; all
    others must define a 'name' attribute used as the registry key.
    """
    def __init__(cls, name, bases, d):
        if not d.pop('abstract', False):
            test_phase_classes[d['name']] = cls

        super(TestPhaseMeta, cls).__init__(name, bases, d)
109
class TestPhaseBase(object):
    """Base class for test phases (compile/execute/verify).

    main_args holds the script-wide options; args are the extra
    command line arguments given to this particular phase.
    """
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        self.main_args = main_args
        self.args = args

    def __lt__(self, other):
        # Phases sort by their fixed 'number' so they run in order.
        return self.number < other.number
120
class CompilePhase(TestPhaseBase):
    """Phase which builds the test binaries through scons."""
    name = 'compile'
    number = 1

    def run(self, tests):
        targets = list([test.full_path() for test in tests])

        # If this phase wasn't given an explicit -j, fall back to the
        # script-wide parallelism setting.
        parser = argparse.ArgumentParser()
        parser.add_argument('-j', type=int, default=0)
        args, leftovers = parser.parse_known_args(self.args)
        if args.j == 0:
            self.args = ('-j', str(self.main_args.j)) + self.args

        scons_args = [ 'USE_SYSTEMC=1' ] + list(self.args) + targets
        scons(*scons_args)
136
class RunPhase(TestPhaseBase):
    """Phase which runs the compiled tests, optionally in parallel."""
    name = 'execute'
    number = 2

    def run(self, tests):
        parser = argparse.ArgumentParser()
        parser.add_argument('--timeout', type=int, metavar='SECONDS',
                            help='Time limit for each run in seconds, '
                            '0 to disable.',
                            default=60)
        parser.add_argument('-j', type=int, default=0,
                            help='How many tests to run in parallel.')
        args = parser.parse_args(self.args)

        # Wrap each run in coreutils 'timeout'; escalate to SIGKILL if
        # the process outlives twice the soft limit.
        timeout_cmd = [
            'timeout',
            '--kill-after', str(args.timeout * 2),
            str(args.timeout)
        ]
        curdir = os.getcwd()
        def run_test(test):
            cmd = []
            if args.timeout:
                cmd.extend(timeout_cmd)
            cmd.extend([
                test.full_path(),
                '-rd', os.path.abspath(test.m5out_dir()),
                '--listener-mode=off',
                '--quiet',
                config_path,
                '--working-dir',
                os.path.dirname(test.src_dir())
            ])
            # Ensure the output directory exists.
            if not os.path.exists(test.m5out_dir()):
                os.makedirs(test.m5out_dir())
            try:
                subprocess.check_call(cmd)
            except subprocess.CalledProcessError, error:
                returncode = error.returncode
            else:
                returncode = 0
            # NOTE(review): nothing above changes the cwd, and chdir
            # from worker threads would race -- confirm whether this
            # restore is actually needed.
            os.chdir(curdir)
            with open(test.returncode_file(), 'w') as rc:
                rc.write('%d\n' % returncode)

        # A phase-local -j overrides the script-wide default.
        j = self.main_args.j if args.j == 0 else args.j

        runnable = filter(lambda t: not t.compile_only, tests)
        if j == 1:
            map(run_test, runnable)
        else:
            tp = multiprocessing.pool.ThreadPool(j)
            map(lambda t: tp.apply_async(run_test, (t,)), runnable)
            tp.close()
            tp.join()
193
class Checker(object):
    """Compares one test output file against its golden reference.

    The check passes when the two files are byte-for-byte identical;
    tag identifies which output this checker is responsible for.
    """

    def __init__(self, ref, test, tag):
        self.ref = ref
        self.test = test
        self.tag = tag

    def check(self):
        with open(self.test) as test_f:
            actual = test_f.read()
        with open(self.ref) as ref_f:
            expected = ref_f.read()
        return actual == expected
203
def tagged_filt(tag, num):
    """Build a regex snippet matching one tagged report message.

    Matches e.g. "\\nWarning: (W540) ..." together with the optional
    "In file:" and "In process:" follow-up lines printed with it.
    """
    prefix = tag[0]
    return (r'\n{}: \({}{}\) .*\n(In file: .*\n)?'
            r'(In process: [\w.]* @ .*\n)?').format(tag, prefix, num)

def error_filt(num):
    """Regex filter for an Error report with the given number."""
    return tagged_filt('Error', num)

def warning_filt(num):
    """Regex filter for a Warning report with the given number."""
    return tagged_filt('Warning', num)

def info_filt(num):
    """Regex filter for an Info report with the given number."""
    return tagged_filt('Info', num)
216
class DiffingChecker(Checker):
    """Checker which records a unified diff when outputs differ.

    On failure the diff is written to '<ref name>.diff' inside
    out_dir; on success any stale diff file is removed.
    """

    def __init__(self, ref, test, tag, out_dir):
        super(DiffingChecker, self).__init__(ref, test, tag)
        self.out_dir = out_dir

    def diffing_check(self, ref_lines, test_lines):
        """Compare line lists, maintaining the on-disk diff file.

        Returns True when they match, False otherwise.
        """
        test_file = os.path.basename(self.test)
        ref_file = os.path.basename(self.ref)

        diff_file = '.'.join([ref_file, 'diff'])
        diff_path = os.path.join(self.out_dir, diff_file)
        if test_lines != ref_lines:
            with open(diff_path, 'w') as diff_f:
                for line in difflib.unified_diff(
                    ref_lines, test_lines,
                    fromfile=ref_file,
                    tofile=test_file):
                    diff_f.write(line)
            return False
        else:
            # Clean up any diff left by a previous failing run.
            if os.path.exists(diff_path):
                os.unlink(diff_path)
            return True
240
class LogChecker(DiffingChecker):
    """Checker for the simulation log output.

    Both the reference and test logs are scrubbed with regex filters
    to drop lines which legitimately differ between runs (banners,
    deprecation notices, unimplemented-function warnings, etc.)
    before being diffed.
    """

    def merge_filts(*filts):
        # Combine the individual filter regexes into one alternation.
        # Called at class-definition time to build the two patterns
        # below; it is not used as a method.
        filts = map(lambda f: '(' + f + ')', filts)
        filts = '|'.join(filts)
        return re.compile(filts, flags=re.MULTILINE)

    # The reporting mechanism will print the actual filename when running in
    # gem5, and the "golden" output will say "<removed by verify.py>". We want
    # to strip out both versions to make comparing the output sensible.
    # NOTE(review): the pattern matches "verify.pl", not "verify.py" --
    # confirm which spelling the golden files actually contain.
    in_file_filt = r'^In file: ((<removed by verify\.pl>)|([a-zA-Z0-9.:_/]*))$'

    ref_filt = merge_filts(
        r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n',
        r'^SystemC Simulation\n',
        r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: ' +
        r'You can turn off(.*\n){7}',
        r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: \n' +
        r'    sc_clock\(const char(.*\n){3}',
        warning_filt(540),
        warning_filt(571),
        info_filt(804),
        info_filt(704),
        in_file_filt,
    )
    test_filt = merge_filts(
        r'^Global frequency set at \d* ticks per second\n',
        r'^info: Entering event queue @ \d*\. Starting simulation\.\.\.\n',
        r'warn: [^(]+\([^)]*\)( \[with [^]]*\])? not implemented\.\n',
        r'warn: Ignoring request to set stack size\.\n',
        info_filt(804),
        in_file_filt,
    )

    def apply_filters(self, data, filts):
        """Return data with every regex in filts removed.

        Fixed: the original referenced an undefined name 'filt',
        never iterated over filts, and discarded the substitution
        result instead of returning it.
        """
        for filt in filts:
            data = re.sub(filt, '', data)
        return data

    def check(self):
        # Scrub both logs, then diff them line by line.
        with open(self.test) as test_f, open(self.ref) as ref_f:
            test = re.sub(self.test_filt, '', test_f.read())
            ref = re.sub(self.ref_filt, '', ref_f.read())
            return self.diffing_check(ref.splitlines(True),
                                      test.splitlines(True))
282
class VcdChecker(DiffingChecker):
    """Checker for vcd trace files.

    The first seven lines of the test output are skipped since they
    hold date and version information which varies between runs.
    """
    def check(self):
        with open (self.test) as test_f, open(self.ref) as ref_f:
            ref = ref_f.read().splitlines(True)
            test = test_f.read().splitlines(True)
            # Strip off the first seven lines of the test output which are
            # date and version information.
            test = test[7:]

            return self.diffing_check(ref, test)
293
class GoldenDir(object):
    """Index of a test's golden (reference) output directory.

    Entries may exist in a generic form ('name') and/or a platform
    specific form ('name.<platform>'); lookups prefer the platform
    specific one. Used entries are tracked so unused() can report the
    reference files never consumed by a lookup.
    """

    class _Entry(object):
        # One golden file: its full path and whether a lookup used it.
        def __init__(self, dir_path, e_path):
            self.used = False
            self.path = os.path.join(dir_path, e_path)

        def use(self):
            self.used = True

    def __init__(self, path, platform):
        self.path = path
        self.platform = platform

        # Removed dead locals from the original ('suffixed', 'bases'
        # and 'common' were computed but never read).
        self.entries = {}
        for entry in os.listdir(path):
            self.entries[entry] = self._Entry(path, entry)

    def entry(self, name):
        """Return the path for name, preferring 'name.<platform>'.

        Marks every matching entry (name itself and any 'name.*') as
        used. Returns None when nothing matches.
        """
        def match(n):
            return (n == name) or n.startswith(name + '.')
        matches = { n: e for n, e in self.entries.items() if match(n) }

        for m in matches.values():
            m.use()

        platform_name = '.'.join([ name, self.platform ])
        if platform_name in matches:
            return matches[platform_name].path
        if name in matches:
            return matches[name].path
        return None

    def unused(self):
        """Return sorted root names of entries no lookup consumed.

        Entries sharing a root prefix (e.g. 'foo' and 'foo.vcd') are
        collapsed into the single root name. Rewritten with sorted()
        since the original's filter(...).sort() only works on
        python 2.
        """
        items = sorted(i for i in self.entries.items() if not i[1].used)
        sources = []
        i = 0
        while i < len(items):
            root = items[i][0]
            sources.append(root)
            i += 1
            while i < len(items) and items[i][0].startswith(root):
                i += 1
        return sources
347
class VerifyPhase(TestPhaseBase):
    """Phase which checks test results against expectations.

    A test passes when its exit status matches the expected one and
    every output it produced matches the golden reference files.
    """
    name = 'verify'
    number = 3

    def reset_status(self):
        # Passed tests, and failed tests keyed by failure cause.
        self._passed = []
        self._failed = {}

    def passed(self, test):
        self._passed.append(test)

    def failed(self, test, cause, note=''):
        # The note is stored on the test so it appears in reports.
        test.set_prop('note', note)
        self._failed.setdefault(cause, []).append(test)

    def print_status(self):
        # One line pass/fail summary.
        total_passed = len(self._passed)
        total_failed = sum(map(len, self._failed.values()))
        print()
        print('Passed: {passed:4} - Failed: {failed:4}'.format(
                  passed=total_passed, failed=total_failed))

    def write_result_file(self, path):
        # Serialize the properties of all tests as json, grouped by
        # outcome and failure cause.
        results = {
            'passed': map(lambda t: t.props, self._passed),
            'failed': {
                cause: map(lambda t: t.props, tests) for
                       cause, tests in self._failed.iteritems()
            }
        }
        with open(path, 'w') as rf:
            json.dump(results, rf)

    def print_results(self):
        # List passed test paths, then failed tests grouped by cause
        # with their notes.
        print()
        print('Passed:')
        for path in sorted(list([ t.path for t in self._passed ])):
            print('    ', path)

        print()
        print('Failed:')

        causes = []
        for cause, tests in sorted(self._failed.items()):
            block = '  ' + cause.capitalize() + ':\n'
            for test in sorted(tests, key=lambda t: t.path):
                block += '    ' + test.path
                if test.note:
                    block += ' - ' + test.note
                block += '\n'
            causes.append(block)

        print('\n'.join(causes))

    def run(self, tests):
        parser = argparse.ArgumentParser()
        result_opts = parser.add_mutually_exclusive_group()
        result_opts.add_argument('--result-file', action='store_true',
                help='Create a results.json file in the current directory.')
        result_opts.add_argument('--result-file-at', metavar='PATH',
                help='Create a results json file at the given path.')
        parser.add_argument('--no-print-results', action='store_true',
                help='Don\'t print a list of tests that passed or failed')
        args = parser.parse_args(self.args)

        self.reset_status()

        runnable = filter(lambda t: not t.compile_only, tests)
        compile_only = filter(lambda t: t.compile_only, tests)

        # Compile-only tests pass if their binary exists.
        for test in compile_only:
            if os.path.exists(test.full_path()):
                self.passed(test)
            else:
                self.failed(test, 'compile failed')

        for test in runnable:
            # Exit status recorded by the execute phase.
            with open(test.returncode_file()) as rc:
                returncode = int(rc.read())

            expected_returncode = 0
            if os.path.exists(test.expected_returncode_file()):
                with open(test.expected_returncode_file()) as erc:
                    expected_returncode = int(erc.read())

            # 124 is what the 'timeout' wrapper exits with on expiry.
            if returncode == 124:
                self.failed(test, 'time out')
                continue
            elif returncode != expected_returncode:
                if expected_returncode == 0:
                    self.failed(test, 'abort')
                else:
                    self.failed(test, 'missed abort')
                continue

            out_dir = test.m5out_dir()

            # NOTE(review): this namedtuple is never used below.
            Diff = collections.namedtuple(
                    'Diff', 'ref, test, tag, ref_filter')

            diffs = []

            gd = GoldenDir(test.golden_dir(), 'linux64')

            # The simulation log is diffed against the golden log,
            # with the LogChecker's filtering applied.
            missing = []
            log_file = '.'.join([test.name, 'log'])
            log_path = gd.entry(log_file)
            simout_path = os.path.join(out_dir, 'simout')
            if not os.path.exists(simout_path):
                missing.append('log output')
            elif log_path:
                diffs.append(LogChecker(log_path, simout_path,
                                        log_file, out_dir))

            # Every remaining golden file must have a matching output;
            # vcds get special handling, everything else is compared
            # byte for byte.
            for name in gd.unused():
                test_path = os.path.join(out_dir, name)
                ref_path = gd.entry(name)
                if not os.path.exists(test_path):
                    missing.append(name)
                elif name.endswith('.vcd'):
                    diffs.append(VcdChecker(ref_path, test_path,
                                            name, out_dir))
                else:
                    diffs.append(Checker(ref_path, test_path, name))

            if missing:
                self.failed(test, 'missing output', ' '.join(missing))
                continue

            failed_diffs = filter(lambda d: not d.check(), diffs)
            if failed_diffs:
                tags = map(lambda d: d.tag, failed_diffs)
                self.failed(test, 'failed diffs', ' '.join(tags))
                continue

            self.passed(test)

        if not args.no_print_results:
            self.print_results()

        self.print_status()

        result_path = None
        if args.result_file:
            result_path = os.path.join(os.getcwd(), 'results.json')
        elif args.result_file_at:
            result_path = args.result_file_at

        if result_path:
            self.write_result_file(result_path)
498
499
# Main command line options, shared by all phases. Phase-specific
# options come after a '--phase <name>' separator and are parsed by
# the phase itself (see collect_phases below).
parser = argparse.ArgumentParser(description='SystemC test utility')

parser.add_argument('build_dir', metavar='BUILD_DIR',
                    help='The build directory (ie. build/ARM).')

parser.add_argument('--update-json', action='store_true',
                    help='Update the json manifest of tests.')

parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'],
                    default='opt',
                    help='Flavor of binary to test.')

parser.add_argument('--list', action='store_true',
                    help='List the available tests')

parser.add_argument('-j', type=int, default=1,
                    help='Default level of parallelism, can be overriden '
                         'for individual stages')

# --filter and --filter-file are two mutually exclusive ways to give a
# python expression which selects tests by their properties.
filter_opts = parser.add_mutually_exclusive_group()
filter_opts.add_argument('--filter', default='True',
                         help='Python expression which filters tests based '
                              'on their properties')
filter_opts.add_argument('--filter-file', default=None,
                         type=argparse.FileType('r'),
                         help='Same as --filter, but read from a file')
526
def collect_phases(args):
    """Split the command line into main options and per-phase options.

    The argument list is partitioned on '--phase' markers: the first
    group holds the main options, each later group is a phase name
    followed by that phase's own arguments. Returns the parsed main
    options and the phase instances sorted into running order.

    Raises RuntimeError if the same phase is named twice.
    """
    phase_groups = [list(g) for k, g in
                    itertools.groupby(args, lambda x: x != '--phase') if k]
    main_args = parser.parse_args(phase_groups[0][1:])
    phases = []
    names = []
    for group in phase_groups[1:]:
        name = group[0]
        if name in names:
            # Fixed: was 'RuntimeException' (undefined name), and the
            # name was never recorded so duplicates were never caught.
            raise RuntimeError('Phase %s specified more than once' % name)
        names.append(name)
        phase = test_phase_classes[name]
        phases.append(phase(main_args, *group[1:]))
    phases.sort()
    return main_args, phases
541
# Split sys.argv into the main options and per-phase option groups.
main_args, phases = collect_phases(sys.argv)

# With no explicit --phase arguments, run all phases in order.
if len(phases) == 0:
    phases = [
        CompilePhase(main_args),
        RunPhase(main_args),
        VerifyPhase(main_args)
    ]



json_path = os.path.join(main_args.build_dir, json_rel_path)

# Regenerate the test manifest through scons if requested.
if main_args.update_json:
    scons(os.path.join(json_path))

with open(json_path) as f:
    test_data = json.load(f)

    # The filter is a python expression evaluated with each test's
    # properties as its namespace.
    if main_args.filter_file:
        f = main_args.filter_file
        filt = compile(f.read(), f.name, 'eval')
    else:
        filt = compile(main_args.filter, '<string>', 'eval')

    filtered_tests = {
        target: props for (target, props) in
                test_data.iteritems() if eval(filt, dict(props))
    }

    if len(filtered_tests) == 0:
        print('All tests were filtered out.')
        exit()

    if main_args.list:
        # Just describe the selected tests; don't run anything.
        for target, props in sorted(filtered_tests.iteritems()):
            print('%s.%s' % (target, main_args.flavor))
            for key, val in props.iteritems():
                print('    %s: %s' % (key, val))
        print('Total tests: %d' % len(filtered_tests))
    else:
        tests_to_run = list([
            Test(target, main_args.flavor, main_args.build_dir, props) for
                target, props in sorted(filtered_tests.iteritems())
        ])

        # Run each configured phase over the selected tests in order.
        for phase in phases:
            phase.run(tests_to_run)
263 in_file_filt,
264 )
265 test_filt = merge_filts(
266 r'^Global frequency set at \d* ticks per second\n',
267 r'^info: Entering event queue @ \d*\. Starting simulation\.\.\.\n',
268 r'warn: [^(]+\([^)]*\)( \[with [^]]*\])? not implemented\.\n',
269 r'warn: Ignoring request to set stack size\.\n',
270 info_filt(804),
271 in_file_filt,
272 )
273
274 def apply_filters(self, data, filts):
275 re.sub(filt, '', data)
276
277 def check(self):
278 with open(self.test) as test_f, open(self.ref) as ref_f:
279 test = re.sub(self.test_filt, '', test_f.read())
280 ref = re.sub(self.ref_filt, '', ref_f.read())
281 return self.diffing_check(ref.splitlines(True),
282 test.splitlines(True))
283
class VcdChecker(DiffingChecker):
    """Checker for vcd trace files.

    The first seven lines of the test output are skipped since they
    hold date and version information which varies between runs.
    """
    def check(self):
        with open (self.test) as test_f, open(self.ref) as ref_f:
            ref = ref_f.read().splitlines(True)
            test = test_f.read().splitlines(True)
            # Strip off the first seven lines of the test output which are
            # date and version information.
            test = test[7:]

            return self.diffing_check(ref, test)
294
class GoldenDir(object):
    """Index of a test's golden (reference) output directory.

    Entries may exist in a generic form ('name') and/or a platform
    specific form ('name.<platform>'); lookups prefer the platform
    specific one. Used entries are tracked so unused() can report the
    reference files never consumed by a lookup.
    """

    class _Entry(object):
        # One golden file: its full path and whether a lookup used it.
        def __init__(self, dir_path, e_path):
            self.used = False
            self.path = os.path.join(dir_path, e_path)

        def use(self):
            self.used = True

    def __init__(self, path, platform):
        self.path = path
        self.platform = platform

        # Removed dead locals from the original ('suffixed', 'bases'
        # and 'common' were computed but never read).
        self.entries = {}
        for entry in os.listdir(path):
            self.entries[entry] = self._Entry(path, entry)

    def entry(self, name):
        """Return the path for name, preferring 'name.<platform>'.

        Marks every matching entry (name itself and any 'name.*') as
        used. Returns None when nothing matches.
        """
        def match(n):
            return (n == name) or n.startswith(name + '.')
        matches = { n: e for n, e in self.entries.items() if match(n) }

        for m in matches.values():
            m.use()

        platform_name = '.'.join([ name, self.platform ])
        if platform_name in matches:
            return matches[platform_name].path
        if name in matches:
            return matches[name].path
        return None

    def unused(self):
        """Return sorted root names of entries no lookup consumed.

        Entries sharing a root prefix (e.g. 'foo' and 'foo.vcd') are
        collapsed into the single root name. Rewritten with sorted()
        since the original's filter(...).sort() only works on
        python 2.
        """
        items = sorted(i for i in self.entries.items() if not i[1].used)
        sources = []
        i = 0
        while i < len(items):
            root = items[i][0]
            sources.append(root)
            i += 1
            while i < len(items) and items[i][0].startswith(root):
                i += 1
        return sources
348
class VerifyPhase(TestPhaseBase):
    """Phase which checks test results against expectations.

    A test passes when its exit status matches the expected one and
    every output it produced matches the golden reference files.
    """
    name = 'verify'
    number = 3

    def reset_status(self):
        # Passed tests, and failed tests keyed by failure cause.
        self._passed = []
        self._failed = {}

    def passed(self, test):
        self._passed.append(test)

    def failed(self, test, cause, note=''):
        # The note is stored on the test so it appears in reports.
        test.set_prop('note', note)
        self._failed.setdefault(cause, []).append(test)

    def print_status(self):
        # One line pass/fail summary.
        total_passed = len(self._passed)
        total_failed = sum(map(len, self._failed.values()))
        print()
        print('Passed: {passed:4} - Failed: {failed:4}'.format(
                  passed=total_passed, failed=total_failed))

    def write_result_file(self, path):
        # Serialize the properties of all tests as json, grouped by
        # outcome and failure cause.
        results = {
            'passed': map(lambda t: t.props, self._passed),
            'failed': {
                cause: map(lambda t: t.props, tests) for
                       cause, tests in self._failed.iteritems()
            }
        }
        with open(path, 'w') as rf:
            json.dump(results, rf)

    def print_results(self):
        # List passed test paths, then failed tests grouped by cause
        # with their notes.
        print()
        print('Passed:')
        for path in sorted(list([ t.path for t in self._passed ])):
            print('    ', path)

        print()
        print('Failed:')

        causes = []
        for cause, tests in sorted(self._failed.items()):
            block = '  ' + cause.capitalize() + ':\n'
            for test in sorted(tests, key=lambda t: t.path):
                block += '    ' + test.path
                if test.note:
                    block += ' - ' + test.note
                block += '\n'
            causes.append(block)

        print('\n'.join(causes))

    def run(self, tests):
        parser = argparse.ArgumentParser()
        result_opts = parser.add_mutually_exclusive_group()
        result_opts.add_argument('--result-file', action='store_true',
                help='Create a results.json file in the current directory.')
        result_opts.add_argument('--result-file-at', metavar='PATH',
                help='Create a results json file at the given path.')
        parser.add_argument('--no-print-results', action='store_true',
                help='Don\'t print a list of tests that passed or failed')
        args = parser.parse_args(self.args)

        self.reset_status()

        runnable = filter(lambda t: not t.compile_only, tests)
        compile_only = filter(lambda t: t.compile_only, tests)

        # Compile-only tests pass if their binary exists.
        for test in compile_only:
            if os.path.exists(test.full_path()):
                self.passed(test)
            else:
                self.failed(test, 'compile failed')

        for test in runnable:
            # Exit status recorded by the execute phase.
            with open(test.returncode_file()) as rc:
                returncode = int(rc.read())

            expected_returncode = 0
            if os.path.exists(test.expected_returncode_file()):
                with open(test.expected_returncode_file()) as erc:
                    expected_returncode = int(erc.read())

            # 124 is what the 'timeout' wrapper exits with on expiry.
            if returncode == 124:
                self.failed(test, 'time out')
                continue
            elif returncode != expected_returncode:
                if expected_returncode == 0:
                    self.failed(test, 'abort')
                else:
                    self.failed(test, 'missed abort')
                continue

            out_dir = test.m5out_dir()

            # NOTE(review): this namedtuple is never used below.
            Diff = collections.namedtuple(
                    'Diff', 'ref, test, tag, ref_filter')

            diffs = []

            gd = GoldenDir(test.golden_dir(), 'linux64')

            # The simulation log is diffed against the golden log,
            # with the LogChecker's filtering applied.
            missing = []
            log_file = '.'.join([test.name, 'log'])
            log_path = gd.entry(log_file)
            simout_path = os.path.join(out_dir, 'simout')
            if not os.path.exists(simout_path):
                missing.append('log output')
            elif log_path:
                diffs.append(LogChecker(log_path, simout_path,
                                        log_file, out_dir))

            # Every remaining golden file must have a matching output;
            # vcds get special handling, everything else is compared
            # byte for byte.
            for name in gd.unused():
                test_path = os.path.join(out_dir, name)
                ref_path = gd.entry(name)
                if not os.path.exists(test_path):
                    missing.append(name)
                elif name.endswith('.vcd'):
                    diffs.append(VcdChecker(ref_path, test_path,
                                            name, out_dir))
                else:
                    diffs.append(Checker(ref_path, test_path, name))

            if missing:
                self.failed(test, 'missing output', ' '.join(missing))
                continue

            failed_diffs = filter(lambda d: not d.check(), diffs)
            if failed_diffs:
                tags = map(lambda d: d.tag, failed_diffs)
                self.failed(test, 'failed diffs', ' '.join(tags))
                continue

            self.passed(test)

        if not args.no_print_results:
            self.print_results()

        self.print_status()

        result_path = None
        if args.result_file:
            result_path = os.path.join(os.getcwd(), 'results.json')
        elif args.result_file_at:
            result_path = args.result_file_at

        if result_path:
            self.write_result_file(result_path)
499
500
# Main command line options, shared by all phases. Phase-specific
# options come after a '--phase <name>' separator and are parsed by
# the phase itself (see collect_phases below).
parser = argparse.ArgumentParser(description='SystemC test utility')

parser.add_argument('build_dir', metavar='BUILD_DIR',
                    help='The build directory (ie. build/ARM).')

parser.add_argument('--update-json', action='store_true',
                    help='Update the json manifest of tests.')

parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'],
                    default='opt',
                    help='Flavor of binary to test.')

parser.add_argument('--list', action='store_true',
                    help='List the available tests')

parser.add_argument('-j', type=int, default=1,
                    help='Default level of parallelism, can be overriden '
                         'for individual stages')

# --filter and --filter-file are two mutually exclusive ways to give a
# python expression which selects tests by their properties.
filter_opts = parser.add_mutually_exclusive_group()
filter_opts.add_argument('--filter', default='True',
                         help='Python expression which filters tests based '
                              'on their properties')
filter_opts.add_argument('--filter-file', default=None,
                         type=argparse.FileType('r'),
                         help='Same as --filter, but read from a file')
527
def collect_phases(args):
    """Split the command line into main options and per-phase options.

    The argument list is partitioned on '--phase' markers: the first
    group holds the main options, each later group is a phase name
    followed by that phase's own arguments. Returns the parsed main
    options and the phase instances sorted into running order.

    Raises RuntimeError if the same phase is named twice.
    """
    phase_groups = [list(g) for k, g in
                    itertools.groupby(args, lambda x: x != '--phase') if k]
    main_args = parser.parse_args(phase_groups[0][1:])
    phases = []
    names = []
    for group in phase_groups[1:]:
        name = group[0]
        if name in names:
            # Fixed: was 'RuntimeException' (undefined name), and the
            # name was never recorded so duplicates were never caught.
            raise RuntimeError('Phase %s specified more than once' % name)
        names.append(name)
        phase = test_phase_classes[name]
        phases.append(phase(main_args, *group[1:]))
    phases.sort()
    return main_args, phases
542
# Split sys.argv into the main options and per-phase option groups.
main_args, phases = collect_phases(sys.argv)

# With no explicit --phase arguments, run all phases in order.
if len(phases) == 0:
    phases = [
        CompilePhase(main_args),
        RunPhase(main_args),
        VerifyPhase(main_args)
    ]



json_path = os.path.join(main_args.build_dir, json_rel_path)

# Regenerate the test manifest through scons if requested.
if main_args.update_json:
    scons(os.path.join(json_path))

with open(json_path) as f:
    test_data = json.load(f)

    # The filter is a python expression evaluated with each test's
    # properties as its namespace.
    if main_args.filter_file:
        f = main_args.filter_file
        filt = compile(f.read(), f.name, 'eval')
    else:
        filt = compile(main_args.filter, '<string>', 'eval')

    filtered_tests = {
        target: props for (target, props) in
                test_data.iteritems() if eval(filt, dict(props))
    }

    if len(filtered_tests) == 0:
        print('All tests were filtered out.')
        exit()

    if main_args.list:
        # Just describe the selected tests; don't run anything.
        for target, props in sorted(filtered_tests.iteritems()):
            print('%s.%s' % (target, main_args.flavor))
            for key, val in props.iteritems():
                print('    %s: %s' % (key, val))
        print('Total tests: %d' % len(filtered_tests))
    else:
        tests_to_run = list([
            Test(target, main_args.flavor, main_args.build_dir, props) for
                target, props in sorted(filtered_tests.iteritems())
        ])

        # Run each configured phase over the selected tests in order.
        for phase in phases:
            phase.run(tests_to_run)