verify.py (13034:69726d1f9209) verify.py (13037:ae6f69952478)
1#!/usr/bin/env python2
2#
3# Copyright 2018 Google, Inc.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Gabe Black
29
30from __future__ import print_function
31
32import argparse
33import collections
34import difflib
35import functools
36import inspect
37import itertools
38import json
39import multiprocessing.pool
40import os
41import re
42import subprocess
43import sys
44
# Absolute location of this script; files it needs live alongside it.
script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
# The gem5 config script handed to every test binary when it runs.
config_path = os.path.join(script_dir, 'config.py')

# Locations of the systemc tests, relative to the build directory.
systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')
52
53
54
def scons(*args):
    """Invoke scons with the given extra command line arguments."""
    cmd = ['scons']
    cmd.extend(args)
    subprocess.check_call(cmd)
58
59
60
class Test(object):
    """A single test target: its build products and output locations.

    Properties from the test manifest are attached both as attributes
    and in the ``props`` dict (see set_prop); ``name`` and ``path`` are
    expected among them.
    """

    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        self.suffix = suffix
        self.build_dir = build_dir
        self.props = {}

        # items() instead of the py2-only iteritems() so this class also
        # works under python 3.
        for key, val in props.items():
            self.set_prop(key, val)

    def set_prop(self, key, val):
        """Record a property as both an attribute and a props entry."""
        setattr(self, key, val)
        self.props[key] = val

    def dir(self):
        """Directory this test's build products land in."""
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        """Directory holding this test's source files."""
        return os.path.join(script_dir, self.path)

    def golden_dir(self):
        """Directory holding this test's golden reference output."""
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        """File name of the test binary, eg. test_name.opt."""
        return '.'.join([self.name, self.suffix])

    def full_path(self):
        """Absolute path of the test binary."""
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        """Directory the simulator output for this run goes in."""
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        """File recording the exit status of the test run."""
        return os.path.join(self.m5out_dir(), 'returncode')
95
96
97
# Registry mapping phase names to their implementing classes, filled in
# automatically by TestPhaseMeta as subclasses are defined.
test_phase_classes = {}

class TestPhaseMeta(type):
    """Metaclass which registers concrete test phase classes by name."""
    def __init__(cls, name, bases, d):
        # Classes marked abstract are left out of the registry.
        is_abstract = d.pop('abstract', False)
        if not is_abstract:
            test_phase_classes[d['name']] = cls

        super(TestPhaseMeta, cls).__init__(name, bases, d)
106
class TestPhaseBase(object):
    """Base class for the compile/execute/verify phases of a test run.

    Phases order themselves by their ``number`` attribute so sorting a
    list of phases puts them in run order.
    """
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        # Keep the top level arguments and this phase's own arguments.
        self.main_args = main_args
        self.args = args

    def __lt__(self, other):
        return self.number < other.number
117
class CompilePhase(TestPhaseBase):
    """Phase which builds every test binary in one scons invocation."""
    name = 'compile'
    number = 1

    def run(self, tests):
        targets = [test.full_path() for test in tests]
        scons_args = ['USE_SYSTEMC=1'] + list(self.args) + targets
        scons(*scons_args)
126
class RunPhase(TestPhaseBase):
    """Phase which runs the compiled tests and records their exit status."""
    name = 'execute'
    number = 2

    def run(self, tests):
        parser = argparse.ArgumentParser()
        parser.add_argument('--timeout', type=int, metavar='SECONDS',
                            help='Time limit for each run in seconds.',
                            default=0)
        parser.add_argument('-j', type=int, default=1,
                            help='How many tests to run in parallel.')
        args = parser.parse_args(self.args)

        # Command prefix which enforces the time limit, if one was set.
        timeout_cmd = [
            'timeout',
            '--kill-after', str(args.timeout * 2),
            str(args.timeout)
        ]

        def run_test(test):
            cmd = []
            if args.timeout:
                cmd.extend(timeout_cmd)
            cmd.extend([
                test.full_path(),
                '-red', test.m5out_dir(),
                '--listener-mode=off',
                '--quiet',
                config_path
            ])
            # Ensure the output directory exists.
            if not os.path.exists(test.m5out_dir()):
                os.makedirs(test.m5out_dir())
            # "except ... as" instead of the py2-only "except E, e" form,
            # which is a syntax error under python 3.
            try:
                subprocess.check_call(cmd)
            except subprocess.CalledProcessError as error:
                returncode = error.returncode
            else:
                returncode = 0
            with open(test.returncode_file(), 'w') as rc:
                rc.write('%d\n' % returncode)

        # Explicit loops/comprehensions instead of filter()/map(): under
        # python 3 those are lazy, and the original map(run_test, ...)
        # would never actually run any test.
        runnable = [t for t in tests if not t.compile_only]
        if args.j == 1:
            for test in runnable:
                run_test(test)
        else:
            tp = multiprocessing.pool.ThreadPool(args.j)
            for test in runnable:
                tp.apply_async(run_test, (test,))
            tp.close()
            tp.join()
176
class Checker(object):
    """Compares a test output file against a golden reference file.

    The check passes only if the two files' contents match exactly.
    ``tag`` identifies the comparison when reporting failures.
    """
    def __init__(self, ref, test, tag):
        self.ref = ref
        self.test = test
        self.tag = tag

    def check(self):
        """Return True if the test output matches the reference."""
        # The original opened "self.text", an attribute that was never
        # set, so every check raised AttributeError. The test output
        # path is stored as self.test.
        with open(self.test) as test_f, open(self.ref) as ref_f:
            return test_f.read() == ref_f.read()
186
class LogChecker(Checker):
    """Checker for simulator log output with known noise filtered out."""

    def merge_filts(*filts):
        """Merge regex source strings into one compiled alternation."""
        wrapped = ['(' + f + ')' for f in filts]
        return re.compile('|'.join(wrapped), flags=re.MULTILINE)

    def warning_filt(num):
        """Regex source matching a numbered SystemC warning block."""
        return (r'^\nWarning: \(W{}\) .*\n(In file: .*\n)?'
                r'(In process: [\w.]* @ .*\n)?').format(num)
196
197 ref_filt = merge_filts(
198 r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n',
199 r'^SystemC Simulation\n',
1#!/usr/bin/env python2
2#
3# Copyright 2018 Google, Inc.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Gabe Black
29
30from __future__ import print_function
31
32import argparse
33import collections
34import difflib
35import functools
36import inspect
37import itertools
38import json
39import multiprocessing.pool
40import os
41import re
42import subprocess
43import sys
44
# Absolute location of this script; files it needs live alongside it.
script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
# The gem5 config script handed to every test binary when it runs.
config_path = os.path.join(script_dir, 'config.py')

# Locations of the systemc tests, relative to the build directory.
systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')
52
53
54
def scons(*args):
    """Invoke scons with the given extra command line arguments."""
    cmd = ['scons']
    cmd.extend(args)
    subprocess.check_call(cmd)
58
59
60
class Test(object):
    """A single test target: its build products and output locations.

    Properties from the test manifest are attached both as attributes
    and in the ``props`` dict (see set_prop); ``name`` and ``path`` are
    expected among them.
    """

    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        self.suffix = suffix
        self.build_dir = build_dir
        self.props = {}

        # items() instead of the py2-only iteritems() so this class also
        # works under python 3.
        for key, val in props.items():
            self.set_prop(key, val)

    def set_prop(self, key, val):
        """Record a property as both an attribute and a props entry."""
        setattr(self, key, val)
        self.props[key] = val

    def dir(self):
        """Directory this test's build products land in."""
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        """Directory holding this test's source files."""
        return os.path.join(script_dir, self.path)

    def golden_dir(self):
        """Directory holding this test's golden reference output."""
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        """File name of the test binary, eg. test_name.opt."""
        return '.'.join([self.name, self.suffix])

    def full_path(self):
        """Absolute path of the test binary."""
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        """Directory the simulator output for this run goes in."""
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        """File recording the exit status of the test run."""
        return os.path.join(self.m5out_dir(), 'returncode')
95
96
97
# Registry mapping phase names to their implementing classes, filled in
# automatically by TestPhaseMeta as subclasses are defined.
test_phase_classes = {}

class TestPhaseMeta(type):
    """Metaclass which registers concrete test phase classes by name."""
    def __init__(cls, name, bases, d):
        # Classes marked abstract are left out of the registry.
        is_abstract = d.pop('abstract', False)
        if not is_abstract:
            test_phase_classes[d['name']] = cls

        super(TestPhaseMeta, cls).__init__(name, bases, d)
106
class TestPhaseBase(object):
    """Base class for the compile/execute/verify phases of a test run.

    Phases order themselves by their ``number`` attribute so sorting a
    list of phases puts them in run order.
    """
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        # Keep the top level arguments and this phase's own arguments.
        self.main_args = main_args
        self.args = args

    def __lt__(self, other):
        return self.number < other.number
117
class CompilePhase(TestPhaseBase):
    """Phase which builds every test binary in one scons invocation."""
    name = 'compile'
    number = 1

    def run(self, tests):
        targets = [test.full_path() for test in tests]
        scons_args = ['USE_SYSTEMC=1'] + list(self.args) + targets
        scons(*scons_args)
126
class RunPhase(TestPhaseBase):
    """Phase which runs the compiled tests and records their exit status."""
    name = 'execute'
    number = 2

    def run(self, tests):
        parser = argparse.ArgumentParser()
        parser.add_argument('--timeout', type=int, metavar='SECONDS',
                            help='Time limit for each run in seconds.',
                            default=0)
        parser.add_argument('-j', type=int, default=1,
                            help='How many tests to run in parallel.')
        args = parser.parse_args(self.args)

        # Command prefix which enforces the time limit, if one was set.
        timeout_cmd = [
            'timeout',
            '--kill-after', str(args.timeout * 2),
            str(args.timeout)
        ]

        def run_test(test):
            cmd = []
            if args.timeout:
                cmd.extend(timeout_cmd)
            cmd.extend([
                test.full_path(),
                '-red', test.m5out_dir(),
                '--listener-mode=off',
                '--quiet',
                config_path
            ])
            # Ensure the output directory exists.
            if not os.path.exists(test.m5out_dir()):
                os.makedirs(test.m5out_dir())
            # "except ... as" instead of the py2-only "except E, e" form,
            # which is a syntax error under python 3.
            try:
                subprocess.check_call(cmd)
            except subprocess.CalledProcessError as error:
                returncode = error.returncode
            else:
                returncode = 0
            with open(test.returncode_file(), 'w') as rc:
                rc.write('%d\n' % returncode)

        # Explicit loops/comprehensions instead of filter()/map(): under
        # python 3 those are lazy, and the original map(run_test, ...)
        # would never actually run any test.
        runnable = [t for t in tests if not t.compile_only]
        if args.j == 1:
            for test in runnable:
                run_test(test)
        else:
            tp = multiprocessing.pool.ThreadPool(args.j)
            for test in runnable:
                tp.apply_async(run_test, (test,))
            tp.close()
            tp.join()
176
class Checker(object):
    """Compares a test output file against a golden reference file.

    The check passes only if the two files' contents match exactly.
    ``tag`` identifies the comparison when reporting failures.
    """
    def __init__(self, ref, test, tag):
        self.ref = ref
        self.test = test
        self.tag = tag

    def check(self):
        """Return True if the test output matches the reference."""
        # The original opened "self.text", an attribute that was never
        # set, so every check raised AttributeError. The test output
        # path is stored as self.test.
        with open(self.test) as test_f, open(self.ref) as ref_f:
            return test_f.read() == ref_f.read()
186
class LogChecker(Checker):
    """Checker for simulator log output with known noise filtered out."""

    def merge_filts(*filts):
        """Merge regex source strings into one compiled alternation."""
        wrapped = ['(' + f + ')' for f in filts]
        return re.compile('|'.join(wrapped), flags=re.MULTILINE)

    def warning_filt(num):
        """Regex source matching a numbered SystemC warning block."""
        return (r'^\nWarning: \(W{}\) .*\n(In file: .*\n)?'
                r'(In process: [\w.]* @ .*\n)?').format(num)
196
197 ref_filt = merge_filts(
198 r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n',
199 r'^SystemC Simulation\n',
200 warning_filt(571),
201 warning_filt(540),
200 warning_filt(540),
201 warning_filt(569),
202 warning_filt(571),
202 r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: ' +
203 r'You can turn off(.*\n){7}'
204 )
205 test_filt = merge_filts(
206 r'^Global frequency set at \d* ticks per second\n'
207 )
208
209 def __init__(self, ref, test, tag, out_dir):
210 super(LogChecker, self).__init__(ref, test, tag)
211 self.out_dir = out_dir
212
213 def apply_filters(self, data, filts):
214 re.sub(filt, '', data)
215
216 def check(self):
217 test_file = os.path.basename(self.test)
218 ref_file = os.path.basename(self.ref)
219 with open(self.test) as test_f, open(self.ref) as ref_f:
220 test = re.sub(self.test_filt, '', test_f.read())
221 ref = re.sub(self.ref_filt, '', ref_f.read())
222 diff_file = '.'.join([ref_file, 'diff'])
223 diff_path = os.path.join(self.out_dir, diff_file)
224 if test != ref:
225 with open(diff_path, 'w') as diff_f:
226 for line in difflib.unified_diff(
227 ref.splitlines(True), test.splitlines(True),
228 fromfile=ref_file,
229 tofile=test_file):
230 diff_f.write(line)
231 return False
232 else:
233 if os.path.exists(diff_path):
234 os.unlink(diff_path)
235 return True
236
class GoldenDir(object):
    """Indexes a test's golden output directory.

    Entries may exist both in a generic form (name) and a platform
    specific form (name.<platform>); lookups prefer the platform
    specific version, and every lookup marks its matches as used.
    """
    def __init__(self, path, platform):
        self.path = path
        self.platform = platform

        class Entry(object):
            # One golden file and whether a lookup has touched it.
            def __init__(self, e_path):
                self.used = False
                self.path = os.path.join(path, e_path)

            def use(self):
                self.used = True

        # Index every file in the golden directory. The "bases" and
        # "common" locals the original computed here were never used
        # (and the lazy filter()/map() chain crashed items.sort() under
        # python 3), so they have been dropped.
        self.entries = {}
        for entry in os.listdir(path):
            self.entries[entry] = Entry(entry)

    def entry(self, name):
        """Return the golden file path for name, or None.

        Prefers name.<platform> over the plain name, and marks every
        matching entry as used either way.
        """
        def match(n):
            return (n == name) or n.startswith(name + '.')
        matches = { n: e for n, e in self.entries.items() if match(n) }

        for m in matches.values():
            m.use()

        platform_name = '.'.join([ name, self.platform ])
        if platform_name in matches:
            return matches[platform_name].path
        if name in matches:
            return matches[name].path
        return None

    def unused(self):
        """Return the root names of entries no lookup ever touched."""
        # Sorted comprehension instead of filter() + list.sort() so this
        # also works under python 3 where filter() returns an iterator.
        items = sorted(n for n, e in self.entries.items() if not e.used)
        sources = []
        i = 0
        while i < len(items):
            root = items[i]
            sources.append(root)
            i += 1
            # Skip suffixed variants of the same root entry.
            while i < len(items) and items[i].startswith(root):
                i += 1
        return sources
290
class VerifyPhase(TestPhaseBase):
    """Phase which checks test results against golden output."""
    name = 'verify'
    number = 3

    def reset_status(self):
        """Clear the pass/fail records from any previous run."""
        self._passed = []
        self._failed = {}

    def passed(self, test):
        """Record a test as passing."""
        self._passed.append(test)

    def failed(self, test, cause, note=''):
        """Record a test as failing for the given cause.

        The optional note gives extra detail, eg. which files differed.
        """
        test.set_prop('note', note)
        self._failed.setdefault(cause, []).append(test)

    def print_status(self):
        """Print a one line pass/fail summary."""
        total_passed = len(self._passed)
        total_failed = sum(len(tests) for tests in self._failed.values())
        print()
        print('Passed: {passed:4} - Failed: {failed:4}'.format(
                  passed=total_passed, failed=total_failed))

    def write_result_file(self, path):
        """Dump the pass/fail results as json to the given path."""
        # Build real lists (not py2 map() results) so json serialization
        # also works under python 3, where json.dump would choke on the
        # map objects the original produced.
        results = {
            'passed': [t.props for t in self._passed],
            'failed': {
                cause: [t.props for t in tests]
                for cause, tests in self._failed.items()
            }
        }
        with open(path, 'w') as rf:
            json.dump(results, rf)

    def print_results(self):
        """Print the tests that passed and the tests that failed."""
        print()
        print('Passed:')
        for path in sorted(t.path for t in self._passed):
            print('    ', path)

        print()
        print('Failed:')

        causes = []
        for cause, tests in sorted(self._failed.items()):
            block = '  ' + cause.capitalize() + ':\n'
            for test in sorted(tests, key=lambda t: t.path):
                block += '    ' + test.path
                if test.note:
                    block += ' - ' + test.note
                block += '\n'
            causes.append(block)

        print('\n'.join(causes))

    def run(self, tests):
        parser = argparse.ArgumentParser()
        result_opts = parser.add_mutually_exclusive_group()
        result_opts.add_argument('--result-file', action='store_true',
                help='Create a results.json file in the current directory.')
        result_opts.add_argument('--result-file-at', metavar='PATH',
                help='Create a results json file at the given path.')
        parser.add_argument('--print-results', action='store_true',
                help='Print a list of tests that passed or failed')
        args = parser.parse_args(self.args)

        self.reset_status()

        runnable = [t for t in tests if not t.compile_only]
        compile_only = [t for t in tests if t.compile_only]

        # Compile-only tests pass if their binary was built at all.
        for test in compile_only:
            if os.path.exists(test.full_path()):
                self.passed(test)
            else:
                self.failed(test, 'compile failed')

        for test in runnable:
            with open(test.returncode_file()) as rc:
                returncode = int(rc.read())

            # 124 is the exit status the timeout utility uses when the
            # time limit was exceeded.
            if returncode == 124:
                self.failed(test, 'time out')
                continue
            elif returncode != 0:
                self.failed(test, 'abort')
                continue

            out_dir = test.m5out_dir()

            # NOTE: the unused "Diff" namedtuple the original rebuilt on
            # every iteration has been removed.
            diffs = []

            gd = GoldenDir(test.golden_dir(), 'linux64')

            missing = []
            log_file = '.'.join([test.name, 'log'])
            log_path = gd.entry(log_file)
            simout_path = os.path.join(out_dir, 'simout')
            if not os.path.exists(simout_path):
                missing.append('log output')
            elif log_path:
                diffs.append(LogChecker(log_path, simout_path,
                                        log_file, out_dir))

            # Any golden file not consumed above must match verbatim.
            for name in gd.unused():
                test_path = os.path.join(out_dir, name)
                ref_path = gd.entry(name)
                if not os.path.exists(test_path):
                    missing.append(name)
                else:
                    diffs.append(Checker(ref_path, test_path, name))

            if missing:
                self.failed(test, 'missing output', ' '.join(missing))
                continue

            failed_diffs = [d for d in diffs if not d.check()]
            if failed_diffs:
                tags = [d.tag for d in failed_diffs]
                self.failed(test, 'failed diffs', ' '.join(tags))
                continue

            self.passed(test)

        if args.print_results:
            self.print_results()

        self.print_status()

        result_path = None
        if args.result_file:
            result_path = os.path.join(os.getcwd(), 'results.json')
        elif args.result_file_at:
            result_path = args.result_file_at

        if result_path:
            self.write_result_file(result_path)
430
431
# Top level command line interface; per-phase options are parsed
# separately by each phase from its own argument group.
parser = argparse.ArgumentParser(description='SystemC test utility')

parser.add_argument('build_dir', metavar='BUILD_DIR',
                    help='The build directory (ie. build/ARM).')

parser.add_argument('--update-json', action='store_true',
                    help='Update the json manifest of tests.')

parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'],
                    default='opt',
                    help='Flavor of binary to test.')

parser.add_argument('--list', action='store_true',
                    help='List the available tests')

filter_opts = parser.add_mutually_exclusive_group()
filter_opts.add_argument('--filter', default='True',
                         help='Python expression which filters tests based '
                              'on their properties')
filter_opts.add_argument('--filter-file', default=None,
                         type=argparse.FileType('r'),
                         help='Same as --filter, but read from a file')
454
def collect_phases(args):
    """Split the command line at '--phase' into main and per-phase args.

    Returns the parsed main arguments and the phase objects sorted into
    run order. Raises RuntimeError if a phase is requested twice.
    """
    phase_groups = [list(g) for k, g in
                    itertools.groupby(args, lambda x: x != '--phase') if k]
    main_args = parser.parse_args(phase_groups[0][1:])
    phases = []
    names = []
    for group in phase_groups[1:]:
        name = group[0]
        if name in names:
            # RuntimeError: the original raised RuntimeException, which
            # doesn't exist in python.
            raise RuntimeError('Phase %s specified more than once' % name)
        # Remember the name so a repeated phase is actually detected;
        # the original never appended, leaving the check above dead.
        names.append(name)
        phase = test_phase_classes[name]
        phases.append(phase(main_args, *group[1:]))
    phases.sort()
    return main_args, phases
469
main_args, phases = collect_phases(sys.argv)

# With no explicit --phase arguments, run all three phases in order.
if len(phases) == 0:
    phases = [
        CompilePhase(main_args),
        RunPhase(main_args),
        VerifyPhase(main_args)
    ]



json_path = os.path.join(main_args.build_dir, json_rel_path)

# Regenerate the test manifest if requested. (The original wrapped
# json_path in a pointless single-argument os.path.join.)
if main_args.update_json:
    scons(json_path)

with open(json_path) as f:
    test_data = json.load(f)

    # The filter is a python expression evaluated against each test's
    # properties; --filter-file supplies it from a file instead. Note
    # eval() here is a deliberate CLI feature, not untrusted input.
    if main_args.filter_file:
        filter_f = main_args.filter_file
        filt = compile(filter_f.read(), filter_f.name, 'eval')
    else:
        filt = compile(main_args.filter, '<string>', 'eval')

    # items() rather than py2-only iteritems() so this driver also runs
    # under python 3.
    filtered_tests = {
        target: props for (target, props) in
        test_data.items() if eval(filt, dict(props))
    }

    if main_args.list:
        for target, props in sorted(filtered_tests.items()):
            print('%s.%s' % (target, main_args.flavor))
            for key, val in props.items():
                print('    %s: %s' % (key, val))
        print('Total tests: %d' % len(filtered_tests))
    else:
        tests_to_run = [
            Test(target, main_args.flavor, main_args.build_dir, props)
            for target, props in sorted(filtered_tests.items())
        ]

        for phase in phases:
            phase.run(tests_to_run)
203 r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: ' +
204 r'You can turn off(.*\n){7}'
205 )
206 test_filt = merge_filts(
207 r'^Global frequency set at \d* ticks per second\n'
208 )
209
210 def __init__(self, ref, test, tag, out_dir):
211 super(LogChecker, self).__init__(ref, test, tag)
212 self.out_dir = out_dir
213
214 def apply_filters(self, data, filts):
215 re.sub(filt, '', data)
216
217 def check(self):
218 test_file = os.path.basename(self.test)
219 ref_file = os.path.basename(self.ref)
220 with open(self.test) as test_f, open(self.ref) as ref_f:
221 test = re.sub(self.test_filt, '', test_f.read())
222 ref = re.sub(self.ref_filt, '', ref_f.read())
223 diff_file = '.'.join([ref_file, 'diff'])
224 diff_path = os.path.join(self.out_dir, diff_file)
225 if test != ref:
226 with open(diff_path, 'w') as diff_f:
227 for line in difflib.unified_diff(
228 ref.splitlines(True), test.splitlines(True),
229 fromfile=ref_file,
230 tofile=test_file):
231 diff_f.write(line)
232 return False
233 else:
234 if os.path.exists(diff_path):
235 os.unlink(diff_path)
236 return True
237
class GoldenDir(object):
    """Indexes a test's golden output directory.

    Entries may exist both in a generic form (name) and a platform
    specific form (name.<platform>); lookups prefer the platform
    specific version, and every lookup marks its matches as used.
    """
    def __init__(self, path, platform):
        self.path = path
        self.platform = platform

        class Entry(object):
            # One golden file and whether a lookup has touched it.
            def __init__(self, e_path):
                self.used = False
                self.path = os.path.join(path, e_path)

            def use(self):
                self.used = True

        # Index every file in the golden directory. The "bases" and
        # "common" locals the original computed here were never used
        # (and the lazy filter()/map() chain crashed items.sort() under
        # python 3), so they have been dropped.
        self.entries = {}
        for entry in os.listdir(path):
            self.entries[entry] = Entry(entry)

    def entry(self, name):
        """Return the golden file path for name, or None.

        Prefers name.<platform> over the plain name, and marks every
        matching entry as used either way.
        """
        def match(n):
            return (n == name) or n.startswith(name + '.')
        matches = { n: e for n, e in self.entries.items() if match(n) }

        for m in matches.values():
            m.use()

        platform_name = '.'.join([ name, self.platform ])
        if platform_name in matches:
            return matches[platform_name].path
        if name in matches:
            return matches[name].path
        return None

    def unused(self):
        """Return the root names of entries no lookup ever touched."""
        # Sorted comprehension instead of filter() + list.sort() so this
        # also works under python 3 where filter() returns an iterator.
        items = sorted(n for n, e in self.entries.items() if not e.used)
        sources = []
        i = 0
        while i < len(items):
            root = items[i]
            sources.append(root)
            i += 1
            # Skip suffixed variants of the same root entry.
            while i < len(items) and items[i].startswith(root):
                i += 1
        return sources
291
class VerifyPhase(TestPhaseBase):
    """Phase which checks test results against golden output."""
    name = 'verify'
    number = 3

    def reset_status(self):
        """Clear the pass/fail records from any previous run."""
        self._passed = []
        self._failed = {}

    def passed(self, test):
        """Record a test as passing."""
        self._passed.append(test)

    def failed(self, test, cause, note=''):
        """Record a test as failing for the given cause.

        The optional note gives extra detail, eg. which files differed.
        """
        test.set_prop('note', note)
        self._failed.setdefault(cause, []).append(test)

    def print_status(self):
        """Print a one line pass/fail summary."""
        total_passed = len(self._passed)
        total_failed = sum(len(tests) for tests in self._failed.values())
        print()
        print('Passed: {passed:4} - Failed: {failed:4}'.format(
                  passed=total_passed, failed=total_failed))

    def write_result_file(self, path):
        """Dump the pass/fail results as json to the given path."""
        # Build real lists (not py2 map() results) so json serialization
        # also works under python 3, where json.dump would choke on the
        # map objects the original produced.
        results = {
            'passed': [t.props for t in self._passed],
            'failed': {
                cause: [t.props for t in tests]
                for cause, tests in self._failed.items()
            }
        }
        with open(path, 'w') as rf:
            json.dump(results, rf)

    def print_results(self):
        """Print the tests that passed and the tests that failed."""
        print()
        print('Passed:')
        for path in sorted(t.path for t in self._passed):
            print('    ', path)

        print()
        print('Failed:')

        causes = []
        for cause, tests in sorted(self._failed.items()):
            block = '  ' + cause.capitalize() + ':\n'
            for test in sorted(tests, key=lambda t: t.path):
                block += '    ' + test.path
                if test.note:
                    block += ' - ' + test.note
                block += '\n'
            causes.append(block)

        print('\n'.join(causes))

    def run(self, tests):
        parser = argparse.ArgumentParser()
        result_opts = parser.add_mutually_exclusive_group()
        result_opts.add_argument('--result-file', action='store_true',
                help='Create a results.json file in the current directory.')
        result_opts.add_argument('--result-file-at', metavar='PATH',
                help='Create a results json file at the given path.')
        parser.add_argument('--print-results', action='store_true',
                help='Print a list of tests that passed or failed')
        args = parser.parse_args(self.args)

        self.reset_status()

        runnable = [t for t in tests if not t.compile_only]
        compile_only = [t for t in tests if t.compile_only]

        # Compile-only tests pass if their binary was built at all.
        for test in compile_only:
            if os.path.exists(test.full_path()):
                self.passed(test)
            else:
                self.failed(test, 'compile failed')

        for test in runnable:
            with open(test.returncode_file()) as rc:
                returncode = int(rc.read())

            # 124 is the exit status the timeout utility uses when the
            # time limit was exceeded.
            if returncode == 124:
                self.failed(test, 'time out')
                continue
            elif returncode != 0:
                self.failed(test, 'abort')
                continue

            out_dir = test.m5out_dir()

            # NOTE: the unused "Diff" namedtuple the original rebuilt on
            # every iteration has been removed.
            diffs = []

            gd = GoldenDir(test.golden_dir(), 'linux64')

            missing = []
            log_file = '.'.join([test.name, 'log'])
            log_path = gd.entry(log_file)
            simout_path = os.path.join(out_dir, 'simout')
            if not os.path.exists(simout_path):
                missing.append('log output')
            elif log_path:
                diffs.append(LogChecker(log_path, simout_path,
                                        log_file, out_dir))

            # Any golden file not consumed above must match verbatim.
            for name in gd.unused():
                test_path = os.path.join(out_dir, name)
                ref_path = gd.entry(name)
                if not os.path.exists(test_path):
                    missing.append(name)
                else:
                    diffs.append(Checker(ref_path, test_path, name))

            if missing:
                self.failed(test, 'missing output', ' '.join(missing))
                continue

            failed_diffs = [d for d in diffs if not d.check()]
            if failed_diffs:
                tags = [d.tag for d in failed_diffs]
                self.failed(test, 'failed diffs', ' '.join(tags))
                continue

            self.passed(test)

        if args.print_results:
            self.print_results()

        self.print_status()

        result_path = None
        if args.result_file:
            result_path = os.path.join(os.getcwd(), 'results.json')
        elif args.result_file_at:
            result_path = args.result_file_at

        if result_path:
            self.write_result_file(result_path)
431
432
# Top level command line interface; per-phase options are parsed
# separately by each phase from its own argument group.
parser = argparse.ArgumentParser(description='SystemC test utility')

parser.add_argument('build_dir', metavar='BUILD_DIR',
                    help='The build directory (ie. build/ARM).')

parser.add_argument('--update-json', action='store_true',
                    help='Update the json manifest of tests.')

parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'],
                    default='opt',
                    help='Flavor of binary to test.')

parser.add_argument('--list', action='store_true',
                    help='List the available tests')

filter_opts = parser.add_mutually_exclusive_group()
filter_opts.add_argument('--filter', default='True',
                         help='Python expression which filters tests based '
                              'on their properties')
filter_opts.add_argument('--filter-file', default=None,
                         type=argparse.FileType('r'),
                         help='Same as --filter, but read from a file')
455
def collect_phases(args):
    """Split the command line at '--phase' into main and per-phase args.

    Returns the parsed main arguments and the phase objects sorted into
    run order. Raises RuntimeError if a phase is requested twice.
    """
    phase_groups = [list(g) for k, g in
                    itertools.groupby(args, lambda x: x != '--phase') if k]
    main_args = parser.parse_args(phase_groups[0][1:])
    phases = []
    names = []
    for group in phase_groups[1:]:
        name = group[0]
        if name in names:
            # RuntimeError: the original raised RuntimeException, which
            # doesn't exist in python.
            raise RuntimeError('Phase %s specified more than once' % name)
        # Remember the name so a repeated phase is actually detected;
        # the original never appended, leaving the check above dead.
        names.append(name)
        phase = test_phase_classes[name]
        phases.append(phase(main_args, *group[1:]))
    phases.sort()
    return main_args, phases
470
main_args, phases = collect_phases(sys.argv)

# With no explicit --phase arguments, run all three phases in order.
if len(phases) == 0:
    phases = [
        CompilePhase(main_args),
        RunPhase(main_args),
        VerifyPhase(main_args)
    ]



json_path = os.path.join(main_args.build_dir, json_rel_path)

# Regenerate the test manifest if requested. (The original wrapped
# json_path in a pointless single-argument os.path.join.)
if main_args.update_json:
    scons(json_path)

with open(json_path) as f:
    test_data = json.load(f)

    # The filter is a python expression evaluated against each test's
    # properties; --filter-file supplies it from a file instead. Note
    # eval() here is a deliberate CLI feature, not untrusted input.
    if main_args.filter_file:
        filter_f = main_args.filter_file
        filt = compile(filter_f.read(), filter_f.name, 'eval')
    else:
        filt = compile(main_args.filter, '<string>', 'eval')

    # items() rather than py2-only iteritems() so this driver also runs
    # under python 3.
    filtered_tests = {
        target: props for (target, props) in
        test_data.items() if eval(filt, dict(props))
    }

    if main_args.list:
        for target, props in sorted(filtered_tests.items()):
            print('%s.%s' % (target, main_args.flavor))
            for key, val in props.items():
                print('    %s: %s' % (key, val))
        print('Total tests: %d' % len(filtered_tests))
    else:
        tests_to_run = [
            Test(target, main_args.flavor, main_args.build_dir, props)
            for target, props in sorted(filtered_tests.items())
        ]

        for phase in phases:
            phase.run(tests_to_run)