verify.py (13003:3a164f2f8103) verify.py (13004:ba6455680bfc)
1#!/usr/bin/env python2
2#
3# Copyright 2018 Google, Inc.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Gabe Black
29
30from __future__ import print_function
31
32import argparse
33import collections
34import difflib
35import functools
36import inspect
37import itertools
38import json
39import multiprocessing.pool
40import os
41import re
42import subprocess
43import sys
44
# Locate this script on disk so test sources can be found relative to it.
script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
# The gem5 config script handed to every test binary when it runs.
config_path = os.path.join(script_dir, 'config.py')

# Locations (relative to the build directory) of the systemc tests and the
# json manifest which describes them.
systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')
52
53
54
def scons(*args):
    """Run scons with the given extra arguments, raising on failure."""
    subprocess.check_call(['scons'] + list(args))
58
59
60
class Test(object):
    """One test binary to build/run, described by its manifest properties."""

    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        self.suffix = suffix
        self.build_dir = build_dir
        self.props = {}

        for name, value in props.iteritems():
            self.set_prop(name, value)

    def set_prop(self, key, val):
        # Mirror each property as a plain attribute for convenient access.
        setattr(self, key, val)
        self.props[key] = val

    def dir(self):
        # Directory this test builds and runs in, under the build tree.
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        # Directory holding this test's source files.
        return os.path.join(script_dir, self.path)

    def golden_dir(self):
        # Reference ("golden") outputs live next to the sources.
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        # Binary file name, e.g. "<name>.opt".
        return '%s.%s' % (self.name, self.suffix)

    def full_path(self):
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        # Per-flavor simulation output directory.
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        # File which records the exit status of the test run.
        return os.path.join(self.m5out_dir(), 'returncode')
95
96
97
# Registry mapping phase names to their classes, filled in by the metaclass.
test_phase_classes = {}

class TestPhaseMeta(type):
    """Metaclass which auto-registers every concrete phase class."""
    def __init__(cls, name, bases, d):
        # Classes marked abstract are not registered; 'abstract' is popped
        # so subclasses don't silently inherit it.
        is_abstract = d.pop('abstract', False)
        if not is_abstract:
            test_phase_classes[d['name']] = cls

        super(TestPhaseMeta, cls).__init__(name, bases, d)
106
class TestPhaseBase(object):
    """Common base for test phases; instances sort by their phase number."""
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        # Keep the top level options and this phase's own options around.
        self.main_args, self.args = main_args, args

    def __lt__(self, other):
        # Order phases by sequence number (compile < execute < verify).
        return self.number < other.number
117
class CompilePhase(TestPhaseBase):
    """Phase which builds the test binaries with scons."""
    name = 'compile'
    number = 1

    def run(self, tests):
        # Build all the requested binaries in one scons invocation.
        # (Dropped the redundant list() wrapper around the comprehension.)
        targets = [test.full_path() for test in tests]
        scons_args = list(self.args) + targets
        scons(*scons_args)
126
class RunPhase(TestPhaseBase):
    """Phase which executes the compiled tests and records their exit codes."""
    name = 'execute'
    number = 2

    def run(self, tests):
        parser = argparse.ArgumentParser()
        parser.add_argument('--timeout', type=int, metavar='SECONDS',
                            help='Time limit for each run in seconds.',
                            default=0)
        parser.add_argument('-j', type=int, default=1,
                            help='How many tests to run in parallel.')
        args = parser.parse_args(self.args)

        # Wrap each run in timeout(1) so a hung simulation can't stall the
        # phase; SIGKILL follows if the first signal is ignored.
        timeout_cmd = [
            'timeout',
            '--kill-after', str(args.timeout * 2),
            str(args.timeout)
        ]
        def run_test(test):
            cmd = []
            if args.timeout:
                cmd.extend(timeout_cmd)
            cmd.extend([
                test.full_path(),
                '-red', test.m5out_dir(),
                '--listener-mode=off',
                '--quiet',
                config_path
            ])
            # Ensure the output directory exists.
            if not os.path.exists(test.m5out_dir()):
                os.makedirs(test.m5out_dir())
            try:
                subprocess.check_call(cmd)
            # Fix: use the 'except ... as ...' form (valid in python 2.6+)
            # instead of the deprecated comma syntax.
            except subprocess.CalledProcessError as error:
                returncode = error.returncode
            else:
                returncode = 0
            # Record the exit status for the verify phase to inspect.
            with open(test.returncode_file(), 'w') as rc:
                rc.write('%d\n' % returncode)

        # Compile-only tests have nothing to execute.
        runnable = filter(lambda t: not t.compile_only, tests)
        if args.j == 1:
            map(run_test, runnable)
        else:
            tp = multiprocessing.pool.ThreadPool(args.j)
            map(lambda t: tp.apply_async(run_test, (t,)), runnable)
            tp.close()
            tp.join()
176
class Checker(object):
    """Compares a file a test produced against a golden reference file."""

    def __init__(self, ref, test, tag):
        self.ref = ref      # Path to the golden reference file.
        self.test = test    # Path to the file the test produced.
        self.tag = tag      # Short label identifying this comparison.

    def check(self):
        """Return True when the test output matches the reference exactly.

        Bug fix: this used to open self.text, an attribute which was never
        set, so every exact comparison raised AttributeError. It now opens
        self.test as intended.
        """
        with open(self.test) as test_f, open(self.ref) as ref_f:
            return test_f.read() == ref_f.read()
186
187class LogChecker(Checker):
188 def merge_filts(*filts):
189 filts = map(lambda f: '(' + f + ')', filts)
190 filts = '|'.join(filts)
191 return re.compile(filts, flags=re.MULTILINE)
192
193 ref_filt = merge_filts(
194 r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n',
1#!/usr/bin/env python2
2#
3# Copyright 2018 Google, Inc.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Gabe Black
29
30from __future__ import print_function
31
32import argparse
33import collections
34import difflib
35import functools
36import inspect
37import itertools
38import json
39import multiprocessing.pool
40import os
41import re
42import subprocess
43import sys
44
# Locate this script on disk so test sources can be found relative to it.
script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
# The gem5 config script handed to every test binary when it runs.
config_path = os.path.join(script_dir, 'config.py')

# Locations (relative to the build directory) of the systemc tests and the
# json manifest which describes them.
systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')
52
53
54
def scons(*args):
    """Run scons with the given extra arguments, raising on failure."""
    subprocess.check_call(['scons'] + list(args))
58
59
60
class Test(object):
    """One test binary to build/run, described by its manifest properties."""

    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        self.suffix = suffix
        self.build_dir = build_dir
        self.props = {}

        for name, value in props.iteritems():
            self.set_prop(name, value)

    def set_prop(self, key, val):
        # Mirror each property as a plain attribute for convenient access.
        setattr(self, key, val)
        self.props[key] = val

    def dir(self):
        # Directory this test builds and runs in, under the build tree.
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        # Directory holding this test's source files.
        return os.path.join(script_dir, self.path)

    def golden_dir(self):
        # Reference ("golden") outputs live next to the sources.
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        # Binary file name, e.g. "<name>.opt".
        return '%s.%s' % (self.name, self.suffix)

    def full_path(self):
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        # Per-flavor simulation output directory.
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        # File which records the exit status of the test run.
        return os.path.join(self.m5out_dir(), 'returncode')
95
96
97
# Registry mapping phase names to their classes, filled in by the metaclass.
test_phase_classes = {}

class TestPhaseMeta(type):
    """Metaclass which auto-registers every concrete phase class."""
    def __init__(cls, name, bases, d):
        # Classes marked abstract are not registered; 'abstract' is popped
        # so subclasses don't silently inherit it.
        is_abstract = d.pop('abstract', False)
        if not is_abstract:
            test_phase_classes[d['name']] = cls

        super(TestPhaseMeta, cls).__init__(name, bases, d)
106
class TestPhaseBase(object):
    """Common base for test phases; instances sort by their phase number."""
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        # Keep the top level options and this phase's own options around.
        self.main_args, self.args = main_args, args

    def __lt__(self, other):
        # Order phases by sequence number (compile < execute < verify).
        return self.number < other.number
117
class CompilePhase(TestPhaseBase):
    """Phase which builds the test binaries with scons."""
    name = 'compile'
    number = 1

    def run(self, tests):
        # Build all the requested binaries in one scons invocation.
        # (Dropped the redundant list() wrapper around the comprehension.)
        targets = [test.full_path() for test in tests]
        scons_args = list(self.args) + targets
        scons(*scons_args)
126
class RunPhase(TestPhaseBase):
    """Phase which executes the compiled tests and records their exit codes."""
    name = 'execute'
    number = 2

    def run(self, tests):
        parser = argparse.ArgumentParser()
        parser.add_argument('--timeout', type=int, metavar='SECONDS',
                            help='Time limit for each run in seconds.',
                            default=0)
        parser.add_argument('-j', type=int, default=1,
                            help='How many tests to run in parallel.')
        args = parser.parse_args(self.args)

        # Wrap each run in timeout(1) so a hung simulation can't stall the
        # phase; SIGKILL follows if the first signal is ignored.
        timeout_cmd = [
            'timeout',
            '--kill-after', str(args.timeout * 2),
            str(args.timeout)
        ]
        def run_test(test):
            cmd = []
            if args.timeout:
                cmd.extend(timeout_cmd)
            cmd.extend([
                test.full_path(),
                '-red', test.m5out_dir(),
                '--listener-mode=off',
                '--quiet',
                config_path
            ])
            # Ensure the output directory exists.
            if not os.path.exists(test.m5out_dir()):
                os.makedirs(test.m5out_dir())
            try:
                subprocess.check_call(cmd)
            # Fix: use the 'except ... as ...' form (valid in python 2.6+)
            # instead of the deprecated comma syntax.
            except subprocess.CalledProcessError as error:
                returncode = error.returncode
            else:
                returncode = 0
            # Record the exit status for the verify phase to inspect.
            with open(test.returncode_file(), 'w') as rc:
                rc.write('%d\n' % returncode)

        # Compile-only tests have nothing to execute.
        runnable = filter(lambda t: not t.compile_only, tests)
        if args.j == 1:
            map(run_test, runnable)
        else:
            tp = multiprocessing.pool.ThreadPool(args.j)
            map(lambda t: tp.apply_async(run_test, (t,)), runnable)
            tp.close()
            tp.join()
176
class Checker(object):
    """Compares a file a test produced against a golden reference file."""

    def __init__(self, ref, test, tag):
        self.ref = ref      # Path to the golden reference file.
        self.test = test    # Path to the file the test produced.
        self.tag = tag      # Short label identifying this comparison.

    def check(self):
        """Return True when the test output matches the reference exactly.

        Bug fix: this used to open self.text, an attribute which was never
        set, so every exact comparison raised AttributeError. It now opens
        self.test as intended.
        """
        with open(self.test) as test_f, open(self.ref) as ref_f:
            return test_f.read() == ref_f.read()
186
187class LogChecker(Checker):
188 def merge_filts(*filts):
189 filts = map(lambda f: '(' + f + ')', filts)
190 filts = '|'.join(filts)
191 return re.compile(filts, flags=re.MULTILINE)
192
193 ref_filt = merge_filts(
194 r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n',
195 r'^SystemC Simulation\n'
195 r'^SystemC Simulation\n',
196 r'^\nWarning: .*\nIn file: .*\n'
196 )
197 test_filt = merge_filts(
198 r'^Global frequency set at \d* ticks per second\n'
199 )
200
201 def __init__(self, ref, test, tag, out_dir):
202 super(LogChecker, self).__init__(ref, test, tag)
203 self.out_dir = out_dir
204
205 def apply_filters(self, data, filts):
206 re.sub(filt, '', data)
207
208 def check(self):
209 test_file = os.path.basename(self.test)
210 ref_file = os.path.basename(self.ref)
211 with open(self.test) as test_f, open(self.ref) as ref_f:
212 test = re.sub(self.test_filt, '', test_f.read())
213 ref = re.sub(self.ref_filt, '', ref_f.read())
214 if test != ref:
215 diff_file = '.'.join([ref_file, 'diff'])
216 diff_path = os.path.join(self.out_dir, diff_file)
217 with open(diff_path, 'w') as diff_f:
218 for line in difflib.unified_diff(
219 ref.splitlines(True), test.splitlines(True),
220 fromfile=ref_file,
221 tofile=test_file):
222 diff_f.write(line)
223 return False
224 return True
225
class VerifyPhase(TestPhaseBase):
    """Phase which checks test results and reports/records pass/fail."""
    name = 'verify'
    number = 3

    def reset_status(self):
        # _passed: list of Tests; _failed: failure cause -> list of Tests.
        self._passed = []
        self._failed = {}

    def passed(self, test):
        self._passed.append(test)

    def failed(self, test, cause, note=''):
        # Remember why the test failed; the note carries extra detail.
        test.set_prop('note', note)
        self._failed.setdefault(cause, []).append(test)

    def print_status(self):
        """Print a one line pass/fail summary."""
        total_passed = len(self._passed)
        total_failed = sum(map(len, self._failed.values()))
        print()
        print('Passed: {passed:4} - Failed: {failed:4}'.format(
            passed=total_passed, failed=total_failed))

    def write_result_file(self, path):
        """Dump the pass/fail results as JSON to the given path."""
        results = {
            'passed': map(lambda t: t.props, self._passed),
            'failed': {
                cause: map(lambda t: t.props, tests) for
                cause, tests in self._failed.iteritems()
            }
        }
        with open(path, 'w') as rf:
            json.dump(results, rf)

    def print_results(self):
        """Print the full lists of passing and failing tests."""
        print()
        print('Passed:')
        for path in sorted(t.path for t in self._passed):
            print('    ', path)

        print()
        print('Failed:')

        causes = []
        for cause, tests in sorted(self._failed.items()):
            block = '  ' + cause.capitalize() + ':\n'
            for test in sorted(tests, key=lambda t: t.path):
                block += '    ' + test.path
                if test.note:
                    block += ' - ' + test.note
                block += '\n'
            causes.append(block)

        print('\n'.join(causes))

    def run(self, tests):
        parser = argparse.ArgumentParser()
        result_opts = parser.add_mutually_exclusive_group()
        result_opts.add_argument('--result-file', action='store_true',
                help='Create a results.json file in the current directory.')
        result_opts.add_argument('--result-file-at', metavar='PATH',
                help='Create a results json file at the given path.')
        parser.add_argument('--print-results', action='store_true',
                help='Print a list of tests that passed or failed')
        args = parser.parse_args(self.args)

        self.reset_status()

        runnable = filter(lambda t: not t.compile_only, tests)
        compile_only = filter(lambda t: t.compile_only, tests)

        # Compile-only tests pass if their binary exists.
        for test in compile_only:
            if os.path.exists(test.full_path()):
                self.passed(test)
            else:
                self.failed(test, 'compile failed')

        for test in runnable:
            with open(test.returncode_file()) as rc:
                returncode = int(rc.read())

            # timeout(1) exits with 124 when the time limit was hit.
            if returncode == 124:
                self.failed(test, 'time out')
                continue
            elif returncode != 0:
                self.failed(test, 'abort')
                continue

            out_dir = test.m5out_dir()

            # (Removed an unused 'Diff' namedtuple that was rebuilt on
            # every iteration but never referenced.)
            diffs = []

            log_file = '.'.join([test.name, 'log'])
            log_path = os.path.join(test.golden_dir(), log_file)
            simout_path = os.path.join(out_dir, 'simout')
            if not os.path.exists(simout_path):
                self.failed(test, 'no log output')
                # Bug fix: without this continue, a test with no output
                # fell through and could also be recorded as passing.
                continue
            if os.path.exists(log_path):
                diffs.append(LogChecker(
                    log_path, simout_path, log_file, out_dir))

            failed_diffs = filter(lambda d: not d.check(), diffs)
            if failed_diffs:
                tags = map(lambda d: d.tag, failed_diffs)
                self.failed(test, 'failed diffs', ' '.join(tags))
                continue

            self.passed(test)

        if args.print_results:
            self.print_results()

        self.print_status()

        result_path = None
        if args.result_file:
            result_path = os.path.join(os.getcwd(), 'results.json')
        elif args.result_file_at:
            result_path = args.result_file_at

        if result_path:
            self.write_result_file(result_path)
350
351
# Top-level command line interface, shared by every phase.
parser = argparse.ArgumentParser(description='SystemC test utility')

parser.add_argument('build_dir', metavar='BUILD_DIR',
                    help='The build directory (ie. build/ARM).')

parser.add_argument('--update-json', action='store_true',
                    help='Update the json manifest of tests.')

parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'],
                    default='opt',
                    help='Flavor of binary to test.')

parser.add_argument('--list', action='store_true',
                    help='List the available tests')

# --filter and --filter-file are two ways of supplying the same thing: a
# python expression evaluated against each test's manifest properties.
filter_opts = parser.add_mutually_exclusive_group()
filter_opts.add_argument('--filter', default='True',
                         help='Python expression which filters tests based '
                         'on their properties')
filter_opts.add_argument('--filter-file', default=None,
                         type=argparse.FileType('r'),
                         help='Same as --filter, but read from a file')
374
def collect_phases(args):
    """Split argv on '--phase' into main options and per-phase options.

    Returns (main_args, phases) where phases are instances sorted into
    run order. Raises RuntimeError if a phase is requested twice.
    """
    # Group argv into [main args, phase 1 args, phase 2 args, ...].
    phase_groups = [list(g) for k, g in
                    itertools.groupby(args, lambda x: x != '--phase') if k]
    main_args = parser.parse_args(phase_groups[0][1:])
    phases = []
    names = []
    for group in phase_groups[1:]:
        name = group[0]
        if name in names:
            # Bug fix: this raised the nonexistent 'RuntimeException', and
            # 'names' was never appended to, so the duplicate check could
            # never actually fire.
            raise RuntimeError('Phase %s specified more than once' % name)
        names.append(name)
        phase = test_phase_classes[name]
        phases.append(phase(main_args, *group[1:]))
    phases.sort()
    return main_args, phases
389
main_args, phases = collect_phases(sys.argv)

# With no explicit --phase arguments, run all three phases in order.
if len(phases) == 0:
    phases = [
        CompilePhase(main_args),
        RunPhase(main_args),
        VerifyPhase(main_args)
    ]



json_path = os.path.join(main_args.build_dir, json_rel_path)

# Regenerate the manifest through scons if requested.
if main_args.update_json:
    scons(os.path.join(json_path))

with open(json_path) as f:
    test_data = json.load(f)

    # Build the filter expression, either from --filter-file or --filter.
    if main_args.filter_file:
        f = main_args.filter_file
        filt = compile(f.read(), f.name, 'eval')
    else:
        filt = compile(main_args.filter, '<string>', 'eval')

    # NOTE: eval of a user-supplied expression is deliberate here -- the
    # filter is a python expression given on this tool's own command line,
    # evaluated against each test's properties. Do not feed it untrusted
    # input.
    filtered_tests = {
        target: props for (target, props) in
        test_data.iteritems() if eval(filt, dict(props))
    }

    if main_args.list:
        # Just describe the selected tests; don't run anything.
        for target, props in sorted(filtered_tests.iteritems()):
            print('%s.%s' % (target, main_args.flavor))
            for key, val in props.iteritems():
                print('    %s: %s' % (key, val))
        print('Total tests: %d' % len(filtered_tests))
    else:
        tests_to_run = list([
            Test(target, main_args.flavor, main_args.build_dir, props) for
            target, props in sorted(filtered_tests.iteritems())
        ])

        # Run each phase (compile, execute, verify) over the whole set.
        for phase in phases:
            phase.run(tests_to_run)
197 )
198 test_filt = merge_filts(
199 r'^Global frequency set at \d* ticks per second\n'
200 )
201
202 def __init__(self, ref, test, tag, out_dir):
203 super(LogChecker, self).__init__(ref, test, tag)
204 self.out_dir = out_dir
205
206 def apply_filters(self, data, filts):
207 re.sub(filt, '', data)
208
209 def check(self):
210 test_file = os.path.basename(self.test)
211 ref_file = os.path.basename(self.ref)
212 with open(self.test) as test_f, open(self.ref) as ref_f:
213 test = re.sub(self.test_filt, '', test_f.read())
214 ref = re.sub(self.ref_filt, '', ref_f.read())
215 if test != ref:
216 diff_file = '.'.join([ref_file, 'diff'])
217 diff_path = os.path.join(self.out_dir, diff_file)
218 with open(diff_path, 'w') as diff_f:
219 for line in difflib.unified_diff(
220 ref.splitlines(True), test.splitlines(True),
221 fromfile=ref_file,
222 tofile=test_file):
223 diff_f.write(line)
224 return False
225 return True
226
class VerifyPhase(TestPhaseBase):
    """Phase which checks test results and reports/records pass/fail."""
    name = 'verify'
    number = 3

    def reset_status(self):
        # _passed: list of Tests; _failed: failure cause -> list of Tests.
        self._passed = []
        self._failed = {}

    def passed(self, test):
        self._passed.append(test)

    def failed(self, test, cause, note=''):
        # Remember why the test failed; the note carries extra detail.
        test.set_prop('note', note)
        self._failed.setdefault(cause, []).append(test)

    def print_status(self):
        """Print a one line pass/fail summary."""
        total_passed = len(self._passed)
        total_failed = sum(map(len, self._failed.values()))
        print()
        print('Passed: {passed:4} - Failed: {failed:4}'.format(
            passed=total_passed, failed=total_failed))

    def write_result_file(self, path):
        """Dump the pass/fail results as JSON to the given path."""
        results = {
            'passed': map(lambda t: t.props, self._passed),
            'failed': {
                cause: map(lambda t: t.props, tests) for
                cause, tests in self._failed.iteritems()
            }
        }
        with open(path, 'w') as rf:
            json.dump(results, rf)

    def print_results(self):
        """Print the full lists of passing and failing tests."""
        print()
        print('Passed:')
        for path in sorted(t.path for t in self._passed):
            print('    ', path)

        print()
        print('Failed:')

        causes = []
        for cause, tests in sorted(self._failed.items()):
            block = '  ' + cause.capitalize() + ':\n'
            for test in sorted(tests, key=lambda t: t.path):
                block += '    ' + test.path
                if test.note:
                    block += ' - ' + test.note
                block += '\n'
            causes.append(block)

        print('\n'.join(causes))

    def run(self, tests):
        parser = argparse.ArgumentParser()
        result_opts = parser.add_mutually_exclusive_group()
        result_opts.add_argument('--result-file', action='store_true',
                help='Create a results.json file in the current directory.')
        result_opts.add_argument('--result-file-at', metavar='PATH',
                help='Create a results json file at the given path.')
        parser.add_argument('--print-results', action='store_true',
                help='Print a list of tests that passed or failed')
        args = parser.parse_args(self.args)

        self.reset_status()

        runnable = filter(lambda t: not t.compile_only, tests)
        compile_only = filter(lambda t: t.compile_only, tests)

        # Compile-only tests pass if their binary exists.
        for test in compile_only:
            if os.path.exists(test.full_path()):
                self.passed(test)
            else:
                self.failed(test, 'compile failed')

        for test in runnable:
            with open(test.returncode_file()) as rc:
                returncode = int(rc.read())

            # timeout(1) exits with 124 when the time limit was hit.
            if returncode == 124:
                self.failed(test, 'time out')
                continue
            elif returncode != 0:
                self.failed(test, 'abort')
                continue

            out_dir = test.m5out_dir()

            # (Removed an unused 'Diff' namedtuple that was rebuilt on
            # every iteration but never referenced.)
            diffs = []

            log_file = '.'.join([test.name, 'log'])
            log_path = os.path.join(test.golden_dir(), log_file)
            simout_path = os.path.join(out_dir, 'simout')
            if not os.path.exists(simout_path):
                self.failed(test, 'no log output')
                # Bug fix: without this continue, a test with no output
                # fell through and could also be recorded as passing.
                continue
            if os.path.exists(log_path):
                diffs.append(LogChecker(
                    log_path, simout_path, log_file, out_dir))

            failed_diffs = filter(lambda d: not d.check(), diffs)
            if failed_diffs:
                tags = map(lambda d: d.tag, failed_diffs)
                self.failed(test, 'failed diffs', ' '.join(tags))
                continue

            self.passed(test)

        if args.print_results:
            self.print_results()

        self.print_status()

        result_path = None
        if args.result_file:
            result_path = os.path.join(os.getcwd(), 'results.json')
        elif args.result_file_at:
            result_path = args.result_file_at

        if result_path:
            self.write_result_file(result_path)
351
352
# Top-level command line interface, shared by every phase.
parser = argparse.ArgumentParser(description='SystemC test utility')

parser.add_argument('build_dir', metavar='BUILD_DIR',
                    help='The build directory (ie. build/ARM).')

parser.add_argument('--update-json', action='store_true',
                    help='Update the json manifest of tests.')

parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'],
                    default='opt',
                    help='Flavor of binary to test.')

parser.add_argument('--list', action='store_true',
                    help='List the available tests')

# --filter and --filter-file are two ways of supplying the same thing: a
# python expression evaluated against each test's manifest properties.
filter_opts = parser.add_mutually_exclusive_group()
filter_opts.add_argument('--filter', default='True',
                         help='Python expression which filters tests based '
                         'on their properties')
filter_opts.add_argument('--filter-file', default=None,
                         type=argparse.FileType('r'),
                         help='Same as --filter, but read from a file')
375
def collect_phases(args):
    """Split argv on '--phase' into main options and per-phase options.

    Returns (main_args, phases) where phases are instances sorted into
    run order. Raises RuntimeError if a phase is requested twice.
    """
    # Group argv into [main args, phase 1 args, phase 2 args, ...].
    phase_groups = [list(g) for k, g in
                    itertools.groupby(args, lambda x: x != '--phase') if k]
    main_args = parser.parse_args(phase_groups[0][1:])
    phases = []
    names = []
    for group in phase_groups[1:]:
        name = group[0]
        if name in names:
            # Bug fix: this raised the nonexistent 'RuntimeException', and
            # 'names' was never appended to, so the duplicate check could
            # never actually fire.
            raise RuntimeError('Phase %s specified more than once' % name)
        names.append(name)
        phase = test_phase_classes[name]
        phases.append(phase(main_args, *group[1:]))
    phases.sort()
    return main_args, phases
390
main_args, phases = collect_phases(sys.argv)

# With no explicit --phase arguments, run all three phases in order.
if len(phases) == 0:
    phases = [
        CompilePhase(main_args),
        RunPhase(main_args),
        VerifyPhase(main_args)
    ]



json_path = os.path.join(main_args.build_dir, json_rel_path)

# Regenerate the manifest through scons if requested.
if main_args.update_json:
    scons(os.path.join(json_path))

with open(json_path) as f:
    test_data = json.load(f)

    # Build the filter expression, either from --filter-file or --filter.
    if main_args.filter_file:
        f = main_args.filter_file
        filt = compile(f.read(), f.name, 'eval')
    else:
        filt = compile(main_args.filter, '<string>', 'eval')

    # NOTE: eval of a user-supplied expression is deliberate here -- the
    # filter is a python expression given on this tool's own command line,
    # evaluated against each test's properties. Do not feed it untrusted
    # input.
    filtered_tests = {
        target: props for (target, props) in
        test_data.iteritems() if eval(filt, dict(props))
    }

    if main_args.list:
        # Just describe the selected tests; don't run anything.
        for target, props in sorted(filtered_tests.iteritems()):
            print('%s.%s' % (target, main_args.flavor))
            for key, val in props.iteritems():
                print('    %s: %s' % (key, val))
        print('Total tests: %d' % len(filtered_tests))
    else:
        tests_to_run = list([
            Test(target, main_args.flavor, main_args.build_dir, props) for
            target, props in sorted(filtered_tests.iteritems())
        ])

        # Run each phase (compile, execute, verify) over the whole set.
        for phase in phases:
            phase.run(tests_to_run)