1#!/usr/bin/env python2 2# 3# Copyright 2018 Google, Inc. 4# 5# Redistribution and use in source and binary forms, with or without 6# modification, are permitted provided that the following conditions are 7# met: redistributions of source code must retain the above copyright 8# notice, this list of conditions and the following disclaimer; 9# redistributions in binary form must reproduce the above copyright 10# notice, this list of conditions and the following disclaimer in the 11# documentation and/or other materials provided with the distribution; 12# neither the name of the copyright holders nor the names of its 13# contributors may be used to endorse or promote products derived from 14# this software without specific prior written permission. 15# 16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#
# Authors: Gabe Black

from __future__ import print_function

import argparse
import collections
import difflib
import functools
import inspect
import itertools
import json
import multiprocessing.pool
import os
import re
import subprocess
import sys

# Absolute location of this script; all other paths are found relative to it.
script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
config_path = os.path.join(script_dir, 'config.py')

systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')


def scons(*args):
    """Run scons with the given command line arguments."""
    subprocess.check_call(['scons'] + list(args))


class Test(object):
    """A single SystemC test and the paths associated with it."""

    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        self.suffix = suffix
        self.build_dir = build_dir
        self.props = {}

        # Mirror every property both as an attribute and in self.props.
        for key, val in props.iteritems():
            self.set_prop(key, val)

    def set_prop(self, key, val):
        # Keep the attribute view and the dict view in sync.
        setattr(self, key, val)
        self.props[key] = val

    def dir(self):
        """Directory the test is built into and runs in."""
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        """Directory holding the test's source files."""
        return os.path.join(script_dir, self.path)

    def golden_dir(self):
        """Directory holding the test's reference (golden) outputs."""
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        """Name of the test binary, e.g. 'name.opt'."""
        return '.'.join([self.name, self.suffix])

    def full_path(self):
        """Full path to the test binary."""
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        """Per-flavor output directory for this test's run."""
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        """File the run phase records the simulator's exit code in."""
        return os.path.join(self.m5out_dir(), 'returncode')


# Registry mapping phase names to phase classes, filled in by the metaclass.
test_phase_classes = {}

class TestPhaseMeta(type):
    def __init__(cls, name, bases, d):
        # Register every concrete (non-abstract) phase under its 'name'.
        if not d.pop('abstract', False):
            test_phase_classes[d['name']] = cls

        super(TestPhaseMeta, cls).__init__(name, bases, d)

class TestPhaseBase(object):
    """Base class for test phases; subclasses set 'name' and 'number'."""
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        self.main_args = main_args
        self.args = args

    def __lt__(self, other):
        # Phases sort by their fixed ordering number.
        return self.number < other.number

class CompilePhase(TestPhaseBase):
    name = 'compile'
    number = 1

    # NOTE(review): the body of run() is cut off in the visible source
    # (one line is missing); the surviving fragment is kept as-is.
    def run(self, tests):
        targets = list([test.full_path() for test in tests])
| 1#!/usr/bin/env python2 2# 3# Copyright 2018 Google, Inc. 4# 5# Redistribution and use in source and binary forms, with or without 6# modification, are permitted provided that the following conditions are 7# met: redistributions of source code must retain the above copyright 8# notice, this list of conditions and the following disclaimer; 9# redistributions in binary form must reproduce the above copyright 10# notice, this list of conditions and the following disclaimer in the 11# documentation and/or other materials provided with the distribution; 12# neither the name of the copyright holders nor the names of its 13# contributors may be used to endorse or promote products derived from 14# this software without specific prior written permission. 15# 16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#
# Authors: Gabe Black

from __future__ import print_function

import argparse
import collections
import difflib
import functools
import inspect
import itertools
import json
import multiprocessing.pool
import os
import re
import subprocess
import sys

# Absolute location of this script; all other paths are found relative to it.
script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
config_path = os.path.join(script_dir, 'config.py')

systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')


def scons(*args):
    """Run scons with the given command line arguments."""
    subprocess.check_call(['scons'] + list(args))


class Test(object):
    """A single SystemC test and the paths associated with it."""

    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        self.suffix = suffix
        self.build_dir = build_dir
        self.props = {}

        # Mirror every property both as an attribute and in self.props.
        for key, val in props.iteritems():
            self.set_prop(key, val)

    def set_prop(self, key, val):
        # Keep the attribute view and the dict view in sync.
        setattr(self, key, val)
        self.props[key] = val

    def dir(self):
        """Directory the test is built into and runs in."""
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        """Directory holding the test's source files."""
        return os.path.join(script_dir, self.path)

    def golden_dir(self):
        """Directory holding the test's reference (golden) outputs."""
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        """Name of the test binary, e.g. 'name.opt'."""
        return '.'.join([self.name, self.suffix])

    def full_path(self):
        """Full path to the test binary."""
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        """Per-flavor output directory for this test's run."""
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        """File the run phase records the simulator's exit code in."""
        return os.path.join(self.m5out_dir(), 'returncode')


# Registry mapping phase names to phase classes, filled in by the metaclass.
test_phase_classes = {}

class TestPhaseMeta(type):
    def __init__(cls, name, bases, d):
        # Register every concrete (non-abstract) phase under its 'name'.
        if not d.pop('abstract', False):
            test_phase_classes[d['name']] = cls

        super(TestPhaseMeta, cls).__init__(name, bases, d)

class TestPhaseBase(object):
    """Base class for test phases; subclasses set 'name' and 'number'."""
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        self.main_args = main_args
        self.args = args

    def __lt__(self, other):
        # Phases sort by their fixed ordering number.
        return self.number < other.number

class CompilePhase(TestPhaseBase):
    name = 'compile'
    number = 1

    # NOTE(review): the body of run() is cut off in the visible source
    # (one line is missing); the surviving fragment is kept as-is.
    def run(self, tests):
        targets = list([test.full_path() for test in tests])
|
125 scons(*scons_args) 126 127class RunPhase(TestPhaseBase): 128 name = 'execute' 129 number = 2 130 131 def run(self, tests): 132 parser = argparse.ArgumentParser() 133 parser.add_argument('--timeout', type=int, metavar='SECONDS', 134 help='Time limit for each run in seconds.', 135 default=0) 136 parser.add_argument('-j', type=int, default=1, 137 help='How many tests to run in parallel.') 138 args = parser.parse_args(self.args) 139 140 timeout_cmd = [ 141 'timeout', 142 '--kill-after', str(args.timeout * 2), 143 str(args.timeout) 144 ] 145 def run_test(test): 146 cmd = [] 147 if args.timeout: 148 cmd.extend(timeout_cmd) 149 cmd.extend([ 150 test.full_path(), 151 '-red', test.m5out_dir(), 152 '--listener-mode=off', 153 '--quiet', 154 config_path 155 ]) 156 # Ensure the output directory exists. 157 if not os.path.exists(test.m5out_dir()): 158 os.makedirs(test.m5out_dir()) 159 try: 160 subprocess.check_call(cmd) 161 except subprocess.CalledProcessError, error: 162 returncode = error.returncode 163 else: 164 returncode = 0 165 with open(test.returncode_file(), 'w') as rc: 166 rc.write('%d\n' % returncode) 167 168 runnable = filter(lambda t: not t.compile_only, tests) 169 if args.j == 1: 170 map(run_test, runnable) 171 else: 172 tp = multiprocessing.pool.ThreadPool(args.j) 173 map(lambda t: tp.apply_async(run_test, (t,)), runnable) 174 tp.close() 175 tp.join() 176 177class Checker(object): 178 def __init__(self, ref, test, tag): 179 self.ref = ref 180 self.test = test 181 self.tag = tag 182 183 def check(self): 184 with open(self.text) as test_f, open(self.ref) as ref_f: 185 return test_f.read() == ref_f.read() 186 187class LogChecker(Checker): 188 def merge_filts(*filts): 189 filts = map(lambda f: '(' + f + ')', filts) 190 filts = '|'.join(filts) 191 return re.compile(filts, flags=re.MULTILINE) 192 193 def warning_filt(num): 194 return (r'^\nWarning: \(W{}\) .*\n(In file: .*\n)?' 
195 r'(In process: [\w.]* @ .*\n)?').format(num) 196 197 ref_filt = merge_filts( 198 r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n', 199 r'^SystemC Simulation\n', 200 warning_filt(571), 201 warning_filt(540), 202 r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: ' + 203 r'You can turn off(.*\n){7}' 204 ) 205 test_filt = merge_filts( 206 r'^Global frequency set at \d* ticks per second\n' 207 ) 208 209 def __init__(self, ref, test, tag, out_dir): 210 super(LogChecker, self).__init__(ref, test, tag) 211 self.out_dir = out_dir 212 213 def apply_filters(self, data, filts): 214 re.sub(filt, '', data) 215 216 def check(self): 217 test_file = os.path.basename(self.test) 218 ref_file = os.path.basename(self.ref) 219 with open(self.test) as test_f, open(self.ref) as ref_f: 220 test = re.sub(self.test_filt, '', test_f.read()) 221 ref = re.sub(self.ref_filt, '', ref_f.read()) 222 diff_file = '.'.join([ref_file, 'diff']) 223 diff_path = os.path.join(self.out_dir, diff_file) 224 if test != ref: 225 with open(diff_path, 'w') as diff_f: 226 for line in difflib.unified_diff( 227 ref.splitlines(True), test.splitlines(True), 228 fromfile=ref_file, 229 tofile=test_file): 230 diff_f.write(line) 231 return False 232 else: 233 if os.path.exists(diff_path): 234 os.unlink(diff_path) 235 return True 236 237class GoldenDir(object): 238 def __init__(self, path, platform): 239 self.path = path 240 self.platform = platform 241 242 contents = os.listdir(path) 243 suffix = '.' 
+ platform 244 suffixed = filter(lambda c: c.endswith(suffix), contents) 245 bases = map(lambda t: t[:-len(platform)], suffixed) 246 common = filter(lambda t: not t.startswith(tuple(bases)), contents) 247 248 self.entries = {} 249 class Entry(object): 250 def __init__(self, e_path): 251 self.used = False 252 self.path = os.path.join(path, e_path) 253 254 def use(self): 255 self.used = True 256 257 for entry in contents: 258 self.entries[entry] = Entry(entry) 259 260 def entry(self, name): 261 def match(n): 262 return (n == name) or n.startswith(name + '.') 263 matches = { n: e for n, e in self.entries.items() if match(n) } 264 265 for match in matches.values(): 266 match.use() 267 268 platform_name = '.'.join([ name, self.platform ]) 269 if platform_name in matches: 270 return matches[platform_name].path 271 if name in matches: 272 return matches[name].path 273 else: 274 return None 275 276 def unused(self): 277 items = self.entries.items() 278 items = filter(lambda i: not i[1].used, items) 279 280 items.sort() 281 sources = [] 282 i = 0 283 while i < len(items): 284 root = items[i][0] 285 sources.append(root) 286 i += 1 287 while i < len(items) and items[i][0].startswith(root): 288 i += 1 289 return sources 290 291class VerifyPhase(TestPhaseBase): 292 name = 'verify' 293 number = 3 294 295 def reset_status(self): 296 self._passed = [] 297 self._failed = {} 298 299 def passed(self, test): 300 self._passed.append(test) 301 302 def failed(self, test, cause, note=''): 303 test.set_prop('note', note) 304 self._failed.setdefault(cause, []).append(test) 305 306 def print_status(self): 307 total_passed = len(self._passed) 308 total_failed = sum(map(len, self._failed.values())) 309 print() 310 print('Passed: {passed:4} - Failed: {failed:4}'.format( 311 passed=total_passed, failed=total_failed)) 312 313 def write_result_file(self, path): 314 results = { 315 'passed': map(lambda t: t.props, self._passed), 316 'failed': { 317 cause: map(lambda t: t.props, tests) for 318 
cause, tests in self._failed.iteritems() 319 } 320 } 321 with open(path, 'w') as rf: 322 json.dump(results, rf) 323 324 def print_results(self): 325 print() 326 print('Passed:') 327 for path in sorted(list([ t.path for t in self._passed ])): 328 print(' ', path) 329 330 print() 331 print('Failed:') 332 333 causes = [] 334 for cause, tests in sorted(self._failed.items()): 335 block = ' ' + cause.capitalize() + ':\n' 336 for test in sorted(tests, key=lambda t: t.path): 337 block += ' ' + test.path 338 if test.note: 339 block += ' - ' + test.note 340 block += '\n' 341 causes.append(block) 342 343 print('\n'.join(causes)) 344 345 def run(self, tests): 346 parser = argparse.ArgumentParser() 347 result_opts = parser.add_mutually_exclusive_group() 348 result_opts.add_argument('--result-file', action='store_true', 349 help='Create a results.json file in the current directory.') 350 result_opts.add_argument('--result-file-at', metavar='PATH', 351 help='Create a results json file at the given path.') 352 parser.add_argument('--print-results', action='store_true', 353 help='Print a list of tests that passed or failed') 354 args = parser.parse_args(self.args) 355 356 self.reset_status() 357 358 runnable = filter(lambda t: not t.compile_only, tests) 359 compile_only = filter(lambda t: t.compile_only, tests) 360 361 for test in compile_only: 362 if os.path.exists(test.full_path()): 363 self.passed(test) 364 else: 365 self.failed(test, 'compile failed') 366 367 for test in runnable: 368 with open(test.returncode_file()) as rc: 369 returncode = int(rc.read()) 370 371 if returncode == 124: 372 self.failed(test, 'time out') 373 continue 374 elif returncode != 0: 375 self.failed(test, 'abort') 376 continue 377 378 out_dir = test.m5out_dir() 379 380 Diff = collections.namedtuple( 381 'Diff', 'ref, test, tag, ref_filter') 382 383 diffs = [] 384 385 gd = GoldenDir(test.golden_dir(), 'linux64') 386 387 missing = [] 388 log_file = '.'.join([test.name, 'log']) 389 log_path = 
gd.entry(log_file) 390 simout_path = os.path.join(out_dir, 'simout') 391 if not os.path.exists(simout_path): 392 missing.append('log output') 393 elif log_path: 394 diffs.append(LogChecker(log_path, simout_path, 395 log_file, out_dir)) 396 397 for name in gd.unused(): 398 test_path = os.path.join(out_dir, name) 399 ref_path = gd.entry(name) 400 if not os.path.exists(test_path): 401 missing.append(name) 402 else: 403 diffs.append(Checker(ref_path, test_path, name)) 404 405 if missing: 406 self.failed(test, 'missing output', ' '.join(missing)) 407 continue 408 409 failed_diffs = filter(lambda d: not d.check(), diffs) 410 if failed_diffs: 411 tags = map(lambda d: d.tag, failed_diffs) 412 self.failed(test, 'failed diffs', ' '.join(tags)) 413 continue 414 415 self.passed(test) 416 417 if args.print_results: 418 self.print_results() 419 420 self.print_status() 421 422 result_path = None 423 if args.result_file: 424 result_path = os.path.join(os.getcwd(), 'results.json') 425 elif args.result_file_at: 426 result_path = args.result_file_at 427 428 if result_path: 429 self.write_result_file(result_path) 430 431 432parser = argparse.ArgumentParser(description='SystemC test utility') 433 434parser.add_argument('build_dir', metavar='BUILD_DIR', 435 help='The build directory (ie. 
build/ARM).') 436 437parser.add_argument('--update-json', action='store_true', 438 help='Update the json manifest of tests.') 439 440parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'], 441 default='opt', 442 help='Flavor of binary to test.') 443 444parser.add_argument('--list', action='store_true', 445 help='List the available tests') 446 447filter_opts = parser.add_mutually_exclusive_group() 448filter_opts.add_argument('--filter', default='True', 449 help='Python expression which filters tests based ' 450 'on their properties') 451filter_opts.add_argument('--filter-file', default=None, 452 type=argparse.FileType('r'), 453 help='Same as --filter, but read from a file') 454 455def collect_phases(args): 456 phase_groups = [list(g) for k, g in 457 itertools.groupby(args, lambda x: x != '--phase') if k] 458 main_args = parser.parse_args(phase_groups[0][1:]) 459 phases = [] 460 names = [] 461 for group in phase_groups[1:]: 462 name = group[0] 463 if name in names: 464 raise RuntimeException('Phase %s specified more than once' % name) 465 phase = test_phase_classes[name] 466 phases.append(phase(main_args, *group[1:])) 467 phases.sort() 468 return main_args, phases 469 470main_args, phases = collect_phases(sys.argv) 471 472if len(phases) == 0: 473 phases = [ 474 CompilePhase(main_args), 475 RunPhase(main_args), 476 VerifyPhase(main_args) 477 ] 478 479 480 481json_path = os.path.join(main_args.build_dir, json_rel_path) 482 483if main_args.update_json: 484 scons(os.path.join(json_path)) 485 486with open(json_path) as f: 487 test_data = json.load(f) 488 489 if main_args.filter_file: 490 f = main_args.filter_file 491 filt = compile(f.read(), f.name, 'eval') 492 else: 493 filt = compile(main_args.filter, '<string>', 'eval') 494 495 filtered_tests = { 496 target: props for (target, props) in 497 test_data.iteritems() if eval(filt, dict(props)) 498 } 499 500 if main_args.list: 501 for target, props in sorted(filtered_tests.iteritems()): 502 print('%s.%s' % (target, 
main_args.flavor)) 503 for key, val in props.iteritems(): 504 print(' %s: %s' % (key, val)) 505 print('Total tests: %d' % len(filtered_tests)) 506 else: 507 tests_to_run = list([ 508 Test(target, main_args.flavor, main_args.build_dir, props) for 509 target, props in sorted(filtered_tests.iteritems()) 510 ]) 511 512 for phase in phases: 513 phase.run(tests_to_run)
| 125 scons(*scons_args) 126 127class RunPhase(TestPhaseBase): 128 name = 'execute' 129 number = 2 130 131 def run(self, tests): 132 parser = argparse.ArgumentParser() 133 parser.add_argument('--timeout', type=int, metavar='SECONDS', 134 help='Time limit for each run in seconds.', 135 default=0) 136 parser.add_argument('-j', type=int, default=1, 137 help='How many tests to run in parallel.') 138 args = parser.parse_args(self.args) 139 140 timeout_cmd = [ 141 'timeout', 142 '--kill-after', str(args.timeout * 2), 143 str(args.timeout) 144 ] 145 def run_test(test): 146 cmd = [] 147 if args.timeout: 148 cmd.extend(timeout_cmd) 149 cmd.extend([ 150 test.full_path(), 151 '-red', test.m5out_dir(), 152 '--listener-mode=off', 153 '--quiet', 154 config_path 155 ]) 156 # Ensure the output directory exists. 157 if not os.path.exists(test.m5out_dir()): 158 os.makedirs(test.m5out_dir()) 159 try: 160 subprocess.check_call(cmd) 161 except subprocess.CalledProcessError, error: 162 returncode = error.returncode 163 else: 164 returncode = 0 165 with open(test.returncode_file(), 'w') as rc: 166 rc.write('%d\n' % returncode) 167 168 runnable = filter(lambda t: not t.compile_only, tests) 169 if args.j == 1: 170 map(run_test, runnable) 171 else: 172 tp = multiprocessing.pool.ThreadPool(args.j) 173 map(lambda t: tp.apply_async(run_test, (t,)), runnable) 174 tp.close() 175 tp.join() 176 177class Checker(object): 178 def __init__(self, ref, test, tag): 179 self.ref = ref 180 self.test = test 181 self.tag = tag 182 183 def check(self): 184 with open(self.text) as test_f, open(self.ref) as ref_f: 185 return test_f.read() == ref_f.read() 186 187class LogChecker(Checker): 188 def merge_filts(*filts): 189 filts = map(lambda f: '(' + f + ')', filts) 190 filts = '|'.join(filts) 191 return re.compile(filts, flags=re.MULTILINE) 192 193 def warning_filt(num): 194 return (r'^\nWarning: \(W{}\) .*\n(In file: .*\n)?' 
195 r'(In process: [\w.]* @ .*\n)?').format(num) 196 197 ref_filt = merge_filts( 198 r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n', 199 r'^SystemC Simulation\n', 200 warning_filt(571), 201 warning_filt(540), 202 r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: ' + 203 r'You can turn off(.*\n){7}' 204 ) 205 test_filt = merge_filts( 206 r'^Global frequency set at \d* ticks per second\n' 207 ) 208 209 def __init__(self, ref, test, tag, out_dir): 210 super(LogChecker, self).__init__(ref, test, tag) 211 self.out_dir = out_dir 212 213 def apply_filters(self, data, filts): 214 re.sub(filt, '', data) 215 216 def check(self): 217 test_file = os.path.basename(self.test) 218 ref_file = os.path.basename(self.ref) 219 with open(self.test) as test_f, open(self.ref) as ref_f: 220 test = re.sub(self.test_filt, '', test_f.read()) 221 ref = re.sub(self.ref_filt, '', ref_f.read()) 222 diff_file = '.'.join([ref_file, 'diff']) 223 diff_path = os.path.join(self.out_dir, diff_file) 224 if test != ref: 225 with open(diff_path, 'w') as diff_f: 226 for line in difflib.unified_diff( 227 ref.splitlines(True), test.splitlines(True), 228 fromfile=ref_file, 229 tofile=test_file): 230 diff_f.write(line) 231 return False 232 else: 233 if os.path.exists(diff_path): 234 os.unlink(diff_path) 235 return True 236 237class GoldenDir(object): 238 def __init__(self, path, platform): 239 self.path = path 240 self.platform = platform 241 242 contents = os.listdir(path) 243 suffix = '.' 
+ platform 244 suffixed = filter(lambda c: c.endswith(suffix), contents) 245 bases = map(lambda t: t[:-len(platform)], suffixed) 246 common = filter(lambda t: not t.startswith(tuple(bases)), contents) 247 248 self.entries = {} 249 class Entry(object): 250 def __init__(self, e_path): 251 self.used = False 252 self.path = os.path.join(path, e_path) 253 254 def use(self): 255 self.used = True 256 257 for entry in contents: 258 self.entries[entry] = Entry(entry) 259 260 def entry(self, name): 261 def match(n): 262 return (n == name) or n.startswith(name + '.') 263 matches = { n: e for n, e in self.entries.items() if match(n) } 264 265 for match in matches.values(): 266 match.use() 267 268 platform_name = '.'.join([ name, self.platform ]) 269 if platform_name in matches: 270 return matches[platform_name].path 271 if name in matches: 272 return matches[name].path 273 else: 274 return None 275 276 def unused(self): 277 items = self.entries.items() 278 items = filter(lambda i: not i[1].used, items) 279 280 items.sort() 281 sources = [] 282 i = 0 283 while i < len(items): 284 root = items[i][0] 285 sources.append(root) 286 i += 1 287 while i < len(items) and items[i][0].startswith(root): 288 i += 1 289 return sources 290 291class VerifyPhase(TestPhaseBase): 292 name = 'verify' 293 number = 3 294 295 def reset_status(self): 296 self._passed = [] 297 self._failed = {} 298 299 def passed(self, test): 300 self._passed.append(test) 301 302 def failed(self, test, cause, note=''): 303 test.set_prop('note', note) 304 self._failed.setdefault(cause, []).append(test) 305 306 def print_status(self): 307 total_passed = len(self._passed) 308 total_failed = sum(map(len, self._failed.values())) 309 print() 310 print('Passed: {passed:4} - Failed: {failed:4}'.format( 311 passed=total_passed, failed=total_failed)) 312 313 def write_result_file(self, path): 314 results = { 315 'passed': map(lambda t: t.props, self._passed), 316 'failed': { 317 cause: map(lambda t: t.props, tests) for 318 
cause, tests in self._failed.iteritems() 319 } 320 } 321 with open(path, 'w') as rf: 322 json.dump(results, rf) 323 324 def print_results(self): 325 print() 326 print('Passed:') 327 for path in sorted(list([ t.path for t in self._passed ])): 328 print(' ', path) 329 330 print() 331 print('Failed:') 332 333 causes = [] 334 for cause, tests in sorted(self._failed.items()): 335 block = ' ' + cause.capitalize() + ':\n' 336 for test in sorted(tests, key=lambda t: t.path): 337 block += ' ' + test.path 338 if test.note: 339 block += ' - ' + test.note 340 block += '\n' 341 causes.append(block) 342 343 print('\n'.join(causes)) 344 345 def run(self, tests): 346 parser = argparse.ArgumentParser() 347 result_opts = parser.add_mutually_exclusive_group() 348 result_opts.add_argument('--result-file', action='store_true', 349 help='Create a results.json file in the current directory.') 350 result_opts.add_argument('--result-file-at', metavar='PATH', 351 help='Create a results json file at the given path.') 352 parser.add_argument('--print-results', action='store_true', 353 help='Print a list of tests that passed or failed') 354 args = parser.parse_args(self.args) 355 356 self.reset_status() 357 358 runnable = filter(lambda t: not t.compile_only, tests) 359 compile_only = filter(lambda t: t.compile_only, tests) 360 361 for test in compile_only: 362 if os.path.exists(test.full_path()): 363 self.passed(test) 364 else: 365 self.failed(test, 'compile failed') 366 367 for test in runnable: 368 with open(test.returncode_file()) as rc: 369 returncode = int(rc.read()) 370 371 if returncode == 124: 372 self.failed(test, 'time out') 373 continue 374 elif returncode != 0: 375 self.failed(test, 'abort') 376 continue 377 378 out_dir = test.m5out_dir() 379 380 Diff = collections.namedtuple( 381 'Diff', 'ref, test, tag, ref_filter') 382 383 diffs = [] 384 385 gd = GoldenDir(test.golden_dir(), 'linux64') 386 387 missing = [] 388 log_file = '.'.join([test.name, 'log']) 389 log_path = 
gd.entry(log_file) 390 simout_path = os.path.join(out_dir, 'simout') 391 if not os.path.exists(simout_path): 392 missing.append('log output') 393 elif log_path: 394 diffs.append(LogChecker(log_path, simout_path, 395 log_file, out_dir)) 396 397 for name in gd.unused(): 398 test_path = os.path.join(out_dir, name) 399 ref_path = gd.entry(name) 400 if not os.path.exists(test_path): 401 missing.append(name) 402 else: 403 diffs.append(Checker(ref_path, test_path, name)) 404 405 if missing: 406 self.failed(test, 'missing output', ' '.join(missing)) 407 continue 408 409 failed_diffs = filter(lambda d: not d.check(), diffs) 410 if failed_diffs: 411 tags = map(lambda d: d.tag, failed_diffs) 412 self.failed(test, 'failed diffs', ' '.join(tags)) 413 continue 414 415 self.passed(test) 416 417 if args.print_results: 418 self.print_results() 419 420 self.print_status() 421 422 result_path = None 423 if args.result_file: 424 result_path = os.path.join(os.getcwd(), 'results.json') 425 elif args.result_file_at: 426 result_path = args.result_file_at 427 428 if result_path: 429 self.write_result_file(result_path) 430 431 432parser = argparse.ArgumentParser(description='SystemC test utility') 433 434parser.add_argument('build_dir', metavar='BUILD_DIR', 435 help='The build directory (ie. 
build/ARM).') 436 437parser.add_argument('--update-json', action='store_true', 438 help='Update the json manifest of tests.') 439 440parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'], 441 default='opt', 442 help='Flavor of binary to test.') 443 444parser.add_argument('--list', action='store_true', 445 help='List the available tests') 446 447filter_opts = parser.add_mutually_exclusive_group() 448filter_opts.add_argument('--filter', default='True', 449 help='Python expression which filters tests based ' 450 'on their properties') 451filter_opts.add_argument('--filter-file', default=None, 452 type=argparse.FileType('r'), 453 help='Same as --filter, but read from a file') 454 455def collect_phases(args): 456 phase_groups = [list(g) for k, g in 457 itertools.groupby(args, lambda x: x != '--phase') if k] 458 main_args = parser.parse_args(phase_groups[0][1:]) 459 phases = [] 460 names = [] 461 for group in phase_groups[1:]: 462 name = group[0] 463 if name in names: 464 raise RuntimeException('Phase %s specified more than once' % name) 465 phase = test_phase_classes[name] 466 phases.append(phase(main_args, *group[1:])) 467 phases.sort() 468 return main_args, phases 469 470main_args, phases = collect_phases(sys.argv) 471 472if len(phases) == 0: 473 phases = [ 474 CompilePhase(main_args), 475 RunPhase(main_args), 476 VerifyPhase(main_args) 477 ] 478 479 480 481json_path = os.path.join(main_args.build_dir, json_rel_path) 482 483if main_args.update_json: 484 scons(os.path.join(json_path)) 485 486with open(json_path) as f: 487 test_data = json.load(f) 488 489 if main_args.filter_file: 490 f = main_args.filter_file 491 filt = compile(f.read(), f.name, 'eval') 492 else: 493 filt = compile(main_args.filter, '<string>', 'eval') 494 495 filtered_tests = { 496 target: props for (target, props) in 497 test_data.iteritems() if eval(filt, dict(props)) 498 } 499 500 if main_args.list: 501 for target, props in sorted(filtered_tests.iteritems()): 502 print('%s.%s' % (target, 
main_args.flavor)) 503 for key, val in props.iteritems(): 504 print(' %s: %s' % (key, val)) 505 print('Total tests: %d' % len(filtered_tests)) 506 else: 507 tests_to_run = list([ 508 Test(target, main_args.flavor, main_args.build_dir, props) for 509 target, props in sorted(filtered_tests.iteritems()) 510 ]) 511 512 for phase in phases: 513 phase.run(tests_to_run)
|