#!/usr/bin/env python2.7
#
# Copyright 2018 Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black

"""Build, run, and verify the gem5 SystemC regression tests.

The work is split into three ordered phases -- compile, execute, verify --
each of which can be selected and given its own options on the command line
via '--phase <name> [options]' groups.  Running with no '--phase' groups
runs all three in order.

NOTE: this is a Python 2.7 script (see the shebang); it relies on
iteritems(), eager map/filter, and the old except syntax throughout.
"""

from __future__ import print_function

import argparse
import collections
import difflib
import functools
import inspect
import itertools
import json
import multiprocessing.pool
import os
import re
import subprocess
import sys

# Locate this script so test sources and the gem5 config can be found
# relative to it, no matter where it's invoked from.
script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
config_path = os.path.join(script_dir, 'config.py')
# Parent directories if checked out as part of gem5.
systemc_dir = os.path.dirname(script_dir)
src_dir = os.path.dirname(systemc_dir)
checkout_dir = os.path.dirname(src_dir)

# Paths of the test tree and its json manifest, relative to a build dir.
systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')



def scons(*args):
    """Run scons with the given arguments, raising on a non-zero exit."""
    args = ['scons'] + list(args)
    subprocess.check_call(args)



class Test(object):
    """One test from the json manifest, plus path helpers for its files.

    Arbitrary properties from the manifest (e.g. 'path', 'name',
    'compile_only') are installed both as attributes and in self.props.
    """
    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        self.suffix = suffix
        self.build_dir = build_dir
        self.props = {}

        for key, val in props.iteritems():
            self.set_prop(key, val)

    def set_prop(self, key, val):
        # Mirror every property as an attribute and in the props dict
        # (the latter is what gets dumped into results.json).
        setattr(self, key, val)
        self.props[key] = val

    def dir(self):
        """Directory holding this test's build products."""
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        """Directory holding this test's sources and golden output."""
        return os.path.join(script_dir, self.path)

    def expected_returncode_file(self):
        return os.path.join(self.src_dir(), 'expected_returncode')

    def golden_dir(self):
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        """Binary file name, e.g. 'name.opt'."""
        return '.'.join([self.name, self.suffix])

    def full_path(self):
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        # Keep a separate output dir per flavor so runs don't collide.
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        return os.path.join(self.m5out_dir(), 'returncode')



# Registry of concrete phase classes, keyed by their 'name' attribute and
# filled in automatically by TestPhaseMeta as classes are defined.
test_phase_classes = {}

class TestPhaseMeta(type):
    """Metaclass which registers non-abstract phases in test_phase_classes."""
    def __init__(cls, name, bases, d):
        if not d.pop('abstract', False):
            test_phase_classes[d['name']] = cls

        super(TestPhaseMeta, cls).__init__(name, bases, d)

class TestPhaseBase(object):
    """Base class for test phases; sortable by their 'number' attribute."""
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        self.main_args = main_args
        self.args = args

    def __lt__(self, other):
        return self.number < other.number

class CompilePhase(TestPhaseBase):
    """Build the test binaries with scons."""
    name = 'compile'
    number = 1

    def run(self, tests):
        targets = list([test.full_path() for test in tests])

        # Pick out a phase-specific -j if one was given; otherwise fall
        # back to the top-level parallelism setting.
        parser = argparse.ArgumentParser()
        parser.add_argument('-j', type=int, default=0)
        args, leftovers = parser.parse_known_args(self.args)
        if args.j == 0:
            self.args = ('-j', str(self.main_args.j)) + self.args

        scons_args = [ '--directory', self.main_args.scons_dir,
            'USE_SYSTEMC=1' ] + list(self.args) + targets
        scons(*scons_args)

class RunPhase(TestPhaseBase):
    """Run each compiled test under gem5, recording its return code."""
    name = 'execute'
    number = 2

    def run(self, tests):
        parser = argparse.ArgumentParser()
        parser.add_argument('--timeout', type=int, metavar='SECONDS',
                            help='Time limit for each run in seconds, '
                            '0 to disable.',
                            default=60)
        parser.add_argument('-j', type=int, default=0,
                help='How many tests to run in parallel.')
        args = parser.parse_args(self.args)

        # Wrap each run in coreutils' timeout(1); it exits with 124 on a
        # timeout, which the verify phase recognizes.
        timeout_cmd = [
            'timeout',
            '--kill-after', str(args.timeout * 2),
            str(args.timeout)
        ]
        def run_test(test):
            cmd = []
            if args.timeout:
                cmd.extend(timeout_cmd)
            cmd.extend([
                os.path.abspath(test.full_path()),
                '-rd', os.path.abspath(test.m5out_dir()),
                '--listener-mode=off',
                '--quiet',
                os.path.abspath(config_path),
            ])
            # Ensure the output directory exists.
            if not os.path.exists(test.m5out_dir()):
                os.makedirs(test.m5out_dir())
            try:
                subprocess.check_call(cmd, cwd=os.path.dirname(test.dir()))
            except subprocess.CalledProcessError, error:
                returncode = error.returncode
            else:
                returncode = 0
            # Persist the return code for the verify phase to read later.
            with open(test.returncode_file(), 'w') as rc:
                rc.write('%d\n' % returncode)

        j = self.main_args.j if args.j == 0 else args.j

        runnable = filter(lambda t: not t.compile_only, tests)
        if j == 1:
            map(run_test, runnable)
        else:
            tp = multiprocessing.pool.ThreadPool(j)
            map(lambda t: tp.apply_async(run_test, (t,)), runnable)
            tp.close()
            tp.join()

class Checker(object):
    """Verbatim comparison of a test output file against its reference."""
    def __init__(self, ref, test, tag):
        self.ref = ref
        self.test = test
        self.tag = tag

    def check(self):
        """Return True iff the two files have identical contents."""
        with open(self.test) as test_f, open(self.ref) as ref_f:
            return test_f.read() == ref_f.read()

def tagged_filt(tag, num):
    """Regex matching one SystemC report block, e.g. 'Warning: (W540) ...',
    along with its optional 'In file:'/'In process:' continuation lines."""
    return (r'\n{}: \({}{}\) .*\n(In file: .*\n)?'
            r'(In process: [\w.]* @ .*\n)?').format(tag, tag[0], num)

def error_filt(num):
    return tagged_filt('Error', num)

def warning_filt(num):
    return tagged_filt('Warning', num)

def info_filt(num):
    return tagged_filt('Info', num)

class DiffingChecker(Checker):
    """Checker which writes a unified diff next to the output on mismatch."""
    def __init__(self, ref, test, tag, out_dir):
        super(DiffingChecker, self).__init__(ref, test, tag)
        self.out_dir = out_dir

    def diffing_check(self, ref_lines, test_lines):
        """Compare line lists; on mismatch write <ref>.diff into out_dir
        and return False, otherwise remove any stale diff and return True."""
        test_file = os.path.basename(self.test)
        ref_file = os.path.basename(self.ref)

        diff_file = '.'.join([ref_file, 'diff'])
        diff_path = os.path.join(self.out_dir, diff_file)
        if test_lines != ref_lines:
            with open(diff_path, 'w') as diff_f:
                for line in difflib.unified_diff(
                        ref_lines, test_lines,
                        fromfile=ref_file,
                        tofile=test_file):
                    diff_f.write(line)
            return False
        else:
            # Clean up a diff left over from an earlier failing run.
            if os.path.exists(diff_path):
                os.unlink(diff_path)
            return True

class LogChecker(DiffingChecker):
    """Compare simulation log output, filtering out lines which legitimately
    differ between gem5 and the reference SystemC implementation."""
    def merge_filts(*filts):
        # Combine individual patterns into one alternation so the whole
        # log can be scrubbed with a single re.sub call.
        filts = map(lambda f: '(' + f + ')', filts)
        filts = '|'.join(filts)
        return re.compile(filts, flags=re.MULTILINE)

    # The reporting mechanism will print the actual filename when running in
    # gem5, and the "golden" output will say "<removed by verify.pl>" (the
    # literal left behind by Accellera's verify script, matched by the regex
    # below). We want to strip out both versions to make comparing the
    # output sensible.
    in_file_filt = r'^In file: ((<removed by verify\.pl>)|([a-zA-Z0-9.:_/]*))$'

    # Lines to strip from the golden reference output.
    ref_filt = merge_filts(
        r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n',
        r'^SystemC Simulation\n',
        r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: ' +
        r'You can turn off(.*\n){7}',
        r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: \n' +
        r'    sc_clock\(const char(.*\n){3}',
        warning_filt(540),
        warning_filt(571),
        info_filt(804),
        info_filt(704),
        in_file_filt,
    )
    # Lines to strip from the gem5 test output.
    test_filt = merge_filts(
        r'^Global frequency set at \d* ticks per second\n',
        r'^info: Entering event queue @ \d*\.  Starting simulation\.\.\.\n',
        r'warn: Ignoring request to set stack size\.\n',
        info_filt(804),
        in_file_filt,
    )

    def apply_filters(self, data, filts):
        """Return data with all matches of the compiled pattern removed."""
        # Fixed: the original referenced an undefined name ('filt') and
        # discarded re.sub's result, making this method a no-op NameError.
        return re.sub(filts, '', data)

    def check(self):
        with open(self.test) as test_f, open(self.ref) as ref_f:
            test = self.apply_filters(test_f.read(), self.test_filt)
            ref = self.apply_filters(ref_f.read(), self.ref_filt)
            return self.diffing_check(ref.splitlines(True),
                                      test.splitlines(True))

class VcdChecker(DiffingChecker):
    """Compare VCD trace files, ignoring the test's header lines."""
    def check(self):
        with open (self.test) as test_f, open(self.ref) as ref_f:
            ref = ref_f.read().splitlines(True)
            test = test_f.read().splitlines(True)
            # Strip off the first seven lines of the test output which are
            # date and version information.
            test = test[7:]

            return self.diffing_check(ref, test)

class GoldenDir(object):
    """Index of a test's golden output directory.

    Entries may be platform specific ('name.<platform>') or generic
    ('name'); entry() prefers the platform specific version and tracks
    which entries have been consumed so unused() can report the rest.
    """
    def __init__(self, path, platform):
        self.path = path
        self.platform = platform

        contents = os.listdir(path)

        self.entries = {}
        class Entry(object):
            def __init__(self, e_path):
                self.used = False
                self.path = os.path.join(path, e_path)

            def use(self):
                self.used = True

        for entry in contents:
            self.entries[entry] = Entry(entry)

    def entry(self, name):
        """Return the path for name, preferring 'name.<platform>', or None.
        All entries matching name (or 'name.*') are marked used."""
        def match(n):
            return (n == name) or n.startswith(name + '.')
        matches = { n: e for n, e in self.entries.items() if match(n) }

        for match in matches.values():
            match.use()

        platform_name = '.'.join([ name, self.platform ])
        if platform_name in matches:
            return matches[platform_name].path
        if name in matches:
            return matches[name].path
        else:
            return None

    def unused(self):
        """Return base names of entries not yet consumed by entry()."""
        items = self.entries.items()
        items = filter(lambda i: not i[1].used, items)

        items.sort()
        sources = []
        i = 0
        # Collapse 'name', 'name.suffix', ... runs down to their root name.
        while i < len(items):
            root = items[i][0]
            sources.append(root)
            i += 1
            while i < len(items) and items[i][0].startswith(root):
                i += 1
        return sources

class VerifyPhase(TestPhaseBase):
    """Check return codes and diff outputs against the golden references."""
    name = 'verify'
    number = 3

    def reset_status(self):
        self._passed = []
        self._failed = {}

    def passed(self, test):
        self._passed.append(test)

    def failed(self, test, cause, note=''):
        test.set_prop('note', note)
        self._failed.setdefault(cause, []).append(test)

    def print_status(self):
        """Print a one-line pass/fail summary."""
        total_passed = len(self._passed)
        total_failed = sum(map(len, self._failed.values()))
        print()
        print('Passed: {passed:4} - Failed: {failed:4}'.format(
                  passed=total_passed, failed=total_failed))

    def write_result_file(self, path):
        """Dump the pass/fail results as json at the given path."""
        results = {
            'passed': map(lambda t: t.props, self._passed),
            'failed': {
                cause: map(lambda t: t.props, tests) for
                       cause, tests in self._failed.iteritems()
            }
        }
        with open(path, 'w') as rf:
            json.dump(results, rf)

    def print_results(self):
        """Print the passed tests, then the failed tests grouped by cause."""
        print()
        print('Passed:')
        for path in sorted(list([ t.path for t in self._passed ])):
            print('    ', path)

        print()
        print('Failed:')

        causes = []
        for cause, tests in sorted(self._failed.items()):
            block = '  ' + cause.capitalize() + ':\n'
            for test in sorted(tests, key=lambda t: t.path):
                block += '    ' + test.path
                if test.note:
                    block += ' - ' + test.note
                block += '\n'
            causes.append(block)

        print('\n'.join(causes))

    def run(self, tests):
        parser = argparse.ArgumentParser()
        result_opts = parser.add_mutually_exclusive_group()
        result_opts.add_argument('--result-file', action='store_true',
                help='Create a results.json file in the current directory.')
        result_opts.add_argument('--result-file-at', metavar='PATH',
                help='Create a results json file at the given path.')
        parser.add_argument('--no-print-results', action='store_true',
                help='Don\'t print a list of tests that passed or failed')
        args = parser.parse_args(self.args)

        self.reset_status()

        runnable = filter(lambda t: not t.compile_only, tests)
        compile_only = filter(lambda t: t.compile_only, tests)

        # Compile-only tests pass if their binary simply exists.
        for test in compile_only:
            if os.path.exists(test.full_path()):
                self.passed(test)
            else:
                self.failed(test, 'compile failed')

        for test in runnable:
            with open(test.returncode_file()) as rc:
                returncode = int(rc.read())

            expected_returncode = 0
            if os.path.exists(test.expected_returncode_file()):
                with open(test.expected_returncode_file()) as erc:
                    expected_returncode = int(erc.read())

            # timeout(1) exits with 124 when the time limit was hit.
            if returncode == 124:
                self.failed(test, 'time out')
                continue
            elif returncode != expected_returncode:
                if expected_returncode == 0:
                    self.failed(test, 'abort')
                else:
                    self.failed(test, 'missed abort')
                continue

            out_dir = test.m5out_dir()

            diffs = []

            gd = GoldenDir(test.golden_dir(), 'linux64')

            missing = []
            # The simulation log is checked against '<name>.log' with
            # the known-noise lines filtered out on both sides.
            log_file = '.'.join([test.name, 'log'])
            log_path = gd.entry(log_file)
            simout_path = os.path.join(out_dir, 'simout')
            if not os.path.exists(simout_path):
                missing.append('log output')
            elif log_path:
                diffs.append(LogChecker(log_path, simout_path,
                                        log_file, out_dir))

            # Any remaining golden entries must have a matching output
            # file; VCD traces get header-tolerant comparison.
            for name in gd.unused():
                test_path = os.path.join(out_dir, name)
                ref_path = gd.entry(name)
                if not os.path.exists(test_path):
                    missing.append(name)
                elif name.endswith('.vcd'):
                    diffs.append(VcdChecker(ref_path, test_path,
                                            name, out_dir))
                else:
                    diffs.append(Checker(ref_path, test_path, name))

            if missing:
                self.failed(test, 'missing output', ' '.join(missing))
                continue

            failed_diffs = filter(lambda d: not d.check(), diffs)
            if failed_diffs:
                tags = map(lambda d: d.tag, failed_diffs)
                self.failed(test, 'failed diffs', ' '.join(tags))
                continue

            self.passed(test)

        if not args.no_print_results:
            self.print_results()

        self.print_status()

        result_path = None
        if args.result_file:
            result_path = os.path.join(os.getcwd(), 'results.json')
        elif args.result_file_at:
            result_path = args.result_file_at

        if result_path:
            self.write_result_file(result_path)


parser = argparse.ArgumentParser(description='SystemC test utility')

parser.add_argument('build_dir', metavar='BUILD_DIR',
                    help='The build directory (ie. build/ARM).')

parser.add_argument('--update-json', action='store_true',
                    help='Update the json manifest of tests.')

parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'],
                    default='opt',
                    help='Flavor of binary to test.')

parser.add_argument('--list', action='store_true',
                    help='List the available tests')

parser.add_argument('-j', type=int, default=1,
                    help='Default level of parallelism, can be overriden '
                         'for individual stages')

parser.add_argument('-C', '--scons-dir', metavar='SCONS_DIR',
                    default=checkout_dir,
                    help='Directory to run scons from')

filter_opts = parser.add_mutually_exclusive_group()
filter_opts.add_argument('--filter', default='True',
                         help='Python expression which filters tests based '
                              'on their properties')
filter_opts.add_argument('--filter-file', default=None,
                         type=argparse.FileType('r'),
                         help='Same as --filter, but read from a file')

def collect_phases(args):
    """Split argv at '--phase' markers into main args plus phase objects.

    Raises RuntimeError if the same phase is named more than once.
    """
    phase_groups = [list(g) for k, g in
                    itertools.groupby(args, lambda x: x != '--phase') if k]
    main_args = parser.parse_args(phase_groups[0][1:])
    phases = []
    names = []
    for group in phase_groups[1:]:
        name = group[0]
        if name in names:
            # Fixed: was 'RuntimeException' (a NameError) and 'names' was
            # never appended to, so duplicates were silently accepted.
            raise RuntimeError('Phase %s specified more than once' % name)
        names.append(name)
        phase = test_phase_classes[name]
        phases.append(phase(main_args, *group[1:]))
    phases.sort()
    return main_args, phases

main_args, phases = collect_phases(sys.argv)

# With no explicit phases, run all of them in their natural order.
if len(phases) == 0:
    phases = [
        CompilePhase(main_args),
        RunPhase(main_args),
        VerifyPhase(main_args)
    ]



json_path = os.path.join(main_args.build_dir, json_rel_path)

if main_args.update_json:
    scons('--directory', main_args.scons_dir, json_path)

with open(json_path) as f:
    test_data = json.load(f)

    # Build the filter expression; note it is eval'd with each test's
    # properties as its namespace, so --filter comes from a trusted user.
    if main_args.filter_file:
        f = main_args.filter_file
        filt = compile(f.read(), f.name, 'eval')
    else:
        filt = compile(main_args.filter, '<string>', 'eval')

    filtered_tests = {
        target: props for (target, props) in
                test_data.iteritems() if eval(filt, dict(props))
    }

    if len(filtered_tests) == 0:
        print('All tests were filtered out.')
        exit()

    if main_args.list:
        for target, props in sorted(filtered_tests.iteritems()):
            print('%s.%s' % (target, main_args.flavor))
            for key, val in props.iteritems():
                print('    %s: %s' % (key, val))
        print('Total tests: %d' % len(filtered_tests))
    else:
        tests_to_run = list([
            Test(target, main_args.flavor, main_args.build_dir, props) for
                target, props in sorted(filtered_tests.iteritems())
        ])

        for phase in phases:
            phase.run(tests_to_run)