verify.py revision 13005:9e97204bf57f
#!/usr/bin/env python2
#
# Copyright 2018 Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black

from __future__ import print_function

import argparse
import collections
import difflib
import functools
import inspect
import itertools
import json
import multiprocessing.pool
import os
import re
import subprocess
import sys

script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
config_path = os.path.join(script_dir, 'config.py')

systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')


def scons(*args):
    args = ['scons'] + list(args)
    subprocess.check_call(args)


class Test(object):
    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        self.suffix = suffix
        self.build_dir = build_dir
        self.props = {}

        for key, val in props.iteritems():
            self.set_prop(key, val)

    def set_prop(self, key, val):
        setattr(self, key, val)
        self.props[key] = val

    def dir(self):
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        return os.path.join(script_dir, self.path)

    def golden_dir(self):
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        return '.'.join([self.name, self.suffix])

    def full_path(self):
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        return os.path.join(self.m5out_dir(), 'returncode')
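
# A quick sketch (not executed) of how the Test path helpers above compose,
# assuming a hypothetical manifest entry whose properties include
# path='async/test01' and name='test01', built in build/ARM with the 'opt'
# flavor:
#
#   t = Test('async/test01/test01', 'opt', 'build/ARM', props)
#   t.dir()        -> build/ARM/systemc/tests/async/test01
#   t.bin()        -> test01.opt
#   t.full_path()  -> build/ARM/systemc/tests/async/test01/test01.opt
#   t.m5out_dir()  -> build/ARM/systemc/tests/async/test01/m5out.opt
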

test_phase_classes = {}

class TestPhaseMeta(type):
    def __init__(cls, name, bases, d):
        # Register every concrete phase class by name so the command line
        # can refer to it.
        if not d.pop('abstract', False):
            test_phase_classes[d['name']] = cls

        super(TestPhaseMeta, cls).__init__(name, bases, d)

class TestPhaseBase(object):
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        self.main_args = main_args
        self.args = args

    def __lt__(self, other):
        return self.number < other.number

class CompilePhase(TestPhaseBase):
    name = 'compile'
    number = 1

    def run(self, tests):
        targets = list([test.full_path() for test in tests])
        scons_args = list(self.args) + targets
        scons(*scons_args)

class RunPhase(TestPhaseBase):
    name = 'execute'
    number = 2

    def run(self, tests):
        parser = argparse.ArgumentParser()
        parser.add_argument('--timeout', type=int, metavar='SECONDS',
                            help='Time limit for each run in seconds.',
                            default=0)
        parser.add_argument('-j', type=int, default=1,
                            help='How many tests to run in parallel.')
        args = parser.parse_args(self.args)

        timeout_cmd = [
            'timeout',
            '--kill-after', str(args.timeout * 2),
            str(args.timeout)
        ]
        def run_test(test):
            cmd = []
            if args.timeout:
                cmd.extend(timeout_cmd)
            cmd.extend([
                test.full_path(),
                '-red', test.m5out_dir(),
                '--listener-mode=off',
                '--quiet',
                config_path
            ])
            # Ensure the output directory exists.
            if not os.path.exists(test.m5out_dir()):
                os.makedirs(test.m5out_dir())
            try:
                subprocess.check_call(cmd)
            except subprocess.CalledProcessError as error:
                returncode = error.returncode
            else:
                returncode = 0
            # Record the exit status for the verify phase to inspect later.
            with open(test.returncode_file(), 'w') as rc:
                rc.write('%d\n' % returncode)

        runnable = filter(lambda t: not t.compile_only, tests)
        if args.j == 1:
            map(run_test, runnable)
        else:
            tp = multiprocessing.pool.ThreadPool(args.j)
            map(lambda t: tp.apply_async(run_test, (t,)), runnable)
            tp.close()
            tp.join()

class Checker(object):
    def __init__(self, ref, test, tag):
        self.ref = ref
        self.test = test
        self.tag = tag

    def check(self):
        with open(self.test) as test_f, open(self.ref) as ref_f:
            return test_f.read() == ref_f.read()

class LogChecker(Checker):
    def merge_filts(*filts):
        filts = map(lambda f: '(' + f + ')', filts)
        filts = '|'.join(filts)
        return re.compile(filts, flags=re.MULTILINE)

    # Lines to ignore in the golden reference log.
    ref_filt = merge_filts(
        r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n',
        r'^SystemC Simulation\n',
        r'^\nWarning: .*\nIn file: .*\n'
    )
    # Lines to ignore in the test's own output.
    test_filt = merge_filts(
        r'^Global frequency set at \d* ticks per second\n'
    )

    def __init__(self, ref, test, tag, out_dir):
        super(LogChecker, self).__init__(ref, test, tag)
        self.out_dir = out_dir

    def apply_filters(self, data, filts):
        return re.sub(filts, '', data)

    def check(self):
        test_file = os.path.basename(self.test)
        ref_file = os.path.basename(self.ref)
        with open(self.test) as test_f, open(self.ref) as ref_f:
            test = self.apply_filters(test_f.read(), self.test_filt)
            ref = self.apply_filters(ref_f.read(), self.ref_filt)
            diff_file = '.'.join([ref_file, 'diff'])
            diff_path = os.path.join(self.out_dir, diff_file)
            if test != ref:
                with open(diff_path, 'w') as diff_f:
                    for line in difflib.unified_diff(
                            ref.splitlines(True), test.splitlines(True),
                            fromfile=ref_file,
                            tofile=test_file):
                        diff_f.write(line)
                return False
            else:
                # Clean up any stale diff left by a previous run.
                if os.path.exists(diff_path):
                    os.unlink(diff_path)
                return True
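
# A minimal usage sketch for LogChecker (paths are hypothetical). check()
# compares the filtered simulation output against the filtered golden log,
# writes a unified diff named '<ref>.diff' into out_dir on a mismatch, and
# removes any stale diff on a match:
#
#   checker = LogChecker('golden/test01.log', 'm5out.opt/simout',
#                        'test01.log', 'm5out.opt')
#   if not checker.check():
#       print('log mismatch; see m5out.opt/test01.log.diff')
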

class VerifyPhase(TestPhaseBase):
    name = 'verify'
    number = 3

    def reset_status(self):
        self._passed = []
        self._failed = {}

    def passed(self, test):
        self._passed.append(test)

    def failed(self, test, cause, note=''):
        test.set_prop('note', note)
        self._failed.setdefault(cause, []).append(test)

    def print_status(self):
        total_passed = len(self._passed)
        total_failed = sum(map(len, self._failed.values()))
        print()
        print('Passed: {passed:4} - Failed: {failed:4}'.format(
            passed=total_passed, failed=total_failed))

    def write_result_file(self, path):
        results = {
            'passed': map(lambda t: t.props, self._passed),
            'failed': {
                cause: map(lambda t: t.props, tests) for
                cause, tests in self._failed.iteritems()
            }
        }
        with open(path, 'w') as rf:
            json.dump(results, rf)

    def print_results(self):
        print()
        print('Passed:')
        for path in sorted([t.path for t in self._passed]):
            print('    ', path)

        print()
        print('Failed:')

        causes = []
        for cause, tests in sorted(self._failed.items()):
            block = '  ' + cause.capitalize() + ':\n'
            for test in sorted(tests, key=lambda t: t.path):
                block += '    ' + test.path
                if test.note:
                    block += ' - ' + test.note
                block += '\n'
            causes.append(block)

        print('\n'.join(causes))

    def run(self, tests):
        parser = argparse.ArgumentParser()
        result_opts = parser.add_mutually_exclusive_group()
        result_opts.add_argument('--result-file', action='store_true',
                help='Create a results.json file in the current directory.')
        result_opts.add_argument('--result-file-at', metavar='PATH',
                help='Create a results json file at the given path.')
        parser.add_argument('--print-results', action='store_true',
                help='Print a list of tests that passed or failed.')
        args = parser.parse_args(self.args)

        self.reset_status()

        runnable = filter(lambda t: not t.compile_only, tests)
        compile_only = filter(lambda t: t.compile_only, tests)

        for test in compile_only:
            if os.path.exists(test.full_path()):
                self.passed(test)
            else:
                self.failed(test, 'compile failed')

        for test in runnable:
            with open(test.returncode_file()) as rc:
                returncode = int(rc.read())

            # The 'timeout' utility exits with 124 when the time limit is
            # hit.
            if returncode == 124:
                self.failed(test, 'time out')
                continue
            elif returncode != 0:
                self.failed(test, 'abort')
                continue

            out_dir = test.m5out_dir()

            Diff = collections.namedtuple(
                'Diff', 'ref, test, tag, ref_filter')

            diffs = []

            log_file = '.'.join([test.name, 'log'])
            log_path = os.path.join(test.golden_dir(), log_file)
            simout_path = os.path.join(out_dir, 'simout')
            if not os.path.exists(simout_path):
                self.failed(test, 'no log output')
                continue
            if os.path.exists(log_path):
                diffs.append(LogChecker(
                    log_path, simout_path, log_file, out_dir))

            failed_diffs = filter(lambda d: not d.check(), diffs)
            if failed_diffs:
                tags = map(lambda d: d.tag, failed_diffs)
                self.failed(test, 'failed diffs', ' '.join(tags))
                continue

            self.passed(test)

        if args.print_results:
            self.print_results()

        self.print_status()

        result_path = None
        if args.result_file:
            result_path = os.path.join(os.getcwd(), 'results.json')
        elif args.result_file_at:
            result_path = args.result_file_at

        if result_path:
            self.write_result_file(result_path)
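
# For reference, the results file written by VerifyPhase.write_result_file()
# is JSON shaped roughly like this (property names depend on the manifest):
#
#   {
#     "passed": [ {"path": "...", ...}, ... ],
#     "failed": {
#       "time out":     [ {"path": "...", ...}, ... ],
#       "failed diffs": [ {"path": "...", "note": "test01.log", ...}, ... ]
#     }
#   }
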

parser = argparse.ArgumentParser(description='SystemC test utility')

parser.add_argument('build_dir', metavar='BUILD_DIR',
                    help='The build directory (e.g. build/ARM).')

parser.add_argument('--update-json', action='store_true',
                    help='Update the json manifest of tests.')

parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'],
                    default='opt',
                    help='Flavor of binary to test.')

parser.add_argument('--list', action='store_true',
                    help='List the available tests.')

filter_opts = parser.add_mutually_exclusive_group()
filter_opts.add_argument('--filter', default='True',
                         help='Python expression which filters tests based '
                              'on their properties.')
filter_opts.add_argument('--filter-file', default=None,
                         type=argparse.FileType('r'),
                         help='Same as --filter, but read from a file.')

def collect_phases(args):
    # Split the command line at each '--phase' marker. The first group holds
    # the main arguments; each later group names a phase followed by that
    # phase's own arguments.
    phase_groups = [list(g) for k, g in
                    itertools.groupby(args, lambda x: x != '--phase') if k]
    main_args = parser.parse_args(phase_groups[0][1:])
    phases = []
    names = []
    for group in phase_groups[1:]:
        name = group[0]
        if name in names:
            raise RuntimeError('Phase %s specified more than once' % name)
        names.append(name)
        phase = test_phase_classes[name]
        phases.append(phase(main_args, *group[1:]))
    phases.sort()
    return main_args, phases

main_args, phases = collect_phases(sys.argv)

# If no phases were requested explicitly, run all of them in order.
if len(phases) == 0:
    phases = [
        CompilePhase(main_args),
        RunPhase(main_args),
        VerifyPhase(main_args)
    ]


json_path = os.path.join(main_args.build_dir, json_rel_path)

if main_args.update_json:
    scons(json_path)

with open(json_path) as f:
    test_data = json.load(f)

    if main_args.filter_file:
        f = main_args.filter_file
        filt = compile(f.read(), f.name, 'eval')
    else:
        filt = compile(main_args.filter, '<string>', 'eval')

    # Keep only the tests whose properties satisfy the filter expression.
    filtered_tests = {
        target: props for (target, props) in
        test_data.iteritems() if eval(filt, dict(props))
    }

    if main_args.list:
        for target, props in sorted(filtered_tests.iteritems()):
            print('%s.%s' % (target, main_args.flavor))
            for key, val in props.iteritems():
                print('    %s: %s' % (key, val))
        print('Total tests: %d' % len(filtered_tests))
    else:
        tests_to_run = list([
            Test(target, main_args.flavor, main_args.build_dir, props) for
            target, props in sorted(filtered_tests.iteritems())
        ])

        for phase in phases:
            phase.run(tests_to_run)
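
# Example invocations (paths and arguments are illustrative):
#
#   List the tests in the manifest without running anything:
#     python2 verify.py build/ARM --list
#
#   Run specific phases, giving each its own arguments after a '--phase'
#   marker (compile arguments are forwarded to scons):
#     python2 verify.py build/ARM --phase compile -j4 \
#         --phase execute --timeout 60 -j 4 \
#         --phase verify --print-results
#
#   With no '--phase' arguments, all three phases run in order.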