verify.py revision 13002
#!/usr/bin/env python2
#
# Copyright 2018 Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black

from __future__ import print_function

import argparse
import functools
import inspect
import itertools
import json
import multiprocessing.pool
import os
import subprocess
import sys

script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
config_path = os.path.join(script_dir, 'config.py')

systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')


def scons(*args):
    args = ['scons'] + list(args)
    subprocess.check_call(args)


class Test(object):
    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        self.suffix = suffix
        self.build_dir = build_dir

        # Expose each property from the json manifest as a test attribute.
        for key, val in props.iteritems():
            setattr(self, key, val)

    def dir(self):
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        return os.path.join(script_dir, self.path)

    def golden_dir(self):
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        return '.'.join([self.name, self.suffix])

    def full_path(self):
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        return os.path.join(self.m5out_dir(), 'returncode')


# Registry of concrete test phase classes, keyed by phase name.
test_phase_classes = {}

class TestPhaseMeta(type):
    def __init__(cls, name, bases, d):
        # Phases which don't declare themselves abstract register by name.
        if not d.pop('abstract', False):
            test_phase_classes[d['name']] = cls

        super(TestPhaseMeta, cls).__init__(name, bases, d)

class TestPhaseBase(object):
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        self.main_args = main_args
        self.args = args

    def __lt__(self, other):
        # Phases sort by their "number" so they run in the intended order.
        return self.number < other.number

class CompilePhase(TestPhaseBase):
    name = 'compile'
    number = 1

    def run(self, tests):
        targets = [test.full_path() for test in tests]
        scons_args = list(self.args) + targets
        scons(*scons_args)

class RunPhase(TestPhaseBase):
    name = 'execute'
    number = 2

    def run(self, tests):
        parser = argparse.ArgumentParser()
        parser.add_argument('--timeout', type=int, metavar='SECONDS',
                            help='Time limit for each run in seconds.',
                            default=0)
        parser.add_argument('-j', type=int, default=1,
                            help='How many tests to run in parallel.')
        args = parser.parse_args(self.args)

        # Wrap each run in the "timeout" utility so hung tests get killed.
        timeout_cmd = [
            'timeout',
            '--kill-after', str(args.timeout * 2),
            str(args.timeout)
        ]

        def run_test(test):
            cmd = []
            if args.timeout:
                cmd.extend(timeout_cmd)
            cmd.extend([
                test.full_path(),
                '-red', test.m5out_dir(),
                '--listener-mode=off',
                config_path
            ])
            # Ensure the output directory exists.
            if not os.path.exists(test.m5out_dir()):
                os.makedirs(test.m5out_dir())
            try:
                subprocess.check_call(cmd)
            except subprocess.CalledProcessError as error:
                returncode = error.returncode
            else:
                returncode = 0
            # Record the return code so the verify phase can check it later.
            with open(test.returncode_file(), 'w') as rc:
                rc.write('%d\n' % returncode)

        runnable = filter(lambda t: not t.compile_only, tests)
        if args.j == 1:
            map(run_test, runnable)
        else:
            tp = multiprocessing.pool.ThreadPool(args.j)
            map(lambda t: tp.apply_async(run_test, (t,)), runnable)
            tp.close()
            tp.join()

class VerifyPhase(TestPhaseBase):
    name = 'verify'
    number = 3

    def reset_status(self):
        self._passed = []
        self._failed = {}

    def passed(self, test):
        self._passed.append(test)

    def failed(self, test, cause):
        self._failed.setdefault(cause, []).append(test)

    def print_status(self):
        total_passed = len(self._passed)
        total_failed = sum(map(len, self._failed.values()))
        print()
        print('Passed: {passed:4} - Failed: {failed:4}'.format(
            passed=total_passed, failed=total_failed))

    def write_result_file(self, path):
        passed = map(lambda t: t.path, self._passed)
        passed.sort()
        failed = {
            cause: map(lambda t: t.path, tests) for
            cause, tests in self._failed.iteritems()
        }
        for tests in failed.values():
            tests.sort()
        results = { 'passed': passed, 'failed': failed }
        with open(path, 'w') as rf:
            json.dump(results, rf)

    def print_results(self):
        passed = map(lambda t: t.path, self._passed)
        passed.sort()
        failed = {
            cause: map(lambda t: t.path, tests) for
            cause, tests in self._failed.iteritems()
        }
        for tests in failed.values():
            tests.sort()

        print()
        print('Passed:')
        map(lambda t: print('    ', t), passed)

        print()
        print('Failed:')
        categories = failed.items()
        categories.sort()

        def cat_str((cause, tests)):
            heading = '    ' + cause.capitalize() + ':\n'
            test_lines = ['        ' + test + '\n' for test in tests]
            return heading + ''.join(test_lines)

        blocks = map(cat_str, categories)
        print('\n'.join(blocks))

    def run(self, tests):
        parser = argparse.ArgumentParser()
        result_opts = parser.add_mutually_exclusive_group()
        result_opts.add_argument('--result-file', action='store_true',
                help='Create a results.json file in the current directory.')
        result_opts.add_argument('--result-file-at', metavar='PATH',
                help='Create a results json file at the given path.')
        parser.add_argument('--print-results', action='store_true',
                help='Print a list of tests that passed or failed.')
        args = parser.parse_args(self.args)

        self.reset_status()

        runnable = filter(lambda t: not t.compile_only, tests)
        compile_only = filter(lambda t: t.compile_only, tests)

        # Compile-only tests pass if their binary was built.
        for test in compile_only:
            if os.path.exists(test.full_path()):
                self.passed(test)
            else:
                self.failed(test, 'compile failed')

        for test in runnable:
            with open(test.returncode_file()) as rc:
                returncode = int(rc.read())

            if returncode == 0:
                self.passed(test)
            elif returncode == 124:
                # The "timeout" utility exits with 124 when the limit expires.
                self.failed(test, 'time out')
            else:
                self.failed(test, 'abort')

        if args.print_results:
            self.print_results()

        self.print_status()

        result_path = None
        if args.result_file:
            result_path = os.path.join(os.getcwd(), 'results.json')
        elif args.result_file_at:
            result_path = args.result_file_at

        if result_path:
            self.write_result_file(result_path)


parser = argparse.ArgumentParser(description='SystemC test utility')

parser.add_argument('build_dir', metavar='BUILD_DIR',
                    help='The build directory (ie. build/ARM).')

parser.add_argument('--update-json', action='store_true',
                    help='Update the json manifest of tests.')

parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'],
                    default='opt',
                    help='Flavor of binary to test.')

parser.add_argument('--list', action='store_true',
                    help='List the available tests.')

filter_opts = parser.add_mutually_exclusive_group()
filter_opts.add_argument('--filter', default='True',
                         help='Python expression which filters tests based '
                              'on their properties.')
filter_opts.add_argument('--filter-file', default=None,
                         type=argparse.FileType('r'),
                         help='Same as --filter, but read from a file.')

def collect_phases(args):
    # Split the command line into the main arguments and per-phase argument
    # groups, using '--phase' as the delimiter.
    phase_groups = [list(g) for k, g in
                    itertools.groupby(args, lambda x: x != '--phase') if k]
    main_args = parser.parse_args(phase_groups[0][1:])
    phases = []
    names = []
    for group in phase_groups[1:]:
        name = group[0]
        if name in names:
            raise RuntimeError('Phase %s specified more than once' % name)
        names.append(name)
        phase = test_phase_classes[name]
        phases.append(phase(main_args, *group[1:]))
    phases.sort()
    return main_args, phases

main_args, phases = collect_phases(sys.argv)

# If no phases were requested explicitly, run all of them in order.
if len(phases) == 0:
    phases = [
        CompilePhase(main_args),
        RunPhase(main_args),
        VerifyPhase(main_args)
    ]


json_path = os.path.join(main_args.build_dir, json_rel_path)

if main_args.update_json:
    scons(json_path)

with open(json_path) as f:
    test_data = json.load(f)

    if main_args.filter_file:
        f = main_args.filter_file
        filt = compile(f.read(), f.name, 'eval')
    else:
        filt = compile(main_args.filter, '<string>', 'eval')

    # Keep only the tests whose properties satisfy the filter expression.
    filtered_tests = {
        target: props for (target, props) in
        test_data.iteritems() if eval(filt, dict(props))
    }

    if main_args.list:
        for target, props in sorted(filtered_tests.iteritems()):
            print('%s.%s' % (target, main_args.flavor))
            for key, val in props.iteritems():
                print('    %s: %s' % (key, val))
        print('Total tests: %d' % len(filtered_tests))
    else:
        tests_to_run = [
            Test(target, main_args.flavor, main_args.build_dir, props) for
            target, props in sorted(filtered_tests.iteritems())
        ]

        for phase in phases:
            phase.run(tests_to_run)
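
# Example invocations (illustrative only, inferred from the argparse
# definitions above; the build directory and the per-phase flags shown here
# are assumptions about a typical checkout, not part of the original file):
#
#   Run the default compile, execute, and verify phases:
#       ./verify.py build/ARM
#
#   List the tests selected by a filter expression over test properties
#   (e.g. the 'compile_only' property referenced by the phases above):
#       ./verify.py build/ARM --list --filter 'not compile_only'
#
#   Give each phase its own arguments after a '--phase <name>' separator;
#   compile-phase arguments are forwarded to scons, while execute- and
#   verify-phase arguments are parsed by RunPhase and VerifyPhase:
#       ./verify.py build/ARM \
#           --phase compile -j4 \
#           --phase execute --timeout 60 -j 4 \
#           --phase verify --print-results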