verify.py revision 13010:09b975489550
1#!/usr/bin/env python2
2#
3# Copyright 2018 Google, Inc.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Gabe Black
29
30from __future__ import print_function
31
32import argparse
33import collections
34import difflib
35import functools
36import inspect
37import itertools
38import json
39import multiprocessing.pool
40import os
41import re
42import subprocess
43import sys
44
# Absolute path to this script and the directory holding it; test sources
# and the gem5 SystemC config file are located relative to these.
script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
config_path = os.path.join(script_dir, 'config.py')

# Locations, relative to a build directory, of the SystemC tests and the
# json manifest describing them.
systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')
53
54
def scons(*args):
    """Run scons with the given arguments, raising on a non-zero exit."""
    command = ['scons']
    command.extend(args)
    subprocess.check_call(command)
58
59
60
class Test(object):
    """A single test: its scons target, binary flavor, build dir, and the
    properties read from the json manifest.

    Each manifest property is stored both as an attribute (for convenient
    access and filtering) and in the props dict (so results can be
    serialized back out later).
    """
    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        self.suffix = suffix
        self.build_dir = build_dir
        self.props = {}

        # items() rather than iteritems() so this works under both
        # python 2 and python 3.
        for key, val in props.items():
            self.set_prop(key, val)

    def set_prop(self, key, val):
        """Record a property as both an attribute and a props entry."""
        setattr(self, key, val)
        self.props[key] = val

    def dir(self):
        """Directory this test is built into and runs in."""
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        """Directory holding this test's source."""
        return os.path.join(script_dir, self.path)

    def golden_dir(self):
        """Directory holding this test's golden reference output."""
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        """Name of the test binary, including the flavor suffix."""
        return '.'.join([self.name, self.suffix])

    def full_path(self):
        """Absolute path to the test binary."""
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        """Per-flavor m5out output directory for this test's run."""
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        """File recording the exit status of this test's run."""
        return os.path.join(self.m5out_dir(), 'returncode')
96
97
# Registry mapping phase names to phase classes, populated by the metaclass.
test_phase_classes = {}

class TestPhaseMeta(type):
    """Metaclass which registers every concrete test phase by its name."""
    def __init__(cls, name, bases, d):
        # Classes which declare themselves abstract are not registered;
        # the flag is popped so it doesn't leak into the class dict.
        is_abstract = d.pop('abstract', False)
        if not is_abstract:
            test_phase_classes[d['name']] = cls

        super(TestPhaseMeta, cls).__init__(name, bases, d)
106
class TestPhaseBase(object):
    """Common base for test phases, sortable by phase number."""
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        """Keep the shared arguments and any phase-specific extras."""
        self.args = args
        self.main_args = main_args

    def __lt__(self, other):
        """Order phases by their sequence number so sorting gives
        execution order."""
        return self.number < other.number
117
class CompilePhase(TestPhaseBase):
    """Build the test binaries by handing their paths to scons."""
    name = 'compile'
    number = 1

    def run(self, tests):
        # Each test binary is a scons target; forward any phase-specific
        # arguments straight through to scons.
        targets = [test.full_path() for test in tests]
        scons(*(list(self.args) + targets))
126
class RunPhase(TestPhaseBase):
    """Execute the compiled tests, optionally in parallel with a timeout."""
    name = 'execute'
    number = 2

    def run(self, tests):
        parser = argparse.ArgumentParser()
        parser.add_argument('--timeout', type=int, metavar='SECONDS',
                            help='Time limit for each run in seconds.',
                            default=0)
        parser.add_argument('-j', type=int, default=1,
                help='How many tests to run in parallel.')
        args = parser.parse_args(self.args)

        # 'timeout' exits with 124 on expiry (checked by the verify
        # phase), and SIGKILLs the process if it survives twice the limit.
        timeout_cmd = [
            'timeout',
            '--kill-after', str(args.timeout * 2),
            str(args.timeout)
        ]

        def run_test(test):
            cmd = []
            if args.timeout:
                cmd.extend(timeout_cmd)
            cmd.extend([
                test.full_path(),
                '-red', test.m5out_dir(),
                '--listener-mode=off',
                '--quiet',
                config_path
            ])
            # Ensure the output directory exists.
            if not os.path.exists(test.m5out_dir()):
                os.makedirs(test.m5out_dir())
            # Record the exit status in a file, since later phases only
            # look at what's on disk.
            try:
                subprocess.check_call(cmd)
            except subprocess.CalledProcessError as error:
                # 'except E, e' is python 2 only syntax; 'as' works under
                # both python 2 and python 3.
                returncode = error.returncode
            else:
                returncode = 0
            with open(test.returncode_file(), 'w') as rc:
                rc.write('%d\n' % returncode)

        runnable = [t for t in tests if not t.compile_only]
        if args.j == 1:
            # A plain loop rather than map() so the calls happen eagerly
            # even under python 3, where map() is lazy.
            for test in runnable:
                run_test(test)
        else:
            tp = multiprocessing.pool.ThreadPool(args.j)
            for test in runnable:
                tp.apply_async(run_test, (test,))
            tp.close()
            tp.join()
176
class Checker(object):
    """Compare a test output file against a golden reference file."""
    def __init__(self, ref, test, tag):
        self.ref = ref
        self.test = test
        self.tag = tag

    def check(self):
        """Return True when the test output matches the reference exactly.

        Bug fix: the original opened self.text, an attribute which is
        never set; the test output path is stored in self.test.
        """
        with open(self.test) as test_f, open(self.ref) as ref_f:
            return test_f.read() == ref_f.read()
186
class LogChecker(Checker):
    """Compare simulation log output, filtering out lines which are
    expected to differ between runs or between implementations."""
    def merge_filts(*filts):
        # Combine individual filter regexes into a single alternation.
        filts = map(lambda f: '(' + f + ')', filts)
        filts = '|'.join(filts)
        return re.compile(filts, flags=re.MULTILINE)

    def warning_filt(num):
        # Matches a numbered SystemC warning block, including its
        # optional "In file"/"In process" continuation lines.
        return (r'^\nWarning: \(W{}\) .*\n(In file: .*\n)?'
                r'(In process: [\w.]* @ .*\n)?').format(num)

    # Lines to strip from the golden reference before diffing.
    ref_filt = merge_filts(
        r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n',
        r'^SystemC Simulation\n',
        warning_filt(571),
        warning_filt(540)
    )
    # Lines to strip from the test's own output before diffing.
    test_filt = merge_filts(
        r'^Global frequency set at \d* ticks per second\n'
    )

    def __init__(self, ref, test, tag, out_dir):
        super(LogChecker, self).__init__(ref, test, tag)
        self.out_dir = out_dir

    def apply_filters(self, data, filts):
        """Strip every match of each filter regex from data.

        Bug fix: the original referenced an undefined name, never looped
        over filts, and discarded the result of re.sub.
        """
        for filt in filts:
            data = re.sub(filt, '', data)
        return data

    def check(self):
        """Diff the filtered logs; on mismatch write a .diff file next to
        the output and return False, otherwise remove any stale diff."""
        test_file = os.path.basename(self.test)
        ref_file = os.path.basename(self.ref)
        with open(self.test) as test_f, open(self.ref) as ref_f:
            test = self.apply_filters(test_f.read(), [self.test_filt])
            ref = self.apply_filters(ref_f.read(), [self.ref_filt])
            diff_file = '.'.join([ref_file, 'diff'])
            diff_path = os.path.join(self.out_dir, diff_file)
            if test != ref:
                with open(diff_path, 'w') as diff_f:
                    for line in difflib.unified_diff(
                            ref.splitlines(True), test.splitlines(True),
                            fromfile=ref_file,
                            tofile=test_file):
                        diff_f.write(line)
                return False
            else:
                # Clean up any stale diff from a previous failing run.
                if os.path.exists(diff_path):
                    os.unlink(diff_path)
        return True
234
class GoldenDir(object):
    """Tracks a directory of golden reference files and records which of
    them have been claimed by a checker.

    Entries may be platform specific ('name.platform') or generic
    ('name'); platform specific entries are preferred on lookup.
    """
    def __init__(self, path, platform):
        self.path = path
        self.platform = platform

        class Entry(object):
            """One golden file and whether a checker has claimed it."""
            def __init__(self, e_path):
                self.used = False
                self.path = os.path.join(path, e_path)

            def use(self):
                self.used = True

        # The original also computed suffix/bases/common here, but never
        # used them; that dead code has been dropped.
        self.entries = {}
        for entry in os.listdir(path):
            self.entries[entry] = Entry(entry)

    def entry(self, name):
        """Return the path of the entry for name, preferring a platform
        specific version; mark all variants of name as used. Returns
        None when no matching entry exists."""
        def match(n):
            return (n == name) or n.startswith(name + '.')
        matches = { n: e for n, e in self.entries.items() if match(n) }

        # 'm' rather than 'match' so the helper above isn't shadowed.
        for m in matches.values():
            m.use()

        platform_name = '.'.join([ name, self.platform ])
        if platform_name in matches:
            return matches[platform_name].path
        if name in matches:
            return matches[name].path
        else:
            return None

    def unused(self):
        """Return the root names of entries no checker asked about."""
        # sorted() over a filtering generator works under both python 2
        # and python 3; dict.items() views can't be sorted in place.
        items = sorted(i for i in self.entries.items() if not i[1].used)
        sources = []
        i = 0
        while i < len(items):
            root = items[i][0]
            sources.append(root)
            i += 1
            # Skip entries which are suffixed variants of this root.
            while i < len(items) and items[i][0].startswith(root):
                i += 1
        return sources
288
class VerifyPhase(TestPhaseBase):
    """Check each test's outputs against its golden references and
    report/record which tests passed or failed."""
    name = 'verify'
    number = 3

    def reset_status(self):
        # Tests which passed, and failed tests grouped by failure cause.
        self._passed = []
        self._failed = {}

    def passed(self, test):
        self._passed.append(test)

    def failed(self, test, cause, note=''):
        # The note is stored as a property so it appears in result files.
        test.set_prop('note', note)
        self._failed.setdefault(cause, []).append(test)

    def print_status(self):
        total_passed = len(self._passed)
        total_failed = sum(len(tests) for tests in self._failed.values())
        print()
        print('Passed: {passed:4} - Failed: {failed:4}'.format(
                  passed=total_passed, failed=total_failed))

    def write_result_file(self, path):
        # Build plain lists and dicts; py3's map objects and iteritems()
        # would break json serialization here.
        results = {
            'passed': [t.props for t in self._passed],
            'failed': {
                cause: [t.props for t in tests] for
                       cause, tests in self._failed.items()
            }
        }
        with open(path, 'w') as rf:
            json.dump(results, rf)

    def print_results(self):
        print()
        print('Passed:')
        for path in sorted(t.path for t in self._passed):
            print('    ', path)

        print()
        print('Failed:')

        causes = []
        for cause, tests in sorted(self._failed.items()):
            block = '  ' + cause.capitalize() + ':\n'
            for test in sorted(tests, key=lambda t: t.path):
                block += '    ' + test.path
                if test.note:
                    block += ' - ' + test.note
                block += '\n'
            causes.append(block)

        print('\n'.join(causes))

    def run(self, tests):
        parser = argparse.ArgumentParser()
        result_opts = parser.add_mutually_exclusive_group()
        result_opts.add_argument('--result-file', action='store_true',
                help='Create a results.json file in the current directory.')
        result_opts.add_argument('--result-file-at', metavar='PATH',
                help='Create a results json file at the given path.')
        parser.add_argument('--print-results', action='store_true',
                help='Print a list of tests that passed or failed')
        args = parser.parse_args(self.args)

        self.reset_status()

        runnable = [t for t in tests if not t.compile_only]
        compile_only = [t for t in tests if t.compile_only]

        # Compile-only tests pass if their binary exists.
        for test in compile_only:
            if os.path.exists(test.full_path()):
                self.passed(test)
            else:
                self.failed(test, 'compile failed')

        for test in runnable:
            with open(test.returncode_file()) as rc:
                returncode = int(rc.read())

            # 124 is the exit code the 'timeout' utility uses on expiry.
            if returncode == 124:
                self.failed(test, 'time out')
                continue
            elif returncode != 0:
                self.failed(test, 'abort')
                continue

            out_dir = test.m5out_dir()

            # The original defined an unused 'Diff' namedtuple here;
            # that dead code has been dropped.
            diffs = []

            gd = GoldenDir(test.golden_dir(), 'linux64')

            missing = []
            log_file = '.'.join([test.name, 'log'])
            log_path = gd.entry(log_file)
            simout_path = os.path.join(out_dir, 'simout')
            if not os.path.exists(simout_path):
                missing.append('log output')
            elif log_path:
                diffs.append(LogChecker(log_path, simout_path,
                                        log_file, out_dir))

            # Every remaining golden file must have a matching output.
            for name in gd.unused():
                test_path = os.path.join(out_dir, name)
                ref_path = gd.entry(name)
                if not os.path.exists(test_path):
                    missing.append(name)
                else:
                    diffs.append(Checker(ref_path, test_path, name))

            if missing:
                self.failed(test, 'missing output', ' '.join(missing))
                continue

            failed_diffs = [d for d in diffs if not d.check()]
            if failed_diffs:
                tags = [d.tag for d in failed_diffs]
                self.failed(test, 'failed diffs', ' '.join(tags))
                continue

            self.passed(test)

        if args.print_results:
            self.print_results()

        self.print_status()

        result_path = None
        if args.result_file:
            result_path = os.path.join(os.getcwd(), 'results.json')
        elif args.result_file_at:
            result_path = args.result_file_at

        if result_path:
            self.write_result_file(result_path)
428
429
# Top level command line interface. Per-phase arguments are split off and
# handled separately by collect_phases below.
parser = argparse.ArgumentParser(description='SystemC test utility')

parser.add_argument('build_dir', metavar='BUILD_DIR',
                    help='The build directory (ie. build/ARM).')

parser.add_argument('--update-json', action='store_true',
                    help='Update the json manifest of tests.')

parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'],
                    default='opt',
                    help='Flavor of binary to test.')

parser.add_argument('--list', action='store_true',
                    help='List the available tests')

# --filter and --filter-file both supply a python expression which is
# evaluated against each test's properties; only one may be given.
filter_opts = parser.add_mutually_exclusive_group()
filter_opts.add_argument('--filter', default='True',
                         help='Python expression which filters tests based '
                         'on their properties')
filter_opts.add_argument('--filter-file', default=None,
                         type=argparse.FileType('r'),
                         help='Same as --filter, but read from a file')
452
def collect_phases(args):
    """Split the command line on '--phase' markers into the main
    arguments and one argument group per phase.

    Returns (main_args, phases) with the phases sorted into execution
    order. Raises RuntimeError if a phase is named more than once.
    """
    phase_groups = [list(g) for k, g in
                    itertools.groupby(args, lambda x: x != '--phase') if k]
    main_args = parser.parse_args(phase_groups[0][1:])
    phases = []
    names = []
    for group in phase_groups[1:]:
        name = group[0]
        if name in names:
            # RuntimeException is not a python builtin; the original
            # would have raised NameError here instead.
            raise RuntimeError('Phase %s specified more than once' % name)
        # Record the name, or the duplicate check above can never fire.
        names.append(name)
        phase = test_phase_classes[name]
        phases.append(phase(main_args, *group[1:]))
    phases.sort()
    return main_args, phases
467
main_args, phases = collect_phases(sys.argv)

# With no explicit --phase arguments, run all phases in order.
if len(phases) == 0:
    phases = [
        CompilePhase(main_args),
        RunPhase(main_args),
        VerifyPhase(main_args)
    ]
476
477
478
# Load the test manifest, filter it with the user's expression, and either
# list the surviving tests or run each phase over them.
json_path = os.path.join(main_args.build_dir, json_rel_path)

if main_args.update_json:
    # The manifest is itself a scons target. (The original wrapped
    # json_path in a redundant single-argument os.path.join.)
    scons(json_path)

with open(json_path) as f:
    test_data = json.load(f)

    if main_args.filter_file:
        # Use a distinct name so the manifest file handle f above isn't
        # clobbered, as it was in the original.
        filter_f = main_args.filter_file
        filt = compile(filter_f.read(), filter_f.name, 'eval')
    else:
        filt = compile(main_args.filter, '<string>', 'eval')

    # NOTE: the filter expression is eval()ed with each test's properties
    # in scope. It is input supplied by the user running this script, not
    # untrusted external data.
    filtered_tests = {
        target: props for (target, props) in
                    test_data.items() if eval(filt, dict(props))
    }

    if main_args.list:
        for target, props in sorted(filtered_tests.items()):
            print('%s.%s' % (target, main_args.flavor))
            for key, val in props.items():
                print('    %s: %s' % (key, val))
        print('Total tests: %d' % len(filtered_tests))
    else:
        tests_to_run = [
            Test(target, main_args.flavor, main_args.build_dir, props) for
                target, props in sorted(filtered_tests.items())
        ]

        for phase in phases:
            phase.run(tests_to_run)
512