1# -*- mode:python -*-
|
2
|
2# 3# Copyright (c) 2016 ARM Limited 4# All rights reserved 5# 6# The license below extends only to copyright in the software and shall 7# not be construed as granting a license to any other intellectual 8# property including but not limited to intellectual property relating 9# to a hardware implementation of the functionality of the software 10# licensed hereunder. You may use the software subject to the license 11# terms below provided that you ensure that this notice is replicated 12# unmodified and in its entirety in all distributions of the software, 13# modified or unmodified, in source code or in binary form. 14# |
15# Copyright (c) 2004-2006 The Regents of The University of Michigan 16# All rights reserved. 17# 18# Redistribution and use in source and binary forms, with or without 19# modification, are permitted provided that the following conditions are 20# met: redistributions of source code must retain the above copyright 21# notice, this list of conditions and the following disclaimer; 22# redistributions in binary form must reproduce the above copyright
--- 12 unchanged lines hidden (view full) ---
35# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 36# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 37# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 38# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 39# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 40# 41# Authors: Steve Reinhardt 42# Kevin Lim
|
43# Andreas Sandberg |
44
|
32import os, signal
33import sys, time
34import glob
|
45from SCons.Script.SConscript import SConsEnvironment
|
46import os 47import pickle 48import sys |
49
|
50sys.path.insert(0, Dir(".").srcnode().abspath) 51import testing.tests as tests 52import testing.results as results 53 |
54Import('env') 55
|
39env['DIFFOUT'] = File('diff-out')
40
|
56# get the termcap from the environment 57termcap = env['TERMCAP'] 58 59# Dict that accumulates lists of tests by category (quick, medium, long) 60env.Tests = {}
|
61gpu_isa = env['TARGET_GPU_ISA'] if env['BUILD_GPU'] else None 62for cat in tests.all_categories: 63 env.Tests[cat] = tuple( 64 tests.get_tests(env["TARGET_ISA"], 65 categories=(cat, ), 66 ruby_protocol=env["PROTOCOL"], 67 gpu_isa=gpu_isa)) |
68
|
def contents(node):
    """Return the entire contents of the file behind *node* as a string.

    node: anything convertible to a path via str() (e.g. a SCons File
    node or a plain path string).
    """
    # Use open() in a context manager so the handle is closed promptly;
    # the previous code used the file() builtin and never closed it.
    with open(str(node)) as f:
        return f.read()
|
69def color_message(color, msg): 70 return color + msg + termcap.Normal |
71
|
# Functions to parse the return value from scons Execute()... note that
# it is not the same encoding as wait() etc., so the python built-in os
# helpers (os.WIFSIGNALED and friends) do not work on it.
def signaled(status):
    """Return True if a scons Execute() status indicates that the
    command was terminated by a signal."""
    # Bit 7 of the status flags signal termination in this encoding.
    return (status & 0x80) != 0
54
def signum(status):
    """Extract the signal number from a scons Execute() status.

    Only meaningful when signaled(status) is true.
    """
    # The low 7 bits hold the signal number in this encoding.
    return status & 0x7f
57
# Receiving one of these signals means the run was interrupted from the
# outside (e.g. by the batch system or the user), so the test should be
# retried rather than reported as a failure.
retry_signals = (
    signal.SIGTERM,
    signal.SIGKILL,
    signal.SIGINT,
    signal.SIGQUIT,
    signal.SIGHUP,
)
62
# Regular expressions matching output lines that legitimately differ
# between runs (timestamps, host names, ports, ...) and are therefore
# ignored when diffing a test's output against the reference files.
output_ignore_regexes = (
    '^command line:',            # stdout file
    '^gem5 compiled ',           # stderr file
    '^gem5 started ',            # stderr file
    '^gem5 executing on ',       # stderr file
    '^Simulation complete at',   # stderr file
    '^Listening for',            # stderr file
    'listening for remote gdb',  # stderr file
)

# Argument string for diff(1): one -I option per ignored pattern, plus
# files that are compared separately (stats.txt) or generated (outdiff).
output_ignore_args = ' '.join(
    "-I '%s'" % regex for regex in output_ignore_regexes)

output_ignore_args += ' --exclude=stats.txt --exclude=outdiff'
77
|
72def run_test(target, source, env):
|
79 """Check output from running test.
|
73 """Run a test and produce results as a pickle file. |
74 75 Targets are as follows:
|
82 target[0] : status
|
76 target[0] : Pickle file |
77 78 Sources are: 79 source[0] : gem5 binary 80 source[1] : tests/run.py script
|
87 source[2] : reference stats file
|
81 source[2:] : reference files |
82 83 """
|
90 # make sure target files are all gone
91 for t in target:
92 if os.path.exists(t.abspath):
93 env.Execute(Delete(t.abspath))
94
|
84 tgt_dir = os.path.dirname(str(target[0]))
|
85 config = tests.ClassicConfig(*tgt_dir.split('/')[-6:]) 86 test = tests.ClassicTest(source[0].abspath, tgt_dir, config, 87 timeout=5*60*60, 88 skip_diff_out=True) |
89
|
97 # Base command for running test. We mess around with indirectly
98 # referring to files via SOURCES and TARGETS so that scons can mess
99 # with paths all it wants to and we still get the right files.
100 cmd = '${SOURCES[0]} -d %s -re ${SOURCES[1]} %s' % (tgt_dir, tgt_dir)
|
90 for ref in test.ref_files(): 91 out_file = os.path.join(tgt_dir, ref) 92 if os.path.exists(out_file): 93 env.Execute(Delete(out_file)) |
94
|
102 # Prefix test run with batch job submission command if appropriate.
103 # Batch command also supports timeout arg (in seconds, not minutes).
104 timeout = 15 * 60 # used to be a param, probably should be again
105 if env['BATCH']:
106 cmd = '%s -t %d %s' % (env['BATCH_CMD'], timeout, cmd)
107 # The slowest regression (bzip2) requires ~4 hours;
108 # 5 hours was chosen to be conservative.
109 elif env['TIMEOUT']:
110 cmd = 'timeout --foreground 5h %s' % cmd
|
95 with open(target[0].abspath, "wb") as fout: 96 formatter = results.Pickle(fout=fout) 97 formatter.dump_suites([ test.run() ]) |
98
|
112 # Create a default value for the status string, changed as needed
113 # based on the status.
114 status_str = "passed."
115
116 pre_exec_time = time.time()
117 status = env.Execute(env.subst(cmd, target=target, source=source))
118 if status == 0:
119 # gem5 terminated normally.
120 # Run diff on output & ref directories to find differences.
121 # Exclude the stats file since we will use diff-out on that.
122
123 # NFS file systems can be annoying and not have updated yet
124 # wait until we see the file modified
125 statsdiff = os.path.join(tgt_dir, 'statsdiff')
126 m_time = 0
127 nap = 0
128 while m_time < pre_exec_time and nap < 10:
129 try:
130 m_time = os.stat(statsdiff).st_mtime
131 except OSError:
132 pass
133 time.sleep(1)
134 nap += 1
135
136 outdiff = os.path.join(tgt_dir, 'outdiff')
137 # tack 'true' on the end so scons doesn't report diff's
138 # non-zero exit code as a build error
139 diffcmd = 'diff -ubrs %s ${SOURCES[2].dir} %s > %s; true' \
140 % (output_ignore_args, tgt_dir, outdiff)
141 env.Execute(env.subst(diffcmd, target=target, source=source))
142 print "===== Output differences ====="
143 print contents(outdiff)
144 # Run diff-out on stats.txt file
145 diffcmd = '$DIFFOUT ${SOURCES[2]} %s > %s' \
146 % (os.path.join(tgt_dir, 'stats.txt'), statsdiff)
147 diffcmd = env.subst(diffcmd, target=target, source=source)
148 diff_status = env.Execute(diffcmd, strfunction=None)
149 # If there is a difference, change the status string to say so
150 if diff_status != 0:
151 status_str = "CHANGED!"
152 print "===== Statistics differences ====="
153 print contents(statsdiff)
154
155 else: # gem5 exit status != 0
156 # Consider it a failed test unless the exit status is 2
157 status_str = "FAILED!"
158 # gem5 did not terminate properly, so no need to check the output
159 if env['TIMEOUT'] and status == 124:
160 status_str = "TIMED-OUT!"
161 elif signaled(status):
162 print 'gem5 terminated with signal', signum(status)
163 if signum(status) in retry_signals:
164 # Consider the test incomplete; don't create a 'status' output.
165 # Hand the return status to scons and let scons decide what
166 # to do about it (typically terminate unless run with -k).
167 return status
168 elif status == 2:
169 # The test was skipped, change the status string to say so
170 status_str = "skipped."
171 else:
172 print 'gem5 exited with non-zero status', status
173 # complete but failed execution (call to exit() with non-zero
174 # status, SIGABORT due to assertion failure, etc.)... fall through
175 # and generate FAILED status as if output comparison had failed
176
177 # Generate status file contents based on exit status of gem5 and diff-out
178 f = file(str(target[0]), 'w')
179 print >>f, tgt_dir, status_str
180 f.close()
181 # done
|
99 return 0 100 101def run_test_string(target, source, env): 102 return env.subst("Running test in ${TARGETS[0].dir}.", 103 target=target, source=source) 104 105testAction = env.Action(run_test, run_test_string) 106 107def print_test(target, source, env):
|
191 # print the status with colours to make it easier to see what
192 # passed and what failed
193 line = contents(source[0])
|
108 """Run a test and produce results as a pickle file. |
109
|
195 # split the line to words and get the last one
196 words = line.split()
197 status = words[-1]
|
110 Targets are as follows: 111 target[*] : Dummy targets |
112
|
199 # if the test failed make it red, if it passed make it green, and
200 # skip the punctuation
201 if status == "FAILED!" or status == "TIMED-OUT!":
202 status = termcap.Red + status[:-1] + termcap.Normal + status[-1]
203 elif status == "CHANGED!":
204 status = termcap.Yellow + status[:-1] + termcap.Normal + status[-1]
205 elif status == "passed.":
206 status = termcap.Green + status[:-1] + termcap.Normal + status[-1]
207 elif status == "skipped.":
208 status = termcap.Cyan + status[:-1] + termcap.Normal + status[-1]
|
113 Sources are: 114 source[0] : Pickle file |
115
|
210 # put it back in the list and join with space
211 words[-1] = status
212 line = " ".join(words)
|
116 """ 117 with open(source[0].abspath, "rb") as fin: 118 result = pickle.load(fin) |
119
|
214 print '***** ' + line
215 return 0
|
120 assert len(result) == 1 121 result = result[0] |
122
|
217printAction = env.Action(print_test, strfunction = None)
|
123 run = result.results[0] 124 assert run.name == "gem5" |
125
|
# Static data used by update_test:
# - long-winded warning about source files that will not be copied back
ignore_msg = '''
Note: The following file(s) will not be copied. New non-standard
      output files must be copied manually once before --update-ref will
      recognize them as outputs. Otherwise they are assumed to be
      inputs and are ignored.
'''
# - reference files that every test is expected to provide
needed_files = set(('simout', 'simerr', 'stats.txt', 'config.ini'))
# - generated files that must never be copied into the reference dir
known_ignores = set(('status', 'outdiff', 'statsdiff'))
|
126 formatter = None 127 if not run: 128 status = color_message(termcap.Red, "FAILED!") 129 formatter = results.Text() 130 elif run.skipped(): 131 status = color_message(termcap.Cyan, "skipped.") 132 elif result: 133 status = color_message(termcap.Green, "passed.") 134 else: 135 status = color_message(termcap.Yellow, "CHANGED!") 136 formatter = results.Text() |
137
|
138 if formatter: 139 formatter.dump_suites([result]) 140 141 print "***** %s: %s" % (source[0].dir, status) 142 return 0 143 144printAction = env.Action(print_test, strfunction=None) 145 |
146def update_test(target, source, env):
|
233 """Update reference test outputs.
|
147 """Update test reference data |
148
|
235 Target is phony. First two sources are the ref & new stats.txt file
236 files, respectively. We actually copy everything in the
237 respective directories except the status & diff output files.
|
149 Targets are as follows: 150 target[0] : Dummy file |
151
|
152 Sources are: 153 source[0] : Pickle file |
154 """
|
240 dest_dir = str(source[0].get_dir())
241 src_dir = str(source[1].get_dir())
242 dest_files = set(os.listdir(dest_dir))
243 src_files = set(os.listdir(src_dir))
244 # Copy all of the required files plus any existing dest files.
245 wanted_files = needed_files | dest_files
246 missing_files = wanted_files - src_files
247 if len(missing_files) > 0:
248 print " WARNING: the following file(s) are missing " \
249 "and will not be updated:"
250 print " ", " ,".join(missing_files)
251 copy_files = wanted_files - missing_files
252 warn_ignored_files = (src_files - copy_files) - known_ignores
253 if len(warn_ignored_files) > 0:
254 print ignore_msg,
255 print " ", ", ".join(warn_ignored_files)
256 for f in copy_files:
257 if f in dest_files:
258 print " Replacing file", f
259 dest_files.remove(f)
260 else:
261 print " Creating new file", f
262 copyAction = Copy(os.path.join(dest_dir, f), os.path.join(src_dir, f))
263 copyAction.strfunction = None
264 env.Execute(copyAction)
|
155 156 src_dir = os.path.dirname(str(source[0])) 157 config = tests.ClassicConfig(*src_dir.split('/')[-6:]) 158 test = tests.ClassicTest(source[0].abspath, src_dir, config) 159 ref_dir = test.ref_dir 160 161 with open(source[0].abspath, "rb") as fin: 162 result = pickle.load(fin) 163 164 assert len(result) == 1 165 result = result[0] 166 167 run = result.results[0] 168 assert run.name == "gem5" 169 170 if run.skipped(): 171 print "*** %s: %s: Test skipped, not updating." % ( 172 source[0].dir, color_message(termcap.Yellow, "WARNING"), ) 173 return 0 174 elif result: 175 print "*** %s: %s: Test successful, not updating." % ( 176 source[0].dir, color_message(termcap.Green, "skipped"), ) 177 return 0 178 elif not run.success(): 179 print "*** %s: %s: Test failed, not updating." % ( 180 source[0].dir, color_message(termcap.Red, "ERROR"), ) 181 return 1 182 183 print "** Updating %s" % (test, ) 184 test.update_ref() 185 |
186 return 0 187 188def update_test_string(target, source, env):
|
268 return env.subst("Updating ${SOURCES[0].dir} from ${SOURCES[1].dir}",
|
189 return env.subst("Updating ${SOURCES[0].dir}", |
190 target=target, source=source) 191 192updateAction = env.Action(update_test, update_test_string) 193
|
273def test_builder(env, ref_dir):
|
194def test_builder(test_tuple): |
195 """Define a test.""" 196
|
276 path = list(ref_dir.split('/'))
|
197 out_dir = "/".join(test_tuple) 198 binary = env.M5Binary.abspath 199 test = tests.ClassicTest(binary, out_dir, test_tuple) |
200
|
278 # target path (where test output goes) consists of category, mode,
279 # name, isa, opsys, and config (skips the 'ref' component)
280 assert(path.pop(-4) == 'ref')
281 tgt_dir = os.path.join(*path[-6:])
|
201 def tgt(name): 202 return os.path.join(out_dir, name) |
203
|
283 # local closure for prepending target path to filename
284 def tgt(f):
285 return os.path.join(tgt_dir, f)
|
204 def ref(name): 205 return os.path.join(test.ref_dir, name) |
206
|
287 ref_stats = os.path.join(ref_dir, 'stats.txt')
288 new_stats = tgt('stats.txt')
289 status_file = tgt('status')
|
207 pickle_file = tgt("status.pickle") 208 targets = [ 209 pickle_file, 210 ] |
211
|
291 env.Command([status_file, new_stats],
292 [env.M5Binary, 'run.py', ref_stats],
293 testAction)
|
212 sources = [ 213 env.M5Binary, 214 "run.py", 215 ] + [ ref(f) for f in test.ref_files() ] |
216
|
217 env.Command(targets, sources, testAction) 218 |
219 # phony target to echo status 220 if GetOption('update_ref'):
|
297 p = env.Command(tgt('_update'),
298 [ref_stats, new_stats, status_file],
299 updateAction)
|
221 p = env.Command(tgt("_update"), [pickle_file], updateAction) |
222 else:
|
301 p = env.Command(tgt('_print'), [status_file], printAction)
|
223 p = env.Command(tgt("_print"), [pickle_file], printAction) |
224 225 env.AlwaysBuild(p) 226
|
227def list_tests(target, source, env): 228 """Create a list of tests |
229
|
306# Figure out applicable configs based on build type
307configs = []
308if env['TARGET_ISA'] == 'alpha':
309 configs += ['tsunami-simple-atomic',
310 'tsunami-simple-timing',
311 'tsunami-simple-atomic-dual',
312 'tsunami-simple-timing-dual',
313 'twosys-tsunami-simple-atomic',
314 'tsunami-o3', 'tsunami-o3-dual',
315 'tsunami-minor', 'tsunami-minor-dual',
316 'tsunami-switcheroo-full']
317if env['TARGET_ISA'] == 'sparc':
318 configs += ['t1000-simple-atomic',
319 't1000-simple-timing']
320if env['TARGET_ISA'] == 'arm':
321 configs += ['simple-atomic-dummychecker',
322 'o3-timing-checker',
323 'realview-simple-atomic',
324 'realview-simple-atomic-dual',
325 'realview-simple-atomic-checkpoint',
326 'realview-simple-timing',
327 'realview-simple-timing-dual',
328 'realview-o3',
329 'realview-o3-checker',
330 'realview-o3-dual',
331 'realview-minor',
332 'realview-minor-dual',
333 'realview-switcheroo-atomic',
334 'realview-switcheroo-timing',
335 'realview-switcheroo-o3',
336 'realview-switcheroo-full',
337 'realview64-simple-atomic',
338 'realview64-simple-atomic-checkpoint',
339 'realview64-simple-atomic-dual',
340 'realview64-simple-timing',
341 'realview64-simple-timing-dual',
342 'realview64-o3',
343 'realview64-o3-checker',
344 'realview64-o3-dual',
345 'realview64-minor',
346 'realview64-minor-dual',
347 'realview64-switcheroo-atomic',
348 'realview64-switcheroo-timing',
349 'realview64-switcheroo-o3',
350 'realview64-switcheroo-full']
351if env['TARGET_ISA'] == 'x86' and not env['BUILD_GPU']:
352 configs += ['pc-simple-atomic',
353 'pc-simple-timing',
354 'pc-o3-timing',
355 'pc-switcheroo-full']
|
230 Targets are as follows: 231 target[0] : List file (e.g., tests/opt/all.list, tests/opt/quick.list) |
232
|
357if env['TARGET_ISA'] == 'x86' and env['BUILD_GPU'] and \
358 env['TARGET_GPU_ISA'] == 'hsail':
359 configs += ['gpu']
360 if env['PROTOCOL'] == 'GPU_RfO':
361 configs += ['gpu-randomtest']
362else:
363 configs += ['simple-atomic', 'simple-atomic-mp',
364 'simple-timing', 'simple-timing-mp',
365 'minor-timing', 'minor-timing-mp',
366 'o3-timing', 'o3-timing-mt', 'o3-timing-mp',
367 'rubytest', 'memcheck', 'memtest', 'memtest-filter',
368 'tgen-simple-mem', 'tgen-dram-ctrl']
|
233 Sources are: - |
234
|
370 configs += ['learning-gem5-p1-simple', 'learning-gem5-p1-two-level']
|
235 """ |
236
|
372if env['PROTOCOL'] != 'None':
373 if env['PROTOCOL'] == 'MI_example':
374 configs += [c + "-ruby" for c in configs]
375 else:
376 configs = [c + "-ruby-" + env['PROTOCOL'] for c in configs]
|
237 tgt_name = os.path.basename(str(target[0])) 238 base, ext = os.path.splitext(tgt_name) 239 categories = tests.all_categories if base == "all" else (base, ) |
240
|
378src = Dir('.').srcdir
379for config in configs:
380 dirs = src.glob('*/*/*/ref/%s/*/%s' % (env['TARGET_ISA'], config))
381 for d in dirs:
382 d = str(d)
383 if not os.path.exists(os.path.join(d, 'skip')):
384 test_builder(env, d)
|
241 with open(target[0].abspath, "w") as fout: 242 for cat in categories: 243 for test in env.Tests[cat]: 244 print >> fout,"/".join(test) 245 246 return 0 247 248testListAction = env.Action(list_tests, strfunction=None) 249 250env.Command("all.list", tuple(), testListAction) 251for cat, test_list in env.Tests.items(): 252 env.Command("%s.list" % cat, tuple(), testListAction) 253 for test in test_list: 254 test_builder(test) |
|