# -*- mode:python -*-

# Copyright (c) 2004-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
#          Kevin Lim
28 29import os 30import sys 31import glob 32from SCons.Script.SConscript import SConsEnvironment 33 34Import('env') 35 36env['DIFFOUT'] = File('diff-out') 37 38# Dict that accumulates lists of tests by category (quick, medium, long) 39env.Tests = {} 40 41def contents(node): 42 return file(str(node)).read() 43 44def check_test(target, source, env): 45 """Check output from running test. 46 47 Targets are as follows: 48 target[0] : outdiff 49 target[1] : statsdiff 50 target[2] : status 51 52 """ 53 # make sure target files are all gone 54 for t in target: 55 if os.path.exists(t.abspath): 56 Execute(Delete(t.abspath)) 57 # Run diff on output & ref directories to find differences. 58 # Exclude m5stats.txt since we will use diff-out on that. 59 Execute(env.subst('diff -ubr ${SOURCES[0].dir} ${SOURCES[1].dir} ' + 60 '-I "^command line:" ' + # for stdout file 61 '-I "^M5 compiled on" ' + # for stderr file 62 '-I "^M5 simulation started" ' + # for stderr file 63 '-I "^Simulation complete at" ' + # for stderr file 64 '-I "^Listening for" ' + # for stderr file 65 '--exclude=m5stats.txt --exclude=SCCS ' + 66 '--exclude=${TARGETS[0].file} ' + 67 '> ${TARGETS[0]}', target=target, source=source), None) 68 print "===== Output differences =====" 69 print contents(target[0]) 70 # Run diff-out on m5stats.txt file 71 status = Execute(env.subst('$DIFFOUT $SOURCES > ${TARGETS[1]}', 72 target=target, source=source), 73 strfunction=None) 74 print "===== Statistics differences =====" 75 print contents(target[1]) 76 # Generate status file contents based on exit status of diff-out 77 if status == 0: 78 status_str = "passed." 79 else: 80 status_str = "FAILED!" 
81 f = file(str(target[2]), 'w') 82 print >>f, env.subst('${TARGETS[2].dir}', target=target, source=source), \ 83 status_str 84 f.close() 85 # done 86 return 0 87 88def check_test_string(target, source, env): 89 return env.subst("Comparing outputs in ${TARGETS[0].dir}.", 90 target=target, source=source) 91 92testAction = env.Action(check_test, check_test_string) 93 94def print_test(target, source, env): 95 print '***** ' + contents(source[0]) 96 return 0 97 98printAction = env.Action(print_test, strfunction = None) 99 100def update_test(target, source, env): 101 """Update reference test outputs. 102 103 Target is phony. First two sources are the ref & new m5stats.txt 104 files, respectively. We actually copy everything in the 105 respective directories except the status & diff output files. 106 107 """ 108 dest_dir = str(source[0].get_dir()) 109 src_dir = str(source[1].get_dir()) 110 dest_files = os.listdir(dest_dir) 111 src_files = os.listdir(src_dir) 112 # Exclude status & diff outputs 113 for f in ('outdiff', 'statsdiff', 'status'): 114 if f in src_files: 115 src_files.remove(f) 116 for f in src_files: 117 if f in dest_files: 118 print " Replacing file", f 119 dest_files.remove(f) 120 else: 121 print " Creating new file", f 122 copyAction = Copy(os.path.join(dest_dir, f), os.path.join(src_dir, f)) 123 copyAction.strfunction = None 124 Execute(copyAction) 125 # warn about any files in dest not overwritten (other than SCCS dir) 126 if 'SCCS' in dest_files: 127 dest_files.remove('SCCS') 128 if dest_files: 129 print "Warning: file(s) in", dest_dir, "not updated:", 130 print ', '.join(dest_files) 131 return 0 132 133def update_test_string(target, source, env): 134 return env.subst("Updating ${SOURCES[0].dir} from ${SOURCES[1].dir}", 135 target=target, source=source) 136 137updateAction = env.Action(update_test, update_test_string) 138
| 31 32import os 33import sys 34import glob 35from SCons.Script.SConscript import SConsEnvironment 36 37Import('env') 38 39env['DIFFOUT'] = File('diff-out') 40 41# Dict that accumulates lists of tests by category (quick, medium, long) 42env.Tests = {} 43 44def contents(node): 45 return file(str(node)).read() 46 47def check_test(target, source, env): 48 """Check output from running test. 49 50 Targets are as follows: 51 target[0] : outdiff 52 target[1] : statsdiff 53 target[2] : status 54 55 """ 56 # make sure target files are all gone 57 for t in target: 58 if os.path.exists(t.abspath): 59 Execute(Delete(t.abspath)) 60 # Run diff on output & ref directories to find differences. 61 # Exclude m5stats.txt since we will use diff-out on that. 62 Execute(env.subst('diff -ubr ${SOURCES[0].dir} ${SOURCES[1].dir} ' + 63 '-I "^command line:" ' + # for stdout file 64 '-I "^M5 compiled on" ' + # for stderr file 65 '-I "^M5 simulation started" ' + # for stderr file 66 '-I "^Simulation complete at" ' + # for stderr file 67 '-I "^Listening for" ' + # for stderr file 68 '--exclude=m5stats.txt --exclude=SCCS ' + 69 '--exclude=${TARGETS[0].file} ' + 70 '> ${TARGETS[0]}', target=target, source=source), None) 71 print "===== Output differences =====" 72 print contents(target[0]) 73 # Run diff-out on m5stats.txt file 74 status = Execute(env.subst('$DIFFOUT $SOURCES > ${TARGETS[1]}', 75 target=target, source=source), 76 strfunction=None) 77 print "===== Statistics differences =====" 78 print contents(target[1]) 79 # Generate status file contents based on exit status of diff-out 80 if status == 0: 81 status_str = "passed." 82 else: 83 status_str = "FAILED!" 
84 f = file(str(target[2]), 'w') 85 print >>f, env.subst('${TARGETS[2].dir}', target=target, source=source), \ 86 status_str 87 f.close() 88 # done 89 return 0 90 91def check_test_string(target, source, env): 92 return env.subst("Comparing outputs in ${TARGETS[0].dir}.", 93 target=target, source=source) 94 95testAction = env.Action(check_test, check_test_string) 96 97def print_test(target, source, env): 98 print '***** ' + contents(source[0]) 99 return 0 100 101printAction = env.Action(print_test, strfunction = None) 102 103def update_test(target, source, env): 104 """Update reference test outputs. 105 106 Target is phony. First two sources are the ref & new m5stats.txt 107 files, respectively. We actually copy everything in the 108 respective directories except the status & diff output files. 109 110 """ 111 dest_dir = str(source[0].get_dir()) 112 src_dir = str(source[1].get_dir()) 113 dest_files = os.listdir(dest_dir) 114 src_files = os.listdir(src_dir) 115 # Exclude status & diff outputs 116 for f in ('outdiff', 'statsdiff', 'status'): 117 if f in src_files: 118 src_files.remove(f) 119 for f in src_files: 120 if f in dest_files: 121 print " Replacing file", f 122 dest_files.remove(f) 123 else: 124 print " Creating new file", f 125 copyAction = Copy(os.path.join(dest_dir, f), os.path.join(src_dir, f)) 126 copyAction.strfunction = None 127 Execute(copyAction) 128 # warn about any files in dest not overwritten (other than SCCS dir) 129 if 'SCCS' in dest_files: 130 dest_files.remove('SCCS') 131 if dest_files: 132 print "Warning: file(s) in", dest_dir, "not updated:", 133 print ', '.join(dest_files) 134 return 0 135 136def update_test_string(target, source, env): 137 return env.subst("Updating ${SOURCES[0].dir} from ${SOURCES[1].dir}", 138 target=target, source=source) 139 140updateAction = env.Action(update_test, update_test_string) 141
|
139def test_builder(env, category, cpu_list=[], os_list=[], refdir='ref', timeout=15):
| 142def test_builder(env, category, cpu_list=[], os_list=[], refdir='ref', 143 timeout=15):
|
140 """Define a test. 141 142 Args: 143 category -- string describing test category (e.g., 'quick') 144 cpu_list -- list of CPUs to runs this test on (blank means all compiled CPUs) 145 os_list -- list of OSs to run this test on 146 refdir -- subdirectory containing reference output (default 'ref') 147 timeout -- test timeout in minutes (only enforced on pool) 148 149 """ 150 151 default_refdir = False 152 if refdir == 'ref': 153 default_refdir = True 154 if len(cpu_list) == 0: 155 cpu_list = env['CPU_MODELS']
| 144 """Define a test. 145 146 Args: 147 category -- string describing test category (e.g., 'quick') 148 cpu_list -- list of CPUs to runs this test on (blank means all compiled CPUs) 149 os_list -- list of OSs to run this test on 150 refdir -- subdirectory containing reference output (default 'ref') 151 timeout -- test timeout in minutes (only enforced on pool) 152 153 """ 154 155 default_refdir = False 156 if refdir == 'ref': 157 default_refdir = True 158 if len(cpu_list) == 0: 159 cpu_list = env['CPU_MODELS']
|
| 160 if env['TEST_CPU_MODELS']: 161 temp_cpu_list = [] 162 for i in env['TEST_CPU_MODELS']: 163 if i in cpu_list: 164 temp_cpu_list.append(i) 165 cpu_list = temp_cpu_list 166# Code commented out that shows the general structure if we want to test 167# different OS's as well.
|
156# if len(os_list) == 0:
| 168# if len(os_list) == 0:
|
157# raise RuntimeError, "No OS specified"
| 169# for test_cpu in cpu_list: 170# build_cpu_test(env, category, '', test_cpu, refdir, timeout)
|
158# else: 159# for test_os in os_list:
| 171# else: 172# for test_os in os_list:
|
160# build_cpu_test(env, category, test_os, cpu_list, refdir, timeout) 161 # Loop through CPU models and generate proper options, ref directories for each
| 173# for test_cpu in cpu_list: 174# build_cpu_test(env, category, test_os, test_cpu, refdir, 175# timeout) 176 # Loop through CPU models and generate proper options, ref directories
|
162 for cpu in cpu_list: 163 test_os = '' 164 if cpu == "AtomicSimpleCPU": 165 cpu_option = ('','atomic/') 166 elif cpu == "TimingSimpleCPU": 167 cpu_option = ('--timing','timing/') 168 elif cpu == "O3CPU": 169 cpu_option = ('--detailed','detailed/') 170 else: 171 raise TypeError, "Unknown CPU model specified" 172 173 if default_refdir:
| 177 for cpu in cpu_list: 178 test_os = '' 179 if cpu == "AtomicSimpleCPU": 180 cpu_option = ('','atomic/') 181 elif cpu == "TimingSimpleCPU": 182 cpu_option = ('--timing','timing/') 183 elif cpu == "O3CPU": 184 cpu_option = ('--detailed','detailed/') 185 else: 186 raise TypeError, "Unknown CPU model specified" 187 188 if default_refdir:
|
174 # Reference stats located in ref/arch/os/cpu or ref/arch/cpu if no OS specified
| 189 # Reference stats located in ref/arch/os/cpu or ref/arch/cpu 190 # if no OS specified
|
175 test_refdir = os.path.join(refdir, env['TARGET_ISA']) 176 if test_os != '': 177 test_refdir = os.path.join(test_refdir, test_os) 178 cpu_refdir = os.path.join(test_refdir, cpu_option[1]) 179 180 ref_stats = os.path.join(cpu_refdir, 'm5stats.txt') 181 182 # base command for running test 183 base_cmd = '${SOURCES[0]} -d $TARGET.dir ${SOURCES[1]}' 184 base_cmd = base_cmd + ' ' + cpu_option[0] 185 # stdout and stderr files 186 cmd_stdout = '${TARGETS[0]}' 187 cmd_stderr = '${TARGETS[1]}' 188 189 stdout_string = cpu_option[1] + 'stdout' 190 stderr_string = cpu_option[1] + 'stderr' 191 m5stats_string = cpu_option[1] + 'm5stats.txt' 192 outdiff_string = cpu_option[1] + 'outdiff' 193 statsdiff_string = cpu_option[1] + 'statsdiff' 194 status_string = cpu_option[1] + 'status' 195 196 # Prefix test run with batch job submission command if appropriate. 197 # Output redirection is also different for batch runs. 198 # Batch command also supports timeout arg (in seconds, not minutes). 199 if env['BATCH']: 200 cmd = [env['BATCH_CMD'], '-t', str(timeout * 60), 201 '-o', cmd_stdout, '-e', cmd_stderr, base_cmd] 202 else: 203 cmd = [base_cmd, '>', cmd_stdout, '2>', cmd_stderr] 204
| 191 test_refdir = os.path.join(refdir, env['TARGET_ISA']) 192 if test_os != '': 193 test_refdir = os.path.join(test_refdir, test_os) 194 cpu_refdir = os.path.join(test_refdir, cpu_option[1]) 195 196 ref_stats = os.path.join(cpu_refdir, 'm5stats.txt') 197 198 # base command for running test 199 base_cmd = '${SOURCES[0]} -d $TARGET.dir ${SOURCES[1]}' 200 base_cmd = base_cmd + ' ' + cpu_option[0] 201 # stdout and stderr files 202 cmd_stdout = '${TARGETS[0]}' 203 cmd_stderr = '${TARGETS[1]}' 204 205 stdout_string = cpu_option[1] + 'stdout' 206 stderr_string = cpu_option[1] + 'stderr' 207 m5stats_string = cpu_option[1] + 'm5stats.txt' 208 outdiff_string = cpu_option[1] + 'outdiff' 209 statsdiff_string = cpu_option[1] + 'statsdiff' 210 status_string = cpu_option[1] + 'status' 211 212 # Prefix test run with batch job submission command if appropriate. 213 # Output redirection is also different for batch runs. 214 # Batch command also supports timeout arg (in seconds, not minutes). 215 if env['BATCH']: 216 cmd = [env['BATCH_CMD'], '-t', str(timeout * 60), 217 '-o', cmd_stdout, '-e', cmd_stderr, base_cmd] 218 else: 219 cmd = [base_cmd, '>', cmd_stdout, '2>', cmd_stderr] 220
|
205 env.Command([stdout_string, stderr_string, m5stats_string], [env.M5Binary, 'run.py'], 206 ' '.join(cmd))
| 221 env.Command([stdout_string, stderr_string, m5stats_string], 222 [env.M5Binary, 'run.py'], ' '.join(cmd))
|
207 208 # order of targets is important... see check_test 209 env.Command([outdiff_string, statsdiff_string, status_string], 210 [ref_stats, m5stats_string], 211 testAction) 212 213 # phony target to echo status 214 if env['update_ref']:
| 223 224 # order of targets is important... see check_test 225 env.Command([outdiff_string, statsdiff_string, status_string], 226 [ref_stats, m5stats_string], 227 testAction) 228 229 # phony target to echo status 230 if env['update_ref']:
|
215 p = env.Command(cpu_option[1] + '_update', [ref_stats, m5stats_string, status_string],
| 231 p = env.Command(cpu_option[1] + '_update', 232 [ref_stats, m5stats_string, status_string],
|
216 updateAction) 217 else:
| 233 updateAction) 234 else:
|
218 p = env.Command(cpu_option[1] + '_print', [status_string], printAction)
| 235 p = env.Command(cpu_option[1] + '_print', [status_string], 236 printAction)
|
219 env.AlwaysBuild(p) 220 221 env.Tests.setdefault(category, []) 222 env.Tests[category] += p 223 224# Make test_builder a "wrapper" function. See SCons wiki page at 225# http://www.scons.org/cgi-bin/wiki/WrapperFunctions. 226SConsEnvironment.Test = test_builder 227 228cwd = os.getcwd() 229os.chdir(str(Dir('.').srcdir)) 230scripts = glob.glob('*/SConscript') 231os.chdir(cwd) 232 233for s in scripts: 234 SConscript(s, exports = 'env', duplicate = False) 235 236# Set up phony commands for various test categories 237allTests = [] 238for (key, val) in env.Tests.iteritems(): 239 env.Command(key, val, env.NoAction) 240 allTests += val 241 242# The 'all' target is redundant since just specifying the test 243# directory name (e.g., ALPHA_SE/test/opt) has the same effect. 244env.Command('all', allTests, env.NoAction)
| 237 env.AlwaysBuild(p) 238 239 env.Tests.setdefault(category, []) 240 env.Tests[category] += p 241 242# Make test_builder a "wrapper" function. See SCons wiki page at 243# http://www.scons.org/cgi-bin/wiki/WrapperFunctions. 244SConsEnvironment.Test = test_builder 245 246cwd = os.getcwd() 247os.chdir(str(Dir('.').srcdir)) 248scripts = glob.glob('*/SConscript') 249os.chdir(cwd) 250 251for s in scripts: 252 SConscript(s, exports = 'env', duplicate = False) 253 254# Set up phony commands for various test categories 255allTests = [] 256for (key, val) in env.Tests.iteritems(): 257 env.Command(key, val, env.NoAction) 258 allTests += val 259 260# The 'all' target is redundant since just specifying the test 261# directory name (e.g., ALPHA_SE/test/opt) has the same effect. 262env.Command('all', allTests, env.NoAction)
|