# gpu-ruby.py revision 11308
#
# Copyright (c) 2015 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
32# 33# Author: Brad Beckmann 34# 35 36import m5 37from m5.objects import * 38from m5.defines import buildEnv 39from m5.util import addToPath 40import os, optparse, sys, math, glob 41 42# Get paths we might need 43config_path = os.path.dirname(os.path.abspath(__file__)) 44config_root = os.path.dirname(config_path) 45addToPath(config_root+'/configs/common') 46addToPath(config_root+'/configs/ruby') 47addToPath(config_root+'/configs/topologies') 48 49import Ruby 50import Options 51import GPUTLBOptions, GPUTLBConfig 52 53########################## Script Options ######################## 54def setOption(parser, opt_str, value = 1): 55 # check to make sure the option actually exists 56 if not parser.has_option(opt_str): 57 raise Exception("cannot find %s in list of possible options" % opt_str) 58 59 opt = parser.get_option(opt_str) 60 # set the value 61 exec("parser.values.%s = %s" % (opt.dest, value)) 62 63def getOption(parser, opt_str): 64 # check to make sure the option actually exists 65 if not parser.has_option(opt_str): 66 raise Exception("cannot find %s in list of possible options" % opt_str) 67 68 opt = parser.get_option(opt_str) 69 # get the value 70 exec("return_value = parser.values.%s" % opt.dest) 71 return return_value 72 73def run_test(root): 74 """gpu test requires a specialized run_test implementation to set up the 75 mmio space.""" 76 77 # instantiate configuration 78 m5.instantiate() 79 80 # Now that the system has been constructed, setup the mmio space 81 root.system.cpu[0].workload[0].map(0x10000000, 0x200000000, 4096) 82 83 # simulate until program terminates 84 exit_event = m5.simulate(maxtick) 85 print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause() 86 87parser = optparse.OptionParser() 88Options.addCommonOptions(parser) 89Options.addSEOptions(parser) 90 91parser.add_option("-k", "--kernel-files", 92 help="file(s) containing GPU kernel code (colon separated)") 93parser.add_option("-u", "--num-compute-units", type="int", default=2, 
94 help="number of GPU compute units"), 95parser.add_option("--numCPs", type="int", default=0, 96 help="Number of GPU Command Processors (CP)") 97parser.add_option("--simds-per-cu", type="int", default=4, help="SIMD units" \ 98 "per CU") 99parser.add_option("--cu-per-sqc", type="int", default=4, help="number of CUs" \ 100 "sharing an SQC (icache, and thus icache TLB)") 101parser.add_option("--wf-size", type="int", default=64, 102 help="Wavefront size(in workitems)") 103parser.add_option("--wfs-per-simd", type="int", default=8, help="Number of " \ 104 "WF slots per SIMD") 105parser.add_option("--sp-bypass-path-length", type="int", default=4, \ 106 help="Number of stages of bypass path in vector ALU for Single "\ 107 "Precision ops") 108parser.add_option("--dp-bypass-path-length", type="int", default=4, \ 109 help="Number of stages of bypass path in vector ALU for Double "\ 110 "Precision ops") 111parser.add_option("--issue-period", type="int", default=4, \ 112 help="Number of cycles per vector instruction issue period") 113parser.add_option("--glbmem-wr-bus-width", type="int", default=32, \ 114 help="VGPR to Coalescer (Global Memory) data bus width in bytes") 115parser.add_option("--glbmem-rd-bus-width", type="int", default=32, \ 116 help="Coalescer to VGPR (Global Memory) data bus width in bytes") 117parser.add_option("--shr-mem-pipes-per-cu", type="int", default=1, \ 118 help="Number of Shared Memory pipelines per CU") 119parser.add_option("--glb-mem-pipes-per-cu", type="int", default=1, \ 120 help="Number of Global Memory pipelines per CU") 121parser.add_option("--vreg-file-size", type="int", default=2048, 122 help="number of physical vector registers per SIMD") 123parser.add_option("--bw-scalor", type="int", default=0, 124 help="bandwidth scalor for scalability analysis") 125parser.add_option("--CPUClock", type="string", default="2GHz", 126 help="CPU clock") 127parser.add_option("--GPUClock", type="string", default="1GHz", 128 help="GPU clock") 
129parser.add_option("--cpu-voltage", action="store", type="string", 130 default='1.0V', 131 help = """CPU voltage domain""") 132parser.add_option("--gpu-voltage", action="store", type="string", 133 default='1.0V', 134 help = """CPU voltage domain""") 135parser.add_option("--CUExecPolicy", type="string", default="OLDEST-FIRST", 136 help="WF exec policy (OLDEST-FIRST, ROUND-ROBIN)") 137parser.add_option("--xact-cas-mode", action="store_true", 138 help="enable load_compare mode (transactional CAS)") 139parser.add_option("--SegFaultDebug",action="store_true", 140 help="checks for GPU seg fault before TLB access") 141parser.add_option("--LocalMemBarrier",action="store_true", 142 help="Barrier does not wait for writethroughs to complete") 143parser.add_option("--countPages", action="store_true", 144 help="Count Page Accesses and output in per-CU output files") 145parser.add_option("--TLB-prefetch", type="int", help = "prefetch depth for"\ 146 "TLBs") 147parser.add_option("--pf-type", type="string", help="type of prefetch: "\ 148 "PF_CU, PF_WF, PF_PHASE, PF_STRIDE") 149parser.add_option("--pf-stride", type="int", help="set prefetch stride") 150parser.add_option("--numLdsBanks", type="int", default=32, 151 help="number of physical banks per LDS module") 152parser.add_option("--ldsBankConflictPenalty", type="int", default=1, 153 help="number of cycles per LDS bank conflict") 154 155# Add the ruby specific and protocol specific options 156Ruby.define_options(parser) 157 158GPUTLBOptions.tlb_options(parser) 159 160(options, args) = parser.parse_args() 161 162# The GPU cache coherence protocols only work with the backing store 163setOption(parser, "--access-backing-store") 164 165# Currently, the sqc (I-Cache of GPU) is shared by 166# multiple compute units(CUs). The protocol works just fine 167# even if sqc is not shared. 
Overriding this option here 168# so that the user need not explicitly set this (assuming 169# sharing sqc is the common usage) 170n_cu = options.num_compute_units 171num_sqc = int(math.ceil(float(n_cu) / options.cu_per_sqc)) 172options.num_sqc = num_sqc # pass this to Ruby 173 174########################## Creating the GPU system ######################## 175# shader is the GPU 176shader = Shader(n_wf = options.wfs_per_simd, 177 clk_domain = SrcClockDomain( 178 clock = options.GPUClock, 179 voltage_domain = VoltageDomain( 180 voltage = options.gpu_voltage)), 181 timing = True) 182 183# GPU_RfO(Read For Ownership) implements SC/TSO memory model. 184# Other GPU protocols implement release consistency at GPU side. 185# So, all GPU protocols other than GPU_RfO should make their writes 186# visible to the global memory and should read from global memory 187# during kernal boundary. The pipeline initiates(or do not initiate) 188# the acquire/release operation depending on this impl_kern_boundary_sync 189# flag. This flag=true means pipeline initiates a acquire/release operation 190# at kernel boundary. 
191if buildEnv['PROTOCOL'] == 'GPU_RfO': 192 shader.impl_kern_boundary_sync = False 193else: 194 shader.impl_kern_boundary_sync = True 195 196# Switching off per-lane TLB by default 197per_lane = False 198if options.TLB_config == "perLane": 199 per_lane = True 200 201# List of compute units; one GPU can have multiple compute units 202compute_units = [] 203for i in xrange(n_cu): 204 compute_units.append(ComputeUnit(cu_id = i, perLaneTLB = per_lane, 205 num_SIMDs = options.simds_per_cu, 206 wfSize = options.wf_size, 207 spbypass_pipe_length = \ 208 options.sp_bypass_path_length, 209 dpbypass_pipe_length = \ 210 options.dp_bypass_path_length, 211 issue_period = options.issue_period, 212 coalescer_to_vrf_bus_width = \ 213 options.glbmem_rd_bus_width, 214 vrf_to_coalescer_bus_width = \ 215 options.glbmem_wr_bus_width, 216 num_global_mem_pipes = \ 217 options.glb_mem_pipes_per_cu, 218 num_shared_mem_pipes = \ 219 options.shr_mem_pipes_per_cu, 220 n_wf = options.wfs_per_simd, 221 execPolicy = options.CUExecPolicy, 222 xactCasMode = options.xact_cas_mode, 223 debugSegFault = options.SegFaultDebug, 224 functionalTLB = True, 225 localMemBarrier = options.LocalMemBarrier, 226 countPages = options.countPages, 227 localDataStore = \ 228 LdsState(banks = options.numLdsBanks, 229 bankConflictPenalty = \ 230 options.ldsBankConflictPenalty))) 231 wavefronts = [] 232 vrfs = [] 233 for j in xrange(options.simds_per_cu): 234 for k in xrange(shader.n_wf): 235 wavefronts.append(Wavefront(simdId = j, wf_slot_id = k)) 236 vrfs.append(VectorRegisterFile(simd_id=j, 237 num_regs_per_simd=options.vreg_file_size)) 238 compute_units[-1].wavefronts = wavefronts 239 compute_units[-1].vector_register_file = vrfs 240 if options.TLB_prefetch: 241 compute_units[-1].prefetch_depth = options.TLB_prefetch 242 compute_units[-1].prefetch_prev_type = options.pf_type 243 244 # attach the LDS and the CU to the bus (actually a Bridge) 245 compute_units[-1].ldsPort = compute_units[-1].ldsBus.slave 246 
compute_units[-1].ldsBus.master = compute_units[-1].localDataStore.cuPort 247 248# Attach compute units to GPU 249shader.CUs = compute_units 250 251# this is a uniprocessor only test, thus the shader is the second index in the 252# list of "system.cpus" 253options.num_cpus = 1 254shader_idx = 1 255cpu = TimingSimpleCPU(cpu_id=0) 256 257########################## Creating the GPU dispatcher ######################## 258# Dispatcher dispatches work from host CPU to GPU 259host_cpu = cpu 260dispatcher = GpuDispatcher() 261 262# Currently does not test for command processors 263cpu_list = [cpu] + [shader] + [dispatcher] 264 265system = System(cpu = cpu_list, 266 mem_ranges = [AddrRange(options.mem_size)], 267 mem_mode = 'timing') 268 269# Dummy voltage domain for all our clock domains 270system.voltage_domain = VoltageDomain(voltage = options.sys_voltage) 271system.clk_domain = SrcClockDomain(clock = '1GHz', 272 voltage_domain = system.voltage_domain) 273 274# Create a seperate clock domain for components that should run at 275# CPUs frequency 276system.cpu[0].clk_domain = SrcClockDomain(clock = '2GHz', 277 voltage_domain = \ 278 system.voltage_domain) 279 280# configure the TLB hierarchy 281GPUTLBConfig.config_tlb_hierarchy(options, system, shader_idx) 282 283# create Ruby system 284system.piobus = IOXBar(width=32, response_latency=0, 285 frontend_latency=0, forward_latency=0) 286Ruby.create_system(options, None, system) 287 288# Create a separate clock for Ruby 289system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock, 290 voltage_domain = system.voltage_domain) 291 292# create the interrupt controller 293cpu.createInterruptController() 294 295# 296# Tie the cpu cache ports to the ruby cpu ports and 297# physmem, respectively 298# 299cpu.connectAllPorts(system.ruby._cpu_ports[0]) 300system.ruby._cpu_ports[0].mem_master_port = system.piobus.slave 301 302# attach CU ports to Ruby 303# Because of the peculiarities of the CP core, you may have 1 CPU but 2 304# 
sequencers and thus 2 _cpu_ports created. Your GPUs shouldn't be 305# hooked up until after the CP. To make this script generic, figure out 306# the index as below, but note that this assumes there is one sequencer 307# per compute unit and one sequencer per SQC for the math to work out 308# correctly. 309gpu_port_idx = len(system.ruby._cpu_ports) \ 310 - options.num_compute_units - options.num_sqc 311gpu_port_idx = gpu_port_idx - options.numCPs * 2 312 313wavefront_size = options.wf_size 314for i in xrange(n_cu): 315 # The pipeline issues wavefront_size number of uncoalesced requests 316 # in one GPU issue cycle. Hence wavefront_size mem ports. 317 for j in xrange(wavefront_size): 318 system.cpu[shader_idx].CUs[i].memory_port[j] = \ 319 system.ruby._cpu_ports[gpu_port_idx].slave[j] 320 gpu_port_idx += 1 321 322for i in xrange(n_cu): 323 if i > 0 and not i % options.cu_per_sqc: 324 gpu_port_idx += 1 325 system.cpu[shader_idx].CUs[i].sqc_port = \ 326 system.ruby._cpu_ports[gpu_port_idx].slave 327gpu_port_idx = gpu_port_idx + 1 328 329assert(options.numCPs == 0) 330 331# connect dispatcher to the system.piobus 332dispatcher.pio = system.piobus.master 333dispatcher.dma = system.piobus.slave 334 335################# Connect the CPU and GPU via GPU Dispatcher ################### 336# CPU rings the GPU doorbell to notify a pending task 337# using this interface. 338# And GPU uses this interface to notify the CPU of task completion 339# The communcation happens through emulated driver. 340 341# Note this implicit setting of the cpu_pointer, shader_pointer and tlb array 342# parameters must be after the explicit setting of the System cpu list 343shader.cpu_pointer = host_cpu 344dispatcher.cpu = host_cpu 345dispatcher.shader_pointer = shader 346 347# ----------------------- 348# run simulation 349# ----------------------- 350 351root = Root(full_system = False, system = system) 352m5.ticks.setGlobalFrequency('1THz') 353root.system.mem_mode = 'timing' 354