# apu_se.py revision 11670:6ce719503eae
#
# Copyright (c) 2015 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Sooraj Puthoor
#

import optparse, os, re
import math
import glob
import inspect

import m5
from m5.objects import *
from m5.util import addToPath

addToPath('../')
addToPath('../common')

from ruby import Ruby

import Options
import Simulation
import GPUTLBOptions, GPUTLBConfig

########################## Script Options ########################

def setOption(parser, opt_str, value = 1):
    """Force an existing optparse option to the given value.

    Raises an Exception if opt_str is not a registered option string.
    NOTE: the original used exec() to perform the assignment; setattr
    behaves identically for the numeric/boolean values used in this
    script and avoids dynamic code execution.
    """
    # check to make sure the option actually exists
    if not parser.has_option(opt_str):
        raise Exception("cannot find %s in list of possible options" % opt_str)

    opt = parser.get_option(opt_str)
    # set the value on the parser's pre-parse defaults object
    setattr(parser.values, opt.dest, value)

def getOption(parser, opt_str):
    """Return the current value of an existing optparse option.

    Raises an Exception if opt_str is not a registered option string.
    NOTE: the original used exec("return_value = ...") which also does
    not work under Python 3 function scoping; getattr is equivalent.
    """
    # check to make sure the option actually exists
    if not parser.has_option(opt_str):
        raise Exception("cannot find %s in list of possible options" % opt_str)

    opt = parser.get_option(opt_str)
    # get the value
    return getattr(parser.values, opt.dest)

# Adding script options
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addSEOptions(parser)

parser.add_option("--cpu-only-mode", action="store_true", default=False,
                  help="APU mode. Used to take care of problems in "
                       "Ruby.py while running APU protocols")
parser.add_option("-k", "--kernel-files",
                  help="file(s) containing GPU kernel code (colon separated)")
parser.add_option("-u", "--num-compute-units", type="int", default=1,
                  help="number of GPU compute units")
parser.add_option("--num-cp", type="int", default=0,
                  help="Number of GPU Command Processors (CP)")
parser.add_option("--benchmark-root", help="Root of benchmark directory tree")

# not super important now, but to avoid putting the number 4 everywhere, make
# it an option/knob
parser.add_option("--cu-per-sqc", type="int", default=4,
                  # (fixed missing space in the concatenated help string)
                  help="number of CUs sharing an SQC "
                       "(icache, and thus icache TLB)")
parser.add_option("--simds-per-cu", type="int", default=4,
                  help="SIMD units per CU")
parser.add_option("--wf-size", type="int", default=64,
                  help="Wavefront size (in workitems)")
parser.add_option("--sp-bypass-path-length", type="int", default=4,
                  help="Number of stages of bypass path in vector ALU "
                       "for Single Precision ops")
parser.add_option("--dp-bypass-path-length", type="int", default=4,
                  help="Number of stages of bypass path in vector ALU "
                       "for Double Precision ops")
# issue period per SIMD unit: number of cycles before issuing another vector
parser.add_option("--issue-period", type="int", default=4,
                  help="Number of cycles per vector instruction issue period")
parser.add_option("--glbmem-wr-bus-width", type="int", default=32,
                  help="VGPR to Coalescer (Global Memory) data bus width "
                       "in bytes")
parser.add_option("--glbmem-rd-bus-width", type="int", default=32,
                  help="Coalescer to VGPR (Global Memory) data bus width "
                       "in bytes")
# Currently we only support 1 local memory pipe
parser.add_option("--shr-mem-pipes-per-cu", type="int", default=1,
                  help="Number of Shared Memory pipelines per CU")
# Currently we only support 1 global memory pipe
parser.add_option("--glb-mem-pipes-per-cu", type="int", default=1,
                  help="Number of Global Memory pipelines per CU")
parser.add_option("--wfs-per-simd", type="int", default=10,
                  help="Number of WF slots per SIMD")

parser.add_option("--vreg-file-size", type="int", default=2048,
                  help="number of physical vector registers per SIMD")
parser.add_option("--bw-scalor", type="int", default=0,
                  help="bandwidth scalor for scalability analysis")
parser.add_option("--CPUClock", type="string", default="2GHz",
                  help="CPU clock")
parser.add_option("--GPUClock", type="string", default="1GHz",
                  help="GPU clock")
parser.add_option("--cpu-voltage", action="store", type="string",
                  default='1.0V',
                  help="CPU voltage domain")
parser.add_option("--gpu-voltage", action="store", type="string",
                  default='1.0V',
                  # original help wrongly said "CPU voltage domain"
                  help="GPU voltage domain")
parser.add_option("--CUExecPolicy", type="string", default="OLDEST-FIRST",
                  help="WF exec policy (OLDEST-FIRST, ROUND-ROBIN)")
parser.add_option("--xact-cas-mode", action="store_true",
                  help="enable load_compare mode (transactional CAS)")
parser.add_option("--SegFaultDebug", action="store_true",
                  help="checks for GPU seg fault before TLB access")
parser.add_option("--FunctionalTLB", action="store_true",
                  help="Assumes TLB has no latency")
parser.add_option("--LocalMemBarrier", action="store_true",
                  help="Barrier does not wait for writethroughs to complete")
parser.add_option("--countPages", action="store_true",
                  help="Count Page Accesses and output in per-CU output files")
parser.add_option("--TLB-prefetch", type="int",
                  help="prefetch depth for TLBs")
parser.add_option("--pf-type", type="string",
                  help="type of prefetch: PF_CU, PF_WF, PF_PHASE, PF_STRIDE")
parser.add_option("--pf-stride", type="int", help="set prefetch stride")
parser.add_option("--numLdsBanks", type="int", default=32,
                  help="number of physical banks per LDS module")
parser.add_option("--ldsBankConflictPenalty", type="int", default=1,
                  help="number of cycles per LDS bank conflict")
parser.add_option('--fast-forward-pseudo-op', action='store_true',
                  help = 'fast forward using kvm until the m5_switchcpu'
                         ' pseudo-op is encountered, then switch cpus. subsequent'
                         ' m5_switchcpu pseudo-ops will toggle back and forth')


Ruby.define_options(parser)

# add TLB options to the parser
GPUTLBOptions.tlb_options(parser)

(options, args) = parser.parse_args()

# The GPU cache coherence protocols only work with the backing store
setOption(parser, "--access-backing-store")

# if benchmark root is specified explicitly, that overrides the search path
if options.benchmark_root:
    benchmark_path = [options.benchmark_root]
else:
    # Set default benchmark search path to current dir
    benchmark_path = ['.']

########################## Sanity Check ########################

# Currently the gpu model requires ruby
if buildEnv['PROTOCOL'] == 'None':
    fatal("GPU model requires ruby")

# Currently the gpu model requires only timing or detailed CPU
if options.cpu_type not in ("timing", "detailed"):
    fatal("GPU model requires timing or detailed CPU")

# This file can support multiple compute units
assert(options.num_compute_units >= 1)

# Currently, the sqc (I-Cache of GPU) is shared by multiple compute
# units (CUs). The protocol works just fine even if sqc is not shared.
# Overriding this option here so that the user need not explicitly set
# this (assuming sharing sqc is the common usage).
n_cu = options.num_compute_units
num_sqc = int(math.ceil(float(n_cu) / options.cu_per_sqc))
options.num_sqc = num_sqc # pass this to Ruby

########################## Creating the GPU system ########################
# shader is the GPU
shader = Shader(n_wf = options.wfs_per_simd,
                clk_domain = SrcClockDomain(
                    clock = options.GPUClock,
                    voltage_domain = VoltageDomain(
                        voltage = options.gpu_voltage)))

# GPU_RfO (Read For Ownership) implements an SC/TSO memory model.
# Other GPU protocols implement release consistency at the GPU side.
# So, all GPU protocols other than GPU_RfO should make their writes
# visible to the global memory and should read from global memory
# at kernel boundaries. The pipeline initiates (or does not initiate)
# the acquire/release operation depending on the impl_kern_boundary_sync
# flag. flag=True means the pipeline initiates an acquire/release
# operation at each kernel boundary.
if buildEnv['PROTOCOL'] == 'GPU_RfO':
    shader.impl_kern_boundary_sync = False
else:
    shader.impl_kern_boundary_sync = True

# Switching off per-lane TLB by default
per_lane = False
if options.TLB_config == "perLane":
    per_lane = True

# List of compute units; one GPU can have multiple compute units
compute_units = []
for i in xrange(n_cu):
    compute_units.append(ComputeUnit(cu_id = i, perLaneTLB = per_lane,
                                     num_SIMDs = options.simds_per_cu,
                                     wfSize = options.wf_size,
                                     spbypass_pipe_length = \
                                     options.sp_bypass_path_length,
                                     dpbypass_pipe_length = \
                                     options.dp_bypass_path_length,
                                     issue_period = options.issue_period,
                                     coalescer_to_vrf_bus_width = \
                                     options.glbmem_rd_bus_width,
                                     vrf_to_coalescer_bus_width = \
                                     options.glbmem_wr_bus_width,
                                     num_global_mem_pipes = \
                                     options.glb_mem_pipes_per_cu,
                                     num_shared_mem_pipes = \
                                     options.shr_mem_pipes_per_cu,
                                     n_wf = options.wfs_per_simd,
                                     execPolicy = options.CUExecPolicy,
                                     xactCasMode = options.xact_cas_mode,
                                     debugSegFault = options.SegFaultDebug,
                                     functionalTLB = options.FunctionalTLB,
                                     localMemBarrier = options.LocalMemBarrier,
                                     countPages = options.countPages,
                                     localDataStore = \
                                     LdsState(banks = options.numLdsBanks,
                                              bankConflictPenalty = \
                                              options.ldsBankConflictPenalty)))

    # one vector register file per SIMD, and n_wf wavefront slots per SIMD
    wavefronts = []
    vrfs = []
    for j in xrange(options.simds_per_cu):
        for k in xrange(shader.n_wf):
            wavefronts.append(Wavefront(simdId = j, wf_slot_id = k,
                                        wfSize = options.wf_size))
        vrfs.append(VectorRegisterFile(simd_id=j,
                                       num_regs_per_simd=options.vreg_file_size))
    compute_units[-1].wavefronts = wavefronts
    compute_units[-1].vector_register_file = vrfs
    if options.TLB_prefetch:
        compute_units[-1].prefetch_depth = options.TLB_prefetch
        compute_units[-1].prefetch_prev_type = options.pf_type

    # attach the LDS and the CU to the bus (actually a Bridge)
    compute_units[-1].ldsPort = compute_units[-1].ldsBus.slave
    compute_units[-1].ldsBus.master = compute_units[-1].localDataStore.cuPort

# Attach compute units to GPU
shader.CUs = compute_units

########################## Creating the CPU system ########################
# (a redundant self-assignment of options.num_cpus was removed here)

# The shader core will be whatever is after the CPU cores are accounted for
shader_idx = options.num_cpus

# The command processor will be whatever is after the shader is accounted for
cp_idx = shader_idx + 1
cp_list = []

# List of CPUs
cpu_list = []

CpuClass, mem_mode = Simulation.getCPUClass(options.cpu_type)
if CpuClass == AtomicSimpleCPU:
    fatal("AtomicSimpleCPU is not supported")
if mem_mode != 'timing':
    fatal("Only the timing memory mode is supported")
shader.timing = True

if options.fast_forward and options.fast_forward_pseudo_op:
    fatal("Cannot fast-forward based both on the number of instructions and"
          " on pseudo-ops")
fast_forward = options.fast_forward or options.fast_forward_pseudo_op

if fast_forward:
    FutureCpuClass, future_mem_mode = CpuClass, mem_mode

    CpuClass = X86KvmCPU
    mem_mode = 'atomic_noncaching'
    # Leave shader.timing untouched, because its value only matters at the
    # start of the simulation and because we require switching cpus
    # *before* the first kernel launch.

    future_cpu_list = []

    # Initial CPUs to be used during fast-forwarding.
    for i in xrange(options.num_cpus):
        cpu = CpuClass(cpu_id = i,
                       clk_domain = SrcClockDomain(
                           clock = options.CPUClock,
                           voltage_domain = VoltageDomain(
                               voltage = options.cpu_voltage)))
        cpu_list.append(cpu)

        if options.fast_forward:
            cpu.max_insts_any_thread = int(options.fast_forward)

if fast_forward:
    MainCpuClass = FutureCpuClass
else:
    MainCpuClass = CpuClass

# CPs to be used throughout the simulation.
328for i in xrange(options.num_cp): 329 cp = MainCpuClass(cpu_id = options.num_cpus + i, 330 clk_domain = SrcClockDomain( 331 clock = options.CPUClock, 332 voltage_domain = VoltageDomain( 333 voltage = options.cpu_voltage))) 334 cp_list.append(cp) 335 336# Main CPUs (to be used after fast-forwarding if fast-forwarding is specified). 337for i in xrange(options.num_cpus): 338 cpu = MainCpuClass(cpu_id = i, 339 clk_domain = SrcClockDomain( 340 clock = options.CPUClock, 341 voltage_domain = VoltageDomain( 342 voltage = options.cpu_voltage))) 343 if fast_forward: 344 cpu.switched_out = True 345 future_cpu_list.append(cpu) 346 else: 347 cpu_list.append(cpu) 348 349########################## Creating the GPU dispatcher ######################## 350# Dispatcher dispatches work from host CPU to GPU 351host_cpu = cpu_list[0] 352dispatcher = GpuDispatcher() 353 354########################## Create and assign the workload ######################## 355# Check for rel_path in elements of base_list using test, returning 356# the first full path that satisfies test 357def find_path(base_list, rel_path, test): 358 for base in base_list: 359 if not base: 360 # base could be None if environment var not set 361 continue 362 full_path = os.path.join(base, rel_path) 363 if test(full_path): 364 return full_path 365 fatal("%s not found in %s" % (rel_path, base_list)) 366 367def find_file(base_list, rel_path): 368 return find_path(base_list, rel_path, os.path.isfile) 369 370executable = find_path(benchmark_path, options.cmd, os.path.exists) 371# it's common for a benchmark to be in a directory with the same 372# name as the executable, so we handle that automatically 373if os.path.isdir(executable): 374 benchmark_path = [executable] 375 executable = find_file(benchmark_path, options.cmd) 376if options.kernel_files: 377 kernel_files = [find_file(benchmark_path, f) 378 for f in options.kernel_files.split(':')] 379else: 380 # if kernel_files is not set, see if there's a unique .asm file 381 # 
in the same directory as the executable 382 kernel_path = os.path.dirname(executable) 383 kernel_files = glob.glob(os.path.join(kernel_path, '*.asm')) 384 if kernel_files: 385 print "Using GPU kernel code file(s)", ",".join(kernel_files) 386 else: 387 fatal("Can't locate kernel code (.asm) in " + kernel_path) 388 389# OpenCL driver 390driver = ClDriver(filename="hsa", codefile=kernel_files) 391for cpu in cpu_list: 392 cpu.workload = LiveProcess(executable = executable, 393 cmd = [options.cmd] + options.options.split(), 394 drivers = [driver]) 395for cp in cp_list: 396 cp.workload = host_cpu.workload 397 398if fast_forward: 399 for i in xrange(len(future_cpu_list)): 400 future_cpu_list[i].workload = cpu_list[i].workload 401 402########################## Create the overall system ######################## 403# List of CPUs that must be switched when moving between KVM and simulation 404if fast_forward: 405 switch_cpu_list = \ 406 [(cpu_list[i], future_cpu_list[i]) for i in xrange(options.num_cpus)] 407 408# Full list of processing cores in the system. 
Note that 409# dispatcher is also added to cpu_list although it is 410# not a processing element 411cpu_list = cpu_list + [shader] + cp_list + [dispatcher] 412 413# creating the overall system 414# notice the cpu list is explicitly added as a parameter to System 415system = System(cpu = cpu_list, 416 mem_ranges = [AddrRange(options.mem_size)], 417 cache_line_size = options.cacheline_size, 418 mem_mode = mem_mode) 419if fast_forward: 420 system.future_cpu = future_cpu_list 421system.voltage_domain = VoltageDomain(voltage = options.sys_voltage) 422system.clk_domain = SrcClockDomain(clock = options.sys_clock, 423 voltage_domain = system.voltage_domain) 424 425if fast_forward: 426 have_kvm_support = 'BaseKvmCPU' in globals() 427 if have_kvm_support and buildEnv['TARGET_ISA'] == "x86": 428 system.vm = KvmVM() 429 for i in xrange(len(host_cpu.workload)): 430 host_cpu.workload[i].useArchPT = True 431 host_cpu.workload[i].kvmInSE = True 432 else: 433 fatal("KvmCPU can only be used in SE mode with x86") 434 435# configure the TLB hierarchy 436GPUTLBConfig.config_tlb_hierarchy(options, system, shader_idx) 437 438# create Ruby system 439system.piobus = IOXBar(width=32, response_latency=0, 440 frontend_latency=0, forward_latency=0) 441Ruby.create_system(options, None, system) 442system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock, 443 voltage_domain = system.voltage_domain) 444 445# attach the CPU ports to Ruby 446for i in range(options.num_cpus): 447 ruby_port = system.ruby._cpu_ports[i] 448 449 # Create interrupt controller 450 system.cpu[i].createInterruptController() 451 452 # Connect cache port's to ruby 453 system.cpu[i].icache_port = ruby_port.slave 454 system.cpu[i].dcache_port = ruby_port.slave 455 456 ruby_port.mem_master_port = system.piobus.slave 457 if buildEnv['TARGET_ISA'] == "x86": 458 system.cpu[i].interrupts[0].pio = system.piobus.master 459 system.cpu[i].interrupts[0].int_master = system.piobus.slave 460 system.cpu[i].interrupts[0].int_slave = 
system.piobus.master 461 if fast_forward: 462 system.cpu[i].itb.walker.port = ruby_port.slave 463 system.cpu[i].dtb.walker.port = ruby_port.slave 464 465# attach CU ports to Ruby 466# Because of the peculiarities of the CP core, you may have 1 CPU but 2 467# sequencers and thus 2 _cpu_ports created. Your GPUs shouldn't be 468# hooked up until after the CP. To make this script generic, figure out 469# the index as below, but note that this assumes there is one sequencer 470# per compute unit and one sequencer per SQC for the math to work out 471# correctly. 472gpu_port_idx = len(system.ruby._cpu_ports) \ 473 - options.num_compute_units - options.num_sqc 474gpu_port_idx = gpu_port_idx - options.num_cp * 2 475 476wavefront_size = options.wf_size 477for i in xrange(n_cu): 478 # The pipeline issues wavefront_size number of uncoalesced requests 479 # in one GPU issue cycle. Hence wavefront_size mem ports. 480 for j in xrange(wavefront_size): 481 system.cpu[shader_idx].CUs[i].memory_port[j] = \ 482 system.ruby._cpu_ports[gpu_port_idx].slave[j] 483 gpu_port_idx += 1 484 485for i in xrange(n_cu): 486 if i > 0 and not i % options.cu_per_sqc: 487 print "incrementing idx on ", i 488 gpu_port_idx += 1 489 system.cpu[shader_idx].CUs[i].sqc_port = \ 490 system.ruby._cpu_ports[gpu_port_idx].slave 491gpu_port_idx = gpu_port_idx + 1 492 493# attach CP ports to Ruby 494for i in xrange(options.num_cp): 495 system.cpu[cp_idx].createInterruptController() 496 system.cpu[cp_idx].dcache_port = \ 497 system.ruby._cpu_ports[gpu_port_idx + i * 2].slave 498 system.cpu[cp_idx].icache_port = \ 499 system.ruby._cpu_ports[gpu_port_idx + i * 2 + 1].slave 500 system.cpu[cp_idx].interrupts[0].pio = system.piobus.master 501 system.cpu[cp_idx].interrupts[0].int_master = system.piobus.slave 502 system.cpu[cp_idx].interrupts[0].int_slave = system.piobus.master 503 cp_idx = cp_idx + 1 504 505# connect dispatcher to the system.piobus 506dispatcher.pio = system.piobus.master 507dispatcher.dma = 
system.piobus.slave 508 509################# Connect the CPU and GPU via GPU Dispatcher ################### 510# CPU rings the GPU doorbell to notify a pending task 511# using this interface. 512# And GPU uses this interface to notify the CPU of task completion 513# The communcation happens through emulated driver. 514 515# Note this implicit setting of the cpu_pointer, shader_pointer and tlb array 516# parameters must be after the explicit setting of the System cpu list 517if fast_forward: 518 shader.cpu_pointer = future_cpu_list[0] 519 dispatcher.cpu = future_cpu_list[0] 520else: 521 shader.cpu_pointer = host_cpu 522 dispatcher.cpu = host_cpu 523dispatcher.shader_pointer = shader 524dispatcher.cl_driver = driver 525 526########################## Start simulation ######################## 527 528root = Root(system=system, full_system=False) 529m5.ticks.setGlobalFrequency('1THz') 530if options.abs_max_tick: 531 maxtick = options.abs_max_tick 532else: 533 maxtick = m5.MaxTick 534 535# Benchmarks support work item annotations 536Simulation.setWorkCountOptions(system, options) 537 538# Checkpointing is not supported by APU model 539if (options.checkpoint_dir != None or 540 options.checkpoint_restore != None): 541 fatal("Checkpointing not supported by apu model") 542 543checkpoint_dir = None 544m5.instantiate(checkpoint_dir) 545 546# Map workload to this address space 547host_cpu.workload[0].map(0x10000000, 0x200000000, 4096) 548 549if options.fast_forward: 550 print "Switch at instruction count: %d" % \ 551 cpu_list[0].max_insts_any_thread 552 553exit_event = m5.simulate(maxtick) 554 555if options.fast_forward: 556 if exit_event.getCause() == "a thread reached the max instruction count": 557 m5.switchCpus(system, switch_cpu_list) 558 print "Switched CPUS @ tick %s" % (m5.curTick()) 559 m5.stats.reset() 560 exit_event = m5.simulate(maxtick - m5.curTick()) 561elif options.fast_forward_pseudo_op: 562 while exit_event.getCause() == "switchcpu": 563 # If we are switching 
*to* kvm, then the current stats are meaningful 564 # Note that we don't do any warmup by default 565 if type(switch_cpu_list[0][0]) == FutureCpuClass: 566 print "Dumping stats..." 567 m5.stats.dump() 568 m5.switchCpus(system, switch_cpu_list) 569 print "Switched CPUS @ tick %s" % (m5.curTick()) 570 m5.stats.reset() 571 # This lets us switch back and forth without keeping a counter 572 switch_cpu_list = [(x[1], x[0]) for x in switch_cpu_list] 573 exit_event = m5.simulate(maxtick - m5.curTick()) 574 575print "Ticks:", m5.curTick() 576print 'Exiting because ', exit_event.getCause() 577sys.exit(exit_event.getCode()) 578