1# Copyright (c) 2006-2007 The Regents of The University of Michigan 2# Copyright (c) 2009 Advanced Micro Devices, Inc. 3# Copyright (c) 2013 Mark D. Hill and David A. Wood 4# All rights reserved. 5# 6# Redistribution and use in source and binary forms, with or without 7# modification, are permitted provided that the following conditions are 8# met: redistributions of source code must retain the above copyright --- 113 unchanged lines hidden (view full) --- 122 # 123 # Add controllers and sequencers to the appropriate lists 124 # 125 cpu_sequencers.append(cpu_seq) 126 l0_cntrl_nodes.append(l0_cntrl) 127 l1_cntrl_nodes.append(l1_cntrl) 128 129 # Connect the L0 and L1 controllers |
            # Queue carrying mandatory (CPU-originated) requests into the
            # L0 controller.
            l0_cntrl.mandatoryQueue = MessageBuffer()

            # A core's L0 and L1 talk over dedicated point-to-point buffers
            # rather than through the interconnect: each direction is one
            # ordered MessageBuffer shared by both controllers (the same
            # object is assigned to the sender's "to" port and the
            # receiver's "from" port).
            l0_cntrl.bufferToL1 = MessageBuffer(ordered = True)
            l1_cntrl.bufferFromL0 = l0_cntrl.bufferToL1
            l0_cntrl.bufferFromL1 = MessageBuffer(ordered = True)
            l1_cntrl.bufferToL0 = l0_cntrl.bufferFromL1

            # Connect the L1 controllers and the network.
            # L1 -> network: traffic headed for the L2 enters the network
            # through its slave ports (controller side is the master).
            l1_cntrl.requestToL2 = MessageBuffer()
            l1_cntrl.requestToL2.master = ruby_system.network.slave
            l1_cntrl.responseToL2 = MessageBuffer()
            l1_cntrl.responseToL2.master = ruby_system.network.slave
            l1_cntrl.unblockToL2 = MessageBuffer()
            l1_cntrl.unblockToL2.master = ruby_system.network.slave

            # network -> L1: inbound traffic from the L2 arrives on the
            # network's master ports (controller side is the slave).
            l1_cntrl.requestFromL2 = MessageBuffer()
            l1_cntrl.requestFromL2.slave = ruby_system.network.master
            l1_cntrl.responseFromL2 = MessageBuffer()
            l1_cntrl.responseFromL2.slave = ruby_system.network.master


        # Create the L2 banks for this cluster and wire each one to the
        # network.  (xrange/exec are Python-2 era idioms kept as-is;
        # setattr would be the clearer modern replacement for the exec.)
        for j in xrange(num_l2caches_per_cluster):
            l2_cache = L2Cache(size = options.l2_size,
                               assoc = options.l2_assoc,
                               start_index_bit = l2_index_start)

            # One controller per L2 bank; version numbers are globally
            # unique across clusters, cluster_id records which cluster
            # this bank belongs to.
            l2_cntrl = L2Cache_Controller(
                version = i * num_l2caches_per_cluster + j,
                L2cache = l2_cache, cluster_id = i,
                transitions_per_cycle=options.ports,
                ruby_system = ruby_system)

            # Attach the controller to the ruby system under a numbered
            # attribute name (ruby_system.l2_cntrl<N>).
            exec("ruby_system.l2_cntrl%d = l2_cntrl" % (
                i * num_l2caches_per_cluster + j))
            l2_cntrl_nodes.append(l2_cntrl)

            # Connect the L2 controllers and the network.
            # L2 -> network: requests toward the directory, requests toward
            # the L1s, and responses all leave through the network's slave
            # ports.
            l2_cntrl.DirRequestFromL2Cache = MessageBuffer()
            l2_cntrl.DirRequestFromL2Cache.master = ruby_system.network.slave
            l2_cntrl.L1RequestFromL2Cache = MessageBuffer()
            l2_cntrl.L1RequestFromL2Cache.master = ruby_system.network.slave
            l2_cntrl.responseFromL2Cache = MessageBuffer()
            l2_cntrl.responseFromL2Cache.master = ruby_system.network.slave

            # network -> L2: unblocks, L1 requests and responses arrive on
            # the network's master ports.
            l2_cntrl.unblockToL2Cache = MessageBuffer()
            l2_cntrl.unblockToL2Cache.slave = ruby_system.network.master
            l2_cntrl.L1RequestToL2Cache = MessageBuffer()
            l2_cntrl.L1RequestToL2Cache.slave = ruby_system.network.master
            l2_cntrl.responseToL2Cache = MessageBuffer()
            l2_cntrl.responseToL2Cache.slave = ruby_system.network.master
179 180 phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges)) 181 assert(phys_mem_size % options.num_dirs == 0) 182 mem_module_size = phys_mem_size / options.num_dirs 183 184 # Run each of the ruby memory controllers at a ratio of the frequency of 185 # the ruby system 186 # clk_divider value is a fix to pass regression. --- 13 unchanged lines hidden (view full) --- 200 version = i, size = dir_size), 201 transitions_per_cycle = options.ports, 202 ruby_system = ruby_system) 203 204 exec("ruby_system.dir_cntrl%d = dir_cntrl" % i) 205 dir_cntrl_nodes.append(dir_cntrl) 206 207 # Connect the directory controllers and the network |
        # network -> directory: requests and responses arrive on the
        # network's master ports.
        dir_cntrl.requestToDir = MessageBuffer()
        dir_cntrl.requestToDir.slave = ruby_system.network.master
        dir_cntrl.responseToDir = MessageBuffer()
        dir_cntrl.responseToDir.slave = ruby_system.network.master
        # directory -> network: responses leave through the network's
        # slave ports.
        dir_cntrl.responseFromDir = MessageBuffer()
        dir_cntrl.responseFromDir.master = ruby_system.network.slave
        # Deliberately NOT attached to the network: this buffer carries
        # responses from the directory's own memory side (presumably the
        # backing memory controller — confirm against the protocol's .sm
        # file), so it never crosses the interconnect.
        dir_cntrl.responseFromMemory = MessageBuffer()

    # Create one DMA sequencer/controller pair per DMA port and wire it
    # into the system.
    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        # Attach the controller under a numbered attribute name and hook
        # the external dma_port up to the sequencer's slave port.
        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        # Connect the dma controller to the network:
        # responses from the directory come in ordered (network master ->
        # controller slave); requests to the directory go out (controller
        # master -> network slave).
        dma_cntrl.mandatoryQueue = MessageBuffer()
        dma_cntrl.responseFromDir = MessageBuffer(ordered = True)
        dma_cntrl.responseFromDir.slave = ruby_system.network.master
        dma_cntrl.requestToDir = MessageBuffer()
        dma_cntrl.requestToDir.master = ruby_system.network.slave

    # Gather every controller created above; this list drives topology
    # construction below.
    all_cntrls = l0_cntrl_nodes + \
                 l1_cntrl_nodes + \
                 l2_cntrl_nodes + \
                 dir_cntrl_nodes + \
                 dma_cntrl_nodes

    # Create the io controller and the sequencer (full-system only: an
    # extra DMA controller, numbered after the real DMA ports, serves as
    # the IO port of the ruby system).
    if full_system:
        io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the dma controller to the network (same wiring as the
        # regular DMA controllers above: ordered responses in, requests
        # out).
        io_controller.mandatoryQueue = MessageBuffer()
        io_controller.responseFromDir = MessageBuffer(ordered = True)
        io_controller.responseFromDir.slave = ruby_system.network.master
        io_controller.requestToDir = MessageBuffer()
        io_controller.requestToDir.master = ruby_system.network.slave

        all_cntrls = all_cntrls + [io_controller]

    # Build the interconnect topology over every controller and hand back
    # the pieces the caller needs to finish Ruby setup.
    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, dir_cntrl_nodes, topology)