# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009,2015 Advanced Micro Devices, Inc.
# Copyright (c) 2013 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
#          Nilay Vaish

import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
from Ruby import send_evicts

#
# Declare caches used by the protocol
#
class L0Cache(RubyCache): pass
class L1Cache(RubyCache): pass
class L2Cache(RubyCache): pass

def define_options(parser):
    """Register the command-line options specific to this protocol."""
    # NOTE: use implicit string concatenation (not a backslash inside the
    # literal) so the help text does not pick up raw indentation whitespace.
    parser.add_option("--num-clusters", type = "int", default = 1,
            help = "number of clusters in a design in which there are "
                   "shared caches private to clusters")
    return

def create_system(options, full_system, system, dma_ports, ruby_system):
    """Build the MESI_Three_Level Ruby memory system.

    Creates the L0/L1/L2 cache controllers, directory controllers and DMA
    controllers, wires their message buffers to the network, and returns
    (cpu_sequencers, dir_cntrl_nodes, topology) for the caller to attach
    to the ruby system.
    """

    if buildEnv['PROTOCOL'] != 'MESI_Three_Level':
        fatal("This script requires the MESI_Three_Level protocol to be "
              "built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes
    # must be listed before the directory nodes and directory nodes before
    # dma nodes, etc.
    #
    l0_cntrl_nodes = []
    l1_cntrl_nodes = []
    l2_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    assert (options.num_cpus % options.num_clusters == 0)
    num_cpus_per_cluster = options.num_cpus / options.num_clusters

    assert (options.num_l2caches % options.num_clusters == 0)
    num_l2caches_per_cluster = options.num_l2caches / options.num_clusters

    l2_bits = int(math.log(num_l2caches_per_cluster, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))
    l2_index_start = block_size_bits + l2_bits

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
    for i in xrange(options.num_clusters):
        for j in xrange(num_cpus_per_cluster):
            #
            # First create the Ruby objects associated with this cpu
            #
            l0i_cache = L0Cache(size = '4096B', assoc = 1, is_icache = True,
                start_index_bit = block_size_bits,
                replacement_policy = LRUReplacementPolicy())

            l0d_cache = L0Cache(size = '4096B', assoc = 1, is_icache = False,
                start_index_bit = block_size_bits,
                replacement_policy = LRUReplacementPolicy())

            # the ruby random tester reuses num_cpus to specify the
            # number of cpu ports connected to the tester object, which
            # is stored in system.cpu. because there is only ever one
            # tester object, num_cpus is not necessarily equal to the
            # size of system.cpu; therefore if len(system.cpu) == 1
            # we use system.cpu[0] to set the clk_domain, thereby ensuring
            # we don't index off the end of the cpu list.
            if len(system.cpu) == 1:
                clk_domain = system.cpu[0].clk_domain
            else:
                clk_domain = system.cpu[i].clk_domain

            l0_cntrl = L0Cache_Controller(
                  version = i * num_cpus_per_cluster + j, Icache = l0i_cache,
                  Dcache = l0d_cache, send_evictions = send_evicts(options),
                  clk_domain = clk_domain, ruby_system = ruby_system)

            cpu_seq = RubySequencer(version = i * num_cpus_per_cluster + j,
                                    icache = l0i_cache,
                                    clk_domain = clk_domain,
                                    dcache = l0d_cache,
                                    ruby_system = ruby_system)

            l0_cntrl.sequencer = cpu_seq

            l1_cache = L1Cache(size = options.l1d_size,
                               assoc = options.l1d_assoc,
                               start_index_bit = block_size_bits,
                               is_icache = False)

            l1_cntrl = L1Cache_Controller(
                    version = i * num_cpus_per_cluster + j,
                    cache = l1_cache, l2_select_num_bits = l2_bits,
                    cluster_id = i, ruby_system = ruby_system)

            exec("ruby_system.l0_cntrl%d = l0_cntrl"
                 % ( i * num_cpus_per_cluster + j))
            exec("ruby_system.l1_cntrl%d = l1_cntrl"
                 % ( i * num_cpus_per_cluster + j))

            #
            # Add controllers and sequencers to the appropriate lists
            #
            cpu_sequencers.append(cpu_seq)
            l0_cntrl_nodes.append(l0_cntrl)
            l1_cntrl_nodes.append(l1_cntrl)

            # Connect the L0 and L1 controllers
            l0_cntrl.mandatoryQueue = MessageBuffer()
            l0_cntrl.bufferToL1 = MessageBuffer(ordered = True)
            l1_cntrl.bufferFromL0 = l0_cntrl.bufferToL1
            l0_cntrl.bufferFromL1 = MessageBuffer(ordered = True)
            l1_cntrl.bufferToL0 = l0_cntrl.bufferFromL1

            # Connect the L1 controllers and the network
            l1_cntrl.requestToL2 = MessageBuffer()
            l1_cntrl.requestToL2.master = ruby_system.network.slave
            l1_cntrl.responseToL2 = MessageBuffer()
            l1_cntrl.responseToL2.master = ruby_system.network.slave
            l1_cntrl.unblockToL2 = MessageBuffer()
            l1_cntrl.unblockToL2.master = ruby_system.network.slave

            l1_cntrl.requestFromL2 = MessageBuffer()
            l1_cntrl.requestFromL2.slave = ruby_system.network.master
            l1_cntrl.responseFromL2 = MessageBuffer()
            l1_cntrl.responseFromL2.slave = ruby_system.network.master


        for j in xrange(num_l2caches_per_cluster):
            l2_cache = L2Cache(size = options.l2_size,
                               assoc = options.l2_assoc,
                               start_index_bit = l2_index_start)

            l2_cntrl = L2Cache_Controller(
                        version = i * num_l2caches_per_cluster + j,
                        L2cache = l2_cache, cluster_id = i,
                        transitions_per_cycle = options.ports,
                        ruby_system = ruby_system)

            exec("ruby_system.l2_cntrl%d = l2_cntrl"
                 % (i * num_l2caches_per_cluster + j))
            l2_cntrl_nodes.append(l2_cntrl)

            # Connect the L2 controllers and the network
            l2_cntrl.DirRequestFromL2Cache = MessageBuffer()
            l2_cntrl.DirRequestFromL2Cache.master = ruby_system.network.slave
            l2_cntrl.L1RequestFromL2Cache = MessageBuffer()
            l2_cntrl.L1RequestFromL2Cache.master = ruby_system.network.slave
            l2_cntrl.responseFromL2Cache = MessageBuffer()
            l2_cntrl.responseFromL2Cache.master = ruby_system.network.slave

            l2_cntrl.unblockToL2Cache = MessageBuffer()
            l2_cntrl.unblockToL2Cache.slave = ruby_system.network.master
            l2_cntrl.L1RequestToL2Cache = MessageBuffer()
            l2_cntrl.L1RequestToL2Cache.slave = ruby_system.network.master
            l2_cntrl.responseToL2Cache = MessageBuffer()
            l2_cntrl.responseToL2Cache.slave = ruby_system.network.master

    phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
    assert(phys_mem_size % options.num_dirs == 0)
    mem_module_size = phys_mem_size / options.num_dirs

    # Run each of the ruby memory controllers at a ratio of the frequency of
    # the ruby system
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
        clk_domain = ruby_system.clk_domain, clk_divider = 3)

    for i in xrange(options.num_dirs):
        #
        # Create the Ruby objects associated with the directory controller
        #
        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        dir_cntrl = Directory_Controller(version = i,
            directory = RubyDirectoryMemory(version = i, size = dir_size),
            transitions_per_cycle = options.ports,
            ruby_system = ruby_system)

        exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

        # Connect the directory controllers and the network
        dir_cntrl.requestToDir = MessageBuffer()
        dir_cntrl.requestToDir.slave = ruby_system.network.master
        dir_cntrl.responseToDir = MessageBuffer()
        dir_cntrl.responseToDir.slave = ruby_system.network.master
        dir_cntrl.responseFromDir = MessageBuffer()
        dir_cntrl.responseFromDir.master = ruby_system.network.slave
        dir_cntrl.responseFromMemory = MessageBuffer()

    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i, ruby_system = ruby_system)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        # Connect the dma controller to the network
        dma_cntrl.mandatoryQueue = MessageBuffer()
        dma_cntrl.responseFromDir = MessageBuffer(ordered = True)
        dma_cntrl.responseFromDir.slave = ruby_system.network.master
        dma_cntrl.requestToDir = MessageBuffer()
        dma_cntrl.requestToDir.master = ruby_system.network.slave

    all_cntrls = l0_cntrl_nodes + \
                 l1_cntrl_nodes + \
                 l2_cntrl_nodes + \
                 dir_cntrl_nodes + \
                 dma_cntrl_nodes

    # Create the io controller and the sequencer
    if full_system:
        io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the dma controller to the network
        io_controller.mandatoryQueue = MessageBuffer()
        io_controller.responseFromDir = MessageBuffer(ordered = True)
        io_controller.responseFromDir.slave = ruby_system.network.master
        io_controller.requestToDir = MessageBuffer()
        io_controller.requestToDir.master = ruby_system.network.slave

        all_cntrls = all_cntrls + [io_controller]

    ruby_system.network.number_of_virtual_networks = 3
    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, dir_cntrl_nodes, topology)