MESI_Three_Level.py (13731:67cd980cb20f)
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009,2015 Advanced Micro Devices, Inc.
# Copyright (c) 2013 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
#          Nilay Vaish

import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology, create_directories
from Ruby import send_evicts

#
# Declare caches used by the protocol
#
class L0Cache(RubyCache): pass
class L1Cache(RubyCache): pass
class L2Cache(RubyCache): pass

def define_options(parser):
    parser.add_option("--num-clusters", type = "int", default = 1,
            help = "number of clusters in a design in which there are "
                   "shared caches private to clusters")
    return

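# Hypothetical example invocation (exact front-end script and flags depend
# on the build; --num-cpus and --num-l2caches come from the common option
# parser):
#   gem5.opt configs/example/se.py --ruby --num-cpus=8 --num-l2caches=4 \
#       --num-clusters=2 --cmd=<workload>
# This would give two clusters, each with four private L0/L1 cache pairs
# sharing two L2 banks.
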
def create_system(options, full_system, system, dma_ports, bootmem,
                  ruby_system):

    if buildEnv['PROTOCOL'] != 'MESI_Three_Level':
        fatal("This script requires the MESI_Three_Level protocol to be "
              "built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes
    # must be listed before the directory nodes and directory nodes before
    # dma nodes, etc.
    #
    l0_cntrl_nodes = []
    l1_cntrl_nodes = []
    l2_cntrl_nodes = []
    dma_cntrl_nodes = []

    # Use integer division so the per-cluster counts remain ints (they are
    # passed to range() below).
    assert (options.num_cpus % options.num_clusters == 0)
    num_cpus_per_cluster = options.num_cpus // options.num_clusters

    assert (options.num_l2caches % options.num_clusters == 0)
    num_l2caches_per_cluster = options.num_l2caches // options.num_clusters

    l2_bits = int(math.log(num_l2caches_per_cluster, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))
    l2_index_start = block_size_bits + l2_bits
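    # Worked example (illustrative numbers only): with 64 B cache lines,
    # block_size_bits = 6; with 4 L2 banks per cluster, l2_bits = 2, so
    # l2_index_start = 8 and address bits [7:6] pick the L2 bank.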

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
    for i in range(options.num_clusters):
        for j in range(num_cpus_per_cluster):
            #
            # First create the Ruby objects associated with this cpu
            #
            l0i_cache = L0Cache(size = '4096B', assoc = 1, is_icache = True,
                                start_index_bit = block_size_bits,
                                replacement_policy = LRUReplacementPolicy())

            l0d_cache = L0Cache(size = '4096B', assoc = 1, is_icache = False,
                                start_index_bit = block_size_bits,
                                replacement_policy = LRUReplacementPolicy())

            # the ruby random tester reuses num_cpus to specify the
            # number of cpu ports connected to the tester object, which
            # is stored in system.cpu. because there is only ever one
            # tester object, num_cpus is not necessarily equal to the
            # size of system.cpu; therefore if len(system.cpu) == 1
            # we use system.cpu[0] to set the clk_domain, thereby ensuring
            # we don't index off the end of the cpu list.
            if len(system.cpu) == 1:
                clk_domain = system.cpu[0].clk_domain
            else:
                clk_domain = system.cpu[i].clk_domain

            l0_cntrl = L0Cache_Controller(
                version = i * num_cpus_per_cluster + j, Icache = l0i_cache,
                Dcache = l0d_cache, send_evictions = send_evicts(options),
                clk_domain = clk_domain, ruby_system = ruby_system)

            cpu_seq = RubySequencer(version = i * num_cpus_per_cluster + j,
                                    icache = l0i_cache,
                                    clk_domain = clk_domain,
                                    dcache = l0d_cache,
                                    ruby_system = ruby_system)

            l0_cntrl.sequencer = cpu_seq

            l1_cache = L1Cache(size = options.l1d_size,
                               assoc = options.l1d_assoc,
                               start_index_bit = block_size_bits,
                               is_icache = False)

            l1_cntrl = L1Cache_Controller(
                version = i * num_cpus_per_cluster + j,
                cache = l1_cache, l2_select_num_bits = l2_bits,
                cluster_id = i, ruby_system = ruby_system)

            exec("ruby_system.l0_cntrl%d = l0_cntrl"
                 % (i * num_cpus_per_cluster + j))
            exec("ruby_system.l1_cntrl%d = l1_cntrl"
                 % (i * num_cpus_per_cluster + j))
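            # Note: each controller has to become a child of ruby_system;
            # the exec() calls above are equivalent to, e.g.,
            # setattr(ruby_system, "l0_cntrl%d" % version, l0_cntrl).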

            #
            # Add controllers and sequencers to the appropriate lists
            #
            cpu_sequencers.append(cpu_seq)
            l0_cntrl_nodes.append(l0_cntrl)
            l1_cntrl_nodes.append(l1_cntrl)

            # Connect the L0 and L1 controllers
            l0_cntrl.mandatoryQueue = MessageBuffer()
            l0_cntrl.bufferToL1 = MessageBuffer(ordered = True)
            l1_cntrl.bufferFromL0 = l0_cntrl.bufferToL1
            l0_cntrl.bufferFromL1 = MessageBuffer(ordered = True)
            l1_cntrl.bufferToL0 = l0_cntrl.bufferFromL1
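            # The L0<->L1 buffers above are shared point-to-point between
            # the two controllers rather than routed through the Ruby
            # network; ordered = True keeps each buffer FIFO.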

            # Connect the L1 controllers and the network
            l1_cntrl.requestToL2 = MessageBuffer()
            l1_cntrl.requestToL2.master = ruby_system.network.slave
            l1_cntrl.responseToL2 = MessageBuffer()
            l1_cntrl.responseToL2.master = ruby_system.network.slave
            l1_cntrl.unblockToL2 = MessageBuffer()
            l1_cntrl.unblockToL2.master = ruby_system.network.slave

            l1_cntrl.requestFromL2 = MessageBuffer()
            l1_cntrl.requestFromL2.slave = ruby_system.network.master
            l1_cntrl.responseFromL2 = MessageBuffer()
            l1_cntrl.responseFromL2.slave = ruby_system.network.master


        for j in range(num_l2caches_per_cluster):
            l2_cache = L2Cache(size = options.l2_size,
                               assoc = options.l2_assoc,
                               start_index_bit = l2_index_start)

            l2_cntrl = L2Cache_Controller(
                version = i * num_l2caches_per_cluster + j,
                L2cache = l2_cache, cluster_id = i,
                transitions_per_cycle = options.ports,
                ruby_system = ruby_system)

            exec("ruby_system.l2_cntrl%d = l2_cntrl"
                 % (i * num_l2caches_per_cluster + j))
            l2_cntrl_nodes.append(l2_cntrl)

            # Connect the L2 controllers and the network
            l2_cntrl.DirRequestFromL2Cache = MessageBuffer()
            l2_cntrl.DirRequestFromL2Cache.master = ruby_system.network.slave
            l2_cntrl.L1RequestFromL2Cache = MessageBuffer()
            l2_cntrl.L1RequestFromL2Cache.master = ruby_system.network.slave
            l2_cntrl.responseFromL2Cache = MessageBuffer()
            l2_cntrl.responseFromL2Cache.master = ruby_system.network.slave

            l2_cntrl.unblockToL2Cache = MessageBuffer()
            l2_cntrl.unblockToL2Cache.slave = ruby_system.network.master
            l2_cntrl.L1RequestToL2Cache = MessageBuffer()
            l2_cntrl.L1RequestToL2Cache.slave = ruby_system.network.master
            l2_cntrl.responseToL2Cache = MessageBuffer()
            l2_cntrl.responseToL2Cache.slave = ruby_system.network.master

    # Run each of the ruby memory controllers at a ratio of the frequency of
    # the ruby system
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
        clk_domain = ruby_system.clk_domain, clk_divider = 3)
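    # For example, with a (hypothetical) 2 GHz Ruby clock and
    # clk_divider = 3, the memory controllers run at roughly 667 MHz.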

    mem_dir_cntrl_nodes, rom_dir_cntrl_node = create_directories(
        options, bootmem, ruby_system, system)
    dir_cntrl_nodes = mem_dir_cntrl_nodes[:]
    if rom_dir_cntrl_node is not None:
        dir_cntrl_nodes.append(rom_dir_cntrl_node)
    for dir_cntrl in dir_cntrl_nodes:
        # Connect the directory controllers and the network
        dir_cntrl.requestToDir = MessageBuffer()
        dir_cntrl.requestToDir.slave = ruby_system.network.master
        dir_cntrl.responseToDir = MessageBuffer()
        dir_cntrl.responseToDir.slave = ruby_system.network.master
        dir_cntrl.responseFromDir = MessageBuffer()
        dir_cntrl.responseFromDir.master = ruby_system.network.slave
        dir_cntrl.responseFromMemory = MessageBuffer()
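        # responseFromMemory carries responses from the memory attached to
        # this directory back into the directory state machine; it is
        # internal to the controller, so it gets no network binding here.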

    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i, ruby_system = ruby_system)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        # Connect the dma controller to the network
        dma_cntrl.mandatoryQueue = MessageBuffer()
        dma_cntrl.responseFromDir = MessageBuffer(ordered = True)
        dma_cntrl.responseFromDir.slave = ruby_system.network.master
        dma_cntrl.requestToDir = MessageBuffer()
        dma_cntrl.requestToDir.master = ruby_system.network.slave

    all_cntrls = l0_cntrl_nodes + \
                 l1_cntrl_nodes + \
                 l2_cntrl_nodes + \
                 dir_cntrl_nodes + \
                 dma_cntrl_nodes

    # Create the io controller and the sequencer
    if full_system:
        io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the dma controller to the network
        io_controller.mandatoryQueue = MessageBuffer()
        io_controller.responseFromDir = MessageBuffer(ordered = True)
        io_controller.responseFromDir.slave = ruby_system.network.master
        io_controller.requestToDir = MessageBuffer()
        io_controller.requestToDir.master = ruby_system.network.slave

        all_cntrls = all_cntrls + [io_controller]

    ruby_system.network.number_of_virtual_networks = 3
    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, mem_dir_cntrl_nodes, topology)
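
# Illustrative result (hypothetical options: --num-clusters=2 --num-cpus=4
# --num-l2caches=2): create_system() builds l0_cntrl0..3 and l1_cntrl0..3
# (one private L0/L1 pair per CPU), l2_cntrl0 and l2_cntrl1 (one shared L2
# bank per cluster), plus the directory and DMA controllers, and returns
# the CPU sequencers, the memory directory controllers, and the topology.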