MESI_Three_Level.py (13980:62a28c423e91)
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009,2015 Advanced Micro Devices, Inc.
# Copyright (c) 2013 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
#          Nilay Vaish

import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology, create_directories
from Ruby import send_evicts
from common import FileSystemConfig

#
# Declare caches used by the protocol
#
class L0Cache(RubyCache): pass
class L1Cache(RubyCache): pass
class L2Cache(RubyCache): pass

def define_options(parser):
    parser.add_option("--num-clusters", type = "int", default = 1,
            help = "number of clusters in a design in which there are "
                   "shared caches private to clusters")
    return

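# For illustration (not defaults): with --num-cpus=8, --num-l2caches=4 and
# --num-clusters=2, each cluster gets 4 CPUs and 2 L2 banks shared within
# that cluster; num_cpus and num_l2caches must both be divisible by
# num_clusters (asserted in create_system below).
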
def create_system(options, full_system, system, dma_ports, bootmem,
                  ruby_system):

    if buildEnv['PROTOCOL'] != 'MESI_Three_Level':
        fatal("This script requires the MESI_Three_Level protocol to be "
              "built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes
    # must be listed before the directory nodes and directory nodes before
    # dma nodes, etc.
    #
    l0_cntrl_nodes = []
    l1_cntrl_nodes = []
    l2_cntrl_nodes = []
    dma_cntrl_nodes = []

    assert (options.num_cpus % options.num_clusters == 0)
    num_cpus_per_cluster = options.num_cpus / options.num_clusters

    assert (options.num_l2caches % options.num_clusters == 0)
    num_l2caches_per_cluster = options.num_l2caches / options.num_clusters

    l2_bits = int(math.log(num_l2caches_per_cluster, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))
    l2_index_start = block_size_bits + l2_bits
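    # For example, with the default 64-byte cache line and a single L2 bank
    # per cluster, block_size_bits = 6 and l2_bits = 0, so L2 set indexing
    # starts at bit 6; with 4 banks per cluster, l2_bits = 2 and the index
    # would start at bit 8.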

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
    for i in range(options.num_clusters):
        for j in range(num_cpus_per_cluster):
            #
            # First create the Ruby objects associated with this cpu
            #
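            # Each core gets its own small, direct-mapped L0 instruction and
            # data cache (4 KiB here) sitting in front of a private, unified
            # L1; the L0 sizes below are fixed in this script rather than
            # taken from the command line.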
            l0i_cache = L0Cache(size = '4096B', assoc = 1, is_icache = True,
                start_index_bit = block_size_bits,
                replacement_policy = LRUReplacementPolicy())

            l0d_cache = L0Cache(size = '4096B', assoc = 1, is_icache = False,
                start_index_bit = block_size_bits,
                replacement_policy = LRUReplacementPolicy())

            # the ruby random tester reuses num_cpus to specify the
            # number of cpu ports connected to the tester object, which
            # is stored in system.cpu. because there is only ever one
            # tester object, num_cpus is not necessarily equal to the
            # size of system.cpu; therefore if len(system.cpu) == 1
            # we use system.cpu[0] to set the clk_domain, thereby ensuring
            # we don't index off the end of the cpu list.
            if len(system.cpu) == 1:
                clk_domain = system.cpu[0].clk_domain
            else:
                clk_domain = system.cpu[i].clk_domain

            l0_cntrl = L0Cache_Controller(
                   version = i * num_cpus_per_cluster + j, Icache = l0i_cache,
                   Dcache = l0d_cache, send_evictions = send_evicts(options),
                   clk_domain = clk_domain, ruby_system = ruby_system)

            cpu_seq = RubySequencer(version = i * num_cpus_per_cluster + j,
                                    icache = l0i_cache,
                                    clk_domain = clk_domain,
                                    dcache = l0d_cache,
                                    ruby_system = ruby_system)

            l0_cntrl.sequencer = cpu_seq
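            # Attaching the sequencer to the L0 controller means CPU (or
            # Ruby tester) memory requests enter the cache hierarchy at the
            # L0 level.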

            l1_cache = L1Cache(size = options.l1d_size,
                               assoc = options.l1d_assoc,
                               start_index_bit = block_size_bits,
                               is_icache = False)

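            # The L1 controller picks a destination L2 bank using
            # l2_select_num_bits address bits; cluster_id keeps that choice
            # within this cluster's own L2 banks.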
            l1_cntrl = L1Cache_Controller(
                    version = i * num_cpus_per_cluster + j,
                    cache = l1_cache, l2_select_num_bits = l2_bits,
                    cluster_id = i, ruby_system = ruby_system)

            exec("ruby_system.l0_cntrl%d = l0_cntrl"
                 % ( i * num_cpus_per_cluster + j))
            exec("ruby_system.l1_cntrl%d = l1_cntrl"
                 % ( i * num_cpus_per_cluster + j))

            #
            # Add controllers and sequencers to the appropriate lists
            #
            cpu_sequencers.append(cpu_seq)
            l0_cntrl_nodes.append(l0_cntrl)
            l1_cntrl_nodes.append(l1_cntrl)

            # Connect the L0 and L1 controllers
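            # The L0 and L1 of a core are wired point-to-point with ordered
            # message buffers; this traffic never goes through the Ruby
            # network.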
            l0_cntrl.mandatoryQueue = MessageBuffer()
            l0_cntrl.bufferToL1 = MessageBuffer(ordered = True)
            l1_cntrl.bufferFromL0 = l0_cntrl.bufferToL1
            l0_cntrl.bufferFromL1 = MessageBuffer(ordered = True)
            l1_cntrl.bufferToL0 = l0_cntrl.bufferFromL1

            # Connect the L1 controllers and the network
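            # Convention in this era of Ruby: a buffer's .master port plugs
            # into network.slave for traffic the controller sends into the
            # network, and its .slave port plugs into network.master for
            # traffic it receives.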
            l1_cntrl.requestToL2 = MessageBuffer()
            l1_cntrl.requestToL2.master = ruby_system.network.slave
            l1_cntrl.responseToL2 = MessageBuffer()
            l1_cntrl.responseToL2.master = ruby_system.network.slave
            l1_cntrl.unblockToL2 = MessageBuffer()
            l1_cntrl.unblockToL2.master = ruby_system.network.slave

            l1_cntrl.requestFromL2 = MessageBuffer()
            l1_cntrl.requestFromL2.slave = ruby_system.network.master
            l1_cntrl.responseFromL2 = MessageBuffer()
            l1_cntrl.responseFromL2.slave = ruby_system.network.master


        for j in range(num_l2caches_per_cluster):
            l2_cache = L2Cache(size = options.l2_size,
                               assoc = options.l2_assoc,
                               start_index_bit = l2_index_start)

            l2_cntrl = L2Cache_Controller(
                        version = i * num_l2caches_per_cluster + j,
                        L2cache = l2_cache, cluster_id = i,
                        transitions_per_cycle = options.ports,
                        ruby_system = ruby_system)

            exec("ruby_system.l2_cntrl%d = l2_cntrl"
                 % (i * num_l2caches_per_cluster + j))
            l2_cntrl_nodes.append(l2_cntrl)

            # Connect the L2 controllers and the network
            l2_cntrl.DirRequestFromL2Cache = MessageBuffer()
            l2_cntrl.DirRequestFromL2Cache.master = ruby_system.network.slave
            l2_cntrl.L1RequestFromL2Cache = MessageBuffer()
            l2_cntrl.L1RequestFromL2Cache.master = ruby_system.network.slave
            l2_cntrl.responseFromL2Cache = MessageBuffer()
            l2_cntrl.responseFromL2Cache.master = ruby_system.network.slave

            l2_cntrl.unblockToL2Cache = MessageBuffer()
            l2_cntrl.unblockToL2Cache.slave = ruby_system.network.master
            l2_cntrl.L1RequestToL2Cache = MessageBuffer()
            l2_cntrl.L1RequestToL2Cache.slave = ruby_system.network.master
            l2_cntrl.responseToL2Cache = MessageBuffer()
            l2_cntrl.responseToL2Cache.slave = ruby_system.network.master

    # Run each of the ruby memory controllers at a ratio of the frequency of
    # the ruby system
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
        clk_domain = ruby_system.clk_domain, clk_divider = 3)

    mem_dir_cntrl_nodes, rom_dir_cntrl_node = create_directories(
        options, bootmem, ruby_system, system)
    dir_cntrl_nodes = mem_dir_cntrl_nodes[:]
    if rom_dir_cntrl_node is not None:
        dir_cntrl_nodes.append(rom_dir_cntrl_node)
    for dir_cntrl in dir_cntrl_nodes:
        # Connect the directory controllers and the network
        dir_cntrl.requestToDir = MessageBuffer()
        dir_cntrl.requestToDir.slave = ruby_system.network.master
        dir_cntrl.responseToDir = MessageBuffer()
        dir_cntrl.responseToDir.slave = ruby_system.network.master
        dir_cntrl.responseFromDir = MessageBuffer()
        dir_cntrl.responseFromDir.master = ruby_system.network.slave
        dir_cntrl.responseFromMemory = MessageBuffer()
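        # responseFromMemory is the directory's internal buffer for data
        # returned by its memory controller, so it is not connected to the
        # network.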

    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i, ruby_system = ruby_system)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        # Connect the dma controller to the network
        dma_cntrl.mandatoryQueue = MessageBuffer()
        dma_cntrl.responseFromDir = MessageBuffer(ordered = True)
        dma_cntrl.responseFromDir.slave = ruby_system.network.master
        dma_cntrl.requestToDir = MessageBuffer()
        dma_cntrl.requestToDir.master = ruby_system.network.slave

    all_cntrls = l0_cntrl_nodes + \
                 l1_cntrl_nodes + \
                 l2_cntrl_nodes + \
                 dir_cntrl_nodes + \
                 dma_cntrl_nodes

    # Create the io controller and the sequencer
    if full_system:
        io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
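        # Reusing the DMA machine for IO: version = len(dma_ports) keeps its
        # version number from colliding with the per-device DMA controllers
        # created above.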
        ruby_system.io_controller = io_controller

        # Connect the dma controller to the network
        io_controller.mandatoryQueue = MessageBuffer()
        io_controller.responseFromDir = MessageBuffer(ordered = True)
        io_controller.responseFromDir.slave = ruby_system.network.master
        io_controller.requestToDir = MessageBuffer()
        io_controller.requestToDir.master = ruby_system.network.slave

        all_cntrls = all_cntrls + [io_controller]
    # Register configuration with filesystem
    else:
        FileSystemConfig.config_filesystem(options)
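
        # In SE mode, register the CPUs and the cache hierarchy with
        # FileSystemConfig so the simulated /proc and /sys entries can
        # describe this topology to the workload.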

        for i in xrange(options.num_clusters):
            for j in xrange(num_cpus_per_cluster):
                FileSystemConfig.register_cpu(physical_package_id = 0,
                            core_siblings = xrange(options.num_cpus),
                            core_id = i*num_cpus_per_cluster+j,
                            thread_siblings = [])

                FileSystemConfig.register_cache(level = 0,
                            idu_type = 'Instruction',
                            size = '4096B',
                            line_size = options.cacheline_size,
                            assoc = 1,
                            cpus = [i*num_cpus_per_cluster+j])
                FileSystemConfig.register_cache(level = 0,
                            idu_type = 'Data',
                            size = '4096B',
                            line_size = options.cacheline_size,
                            assoc = 1,
                            cpus = [i*num_cpus_per_cluster+j])

                FileSystemConfig.register_cache(level = 1,
                            idu_type = 'Unified',
                            size = options.l1d_size,
                            line_size = options.cacheline_size,
                            assoc = options.l1d_assoc,
                            cpus = [i*num_cpus_per_cluster+j])

                FileSystemConfig.register_cache(level = 2,
                            idu_type = 'Unified',
                            size = str(MemorySize(options.l2_size) * \
                                       num_l2caches_per_cluster)+'B',
                            line_size = options.cacheline_size,
                            assoc = options.l2_assoc,
                            cpus = [n for n in xrange(i*num_cpus_per_cluster, \
                                        (i+1)*num_cpus_per_cluster)])

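    # Three virtual networks: roughly one each for the request, response,
    # and unblock/ack message classes carried by the buffers wired above.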
    ruby_system.network.number_of_virtual_networks = 3
    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, mem_dir_cntrl_nodes, topology)