# Diff listing of MOESI_hammer.py between gem5 revisions 10519:7a3ad4b09ce4
# (old) and 10524:fff17530cef6 (new).
1# Copyright (c) 2006-2007 The Regents of The University of Michigan
2# Copyright (c) 2009 Advanced Micro Devices, Inc.
3# All rights reserved.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Brad Beckmann
29
30import math
31import m5
32from m5.objects import *
33from m5.defines import buildEnv
34from Ruby import create_topology
35
36#
37# Note: the L1 Cache latency is only used by the sequencer on fast path hits
38#
class L1Cache(RubyCache):
    # Fast-path hit latency (cycles); only consulted by the sequencer.
    latency = 2
41
42#
43# Note: the L2 Cache latency is not currently used
44#
class L2Cache(RubyCache):
    # Nominal latency (cycles); not currently consumed by the protocol.
    latency = 10
47
48#
49# Probe filter is a cache, latency is not used
50#
class ProbeFilter(RubyCache):
    # The probe filter is modeled as a cache; its latency is unused.
    latency = 1
53
def define_options(parser):
    # Register the MOESI_hammer-specific command-line switches on *parser*.
    # All three are plain store_true flags; registration order (and hence
    # --help order) matches the original script.
    hammer_flags = (
        ("--allow-atomic-migration",
         "allow migratory sharing for atomic only accessed blocks"),
        ("--pf-on", "Hammer: enable Probe Filter"),
        ("--dir-on", "Hammer: enable Full-bit Directory"),
    )
    for flag, help_text in hammer_flags:
        parser.add_option(flag, action="store_true", help=help_text)
61
def create_system(options, full_system, system, dma_ports, ruby_system):
    """Build the MOESI_hammer Ruby memory system.

    Creates one L1/L2 controller + sequencer per cpu, one directory
    controller per --num-dirs, one DMA controller per entry of *dma_ports*,
    wires each controller's message buffers to the ruby network, and
    returns (cpu_sequencers, dir_cntrl_nodes, topology).

    NOTE(review): this copy of the function was truncated mid-body in the
    corrupted diff listing; the directory/dma/io tail below is reconstructed
    from the newer revision (10524) visible later in the same listing.
    """

    if buildEnv['PROTOCOL'] != 'MOESI_hammer':
        panic("This script requires the MOESI_hammer protocol to be built.")

    cpu_sequencers = []

    # The ruby network creation expects the list of nodes in the system to
    # be consistent with the NetDest list.  Therefore the l1 controller
    # nodes must be listed before the directory nodes and directory nodes
    # before dma nodes, etc.
    l1_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    # Must create the individual controllers before the network to ensure
    # the controller constructors are called before the network constructor
    block_size_bits = int(math.log(options.cacheline_size, 2))

    for i in xrange(options.num_cpus):
        # First create the Ruby objects associated with this cpu
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits,
                            is_icache = True)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits)
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = block_size_bits)

        l1_cntrl = L1Cache_Controller(version = i,
                                      L1Icache = l1i_cache,
                                      L1Dcache = l1d_cache,
                                      L2cache = l2_cache,
                                      no_mig_atomic = not \
                                        options.allow_atomic_migration,
                                      send_evictions = (
                                          options.cpu_type == "detailed"),
                                      transitions_per_cycle = options.ports,
                                      clk_domain=system.cpu[i].clk_domain,
                                      ruby_system = ruby_system)

        cpu_seq = RubySequencer(version = i,
                                icache = l1i_cache,
                                dcache = l1d_cache,
                                clk_domain=system.cpu[i].clk_domain,
                                ruby_system = ruby_system)

        l1_cntrl.sequencer = cpu_seq
        if options.recycle_latency:
            l1_cntrl.recycle_latency = options.recycle_latency

        exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)

        # Add controllers and sequencers to the appropriate lists
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

        # Connect the buffers from the controller to the network
        l1_cntrl.requestFromCache = ruby_system.network.slave
        l1_cntrl.responseFromCache = ruby_system.network.slave
        l1_cntrl.unblockFromCache = ruby_system.network.slave

        # Connect the buffers from the network to the controller
        l1_cntrl.forwardToCache = ruby_system.network.master
        l1_cntrl.responseToCache = ruby_system.network.master

    phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
    assert(phys_mem_size % options.num_dirs == 0)
    mem_module_size = phys_mem_size / options.num_dirs

    # Determine size and index bits for the probe filter.
    # By default, the probe filter size is configured to be twice the
    # size of the L2 cache.
    pf_size = MemorySize(options.l2_size)
    pf_size.value = pf_size.value * 2
    dir_bits = int(math.log(options.num_dirs, 2))
    pf_bits = int(math.log(pf_size.value, 2))
    if options.numa_high_bit:
        if options.pf_on or options.dir_on:
            # if numa high bit explicitly set, make sure it does not
            # overlap with the probe filter index
            assert(options.numa_high_bit - dir_bits > pf_bits)

        # set the probe filter start bit to just above the block offset
        pf_start_bit = block_size_bits
    else:
        if dir_bits > 0:
            pf_start_bit = dir_bits + block_size_bits - 1
        else:
            pf_start_bit = block_size_bits

    # Run each of the ruby memory controllers at a ratio of the frequency
    # of the ruby system.
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
        clk_domain=ruby_system.clk_domain,
        clk_divider=3)

    for i in xrange(options.num_dirs):
        # Create the Ruby objects associated with the directory controller
        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        pf = ProbeFilter(size = pf_size, assoc = 4,
                         start_index_bit = pf_start_bit)

        dir_cntrl = Directory_Controller(version = i,
                                         directory = RubyDirectoryMemory(
                                             version = i, size = dir_size),
                                         probeFilter = pf,
                                         probe_filter_enabled = options.pf_on,
                                         full_bit_dir_enabled = options.dir_on,
                                         transitions_per_cycle = options.ports,
                                         ruby_system = ruby_system)

        if options.recycle_latency:
            dir_cntrl.recycle_latency = options.recycle_latency

        exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

        # Connect the directory controller to the network
        dir_cntrl.forwardFromDir = ruby_system.network.slave
        dir_cntrl.responseFromDir = ruby_system.network.slave
        dir_cntrl.dmaResponseFromDir = ruby_system.network.slave

        dir_cntrl.unblockToDir = ruby_system.network.master
        dir_cntrl.responseToDir = ruby_system.network.master
        dir_cntrl.requestToDir = ruby_system.network.master
        dir_cntrl.dmaRequestToDir = ruby_system.network.master

    for i, dma_port in enumerate(dma_ports):
        # Create the Ruby objects associated with the dma controller
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system,
                               slave = dma_port)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        if options.recycle_latency:
            dma_cntrl.recycle_latency = options.recycle_latency

        # Connect the dma controller to the network
        dma_cntrl.responseFromDir = ruby_system.network.master
        dma_cntrl.requestToDir = ruby_system.network.slave

    all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes

    # Create the io controller and the sequencer
    if full_system:
        io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the io controller to the network
        io_controller.responseFromDir = ruby_system.network.master
        io_controller.requestToDir = ruby_system.network.slave

        all_cntrls = all_cntrls + [io_controller]

    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, dir_cntrl_nodes, topology)
1# Copyright (c) 2006-2007 The Regents of The University of Michigan
2# Copyright (c) 2009 Advanced Micro Devices, Inc.
3# All rights reserved.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Brad Beckmann
29
30import math
31import m5
32from m5.objects import *
33from m5.defines import buildEnv
34from Ruby import create_topology
35
36#
37# Note: the L1 Cache latency is only used by the sequencer on fast path hits
38#
class L1Cache(RubyCache):
    # Latency (cycles) charged on sequencer fast-path hits.
    latency = 2
41
42#
43# Note: the L2 Cache latency is not currently used
44#
class L2Cache(RubyCache):
    # Latency (cycles); presently unused by the protocol machinery.
    latency = 10
47
48#
49# Probe filter is a cache, latency is not used
50#
class ProbeFilter(RubyCache):
    # Probe filter modeled as a RubyCache; latency is not used.
    latency = 1
53
def define_options(parser):
    # Attach the hammer protocol's command-line flags to *parser*.
    # Each entry is (flag, help); all are store_true switches, registered
    # in the same order as before so --help output is unchanged.
    for flag, help_text in (
            ("--allow-atomic-migration",
             "allow migratory sharing for atomic only accessed blocks"),
            ("--pf-on", "Hammer: enable Probe Filter"),
            ("--dir-on", "Hammer: enable Full-bit Directory")):
        parser.add_option(flag, action="store_true", help=help_text)
61
def create_system(options, full_system, system, dma_ports, ruby_system):
    """Build the MOESI_hammer Ruby memory system.

    Creates one L1/L2 controller + sequencer per cpu, one directory
    controller (with probe filter) per --num-dirs, one DMA controller per
    entry of *dma_ports*, plus an optional io controller in full-system
    mode; wires every controller's message buffers to the ruby network and
    returns (cpu_sequencers, dir_cntrl_nodes, topology).

    NOTE(review): the original text of this block was a corrupted
    interleaving of two revisions (10519 and 10524) — duplicated directory
    loop fragments, two `probeFilter = pf` lines, and a doubled function
    tail.  This body is the clean newer revision (10524), which drops the
    RubyMemoryControl/memBuffer plumbing and the use_map/map_levels/
    numa_high_bit directory arguments.
    """

    if buildEnv['PROTOCOL'] != 'MOESI_hammer':
        panic("This script requires the MOESI_hammer protocol to be built.")

    cpu_sequencers = []

    # The ruby network creation expects the list of nodes in the system to
    # be consistent with the NetDest list.  Therefore the l1 controller
    # nodes must be listed before the directory nodes and directory nodes
    # before dma nodes, etc.
    l1_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    # Must create the individual controllers before the network to ensure
    # the controller constructors are called before the network constructor
    block_size_bits = int(math.log(options.cacheline_size, 2))

    for i in xrange(options.num_cpus):
        # First create the Ruby objects associated with this cpu
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits,
                            is_icache = True)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits)
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = block_size_bits)

        l1_cntrl = L1Cache_Controller(version = i,
                                      L1Icache = l1i_cache,
                                      L1Dcache = l1d_cache,
                                      L2cache = l2_cache,
                                      no_mig_atomic = not \
                                        options.allow_atomic_migration,
                                      send_evictions = (
                                          options.cpu_type == "detailed"),
                                      transitions_per_cycle = options.ports,
                                      clk_domain=system.cpu[i].clk_domain,
                                      ruby_system = ruby_system)

        cpu_seq = RubySequencer(version = i,
                                icache = l1i_cache,
                                dcache = l1d_cache,
                                clk_domain=system.cpu[i].clk_domain,
                                ruby_system = ruby_system)

        l1_cntrl.sequencer = cpu_seq
        if options.recycle_latency:
            l1_cntrl.recycle_latency = options.recycle_latency

        exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)

        # Add controllers and sequencers to the appropriate lists
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

        # Connect the buffers from the controller to the network
        l1_cntrl.requestFromCache = ruby_system.network.slave
        l1_cntrl.responseFromCache = ruby_system.network.slave
        l1_cntrl.unblockFromCache = ruby_system.network.slave

        # Connect the buffers from the network to the controller
        l1_cntrl.forwardToCache = ruby_system.network.master
        l1_cntrl.responseToCache = ruby_system.network.master

    phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
    assert(phys_mem_size % options.num_dirs == 0)
    mem_module_size = phys_mem_size / options.num_dirs

    # Determine size and index bits for the probe filter.
    # By default, the probe filter size is configured to be twice the
    # size of the L2 cache.
    pf_size = MemorySize(options.l2_size)
    pf_size.value = pf_size.value * 2
    dir_bits = int(math.log(options.num_dirs, 2))
    pf_bits = int(math.log(pf_size.value, 2))
    if options.numa_high_bit:
        if options.pf_on or options.dir_on:
            # if numa high bit explicitly set, make sure it does not
            # overlap with the probe filter index
            assert(options.numa_high_bit - dir_bits > pf_bits)

        # set the probe filter start bit to just above the block offset
        pf_start_bit = block_size_bits
    else:
        if dir_bits > 0:
            pf_start_bit = dir_bits + block_size_bits - 1
        else:
            pf_start_bit = block_size_bits

    # Run each of the ruby memory controllers at a ratio of the frequency
    # of the ruby system.
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
        clk_domain=ruby_system.clk_domain,
        clk_divider=3)

    for i in xrange(options.num_dirs):
        # Create the Ruby objects associated with the directory controller
        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        pf = ProbeFilter(size = pf_size, assoc = 4,
                         start_index_bit = pf_start_bit)

        dir_cntrl = Directory_Controller(version = i,
                                         directory = RubyDirectoryMemory(
                                             version = i, size = dir_size),
                                         probeFilter = pf,
                                         probe_filter_enabled = options.pf_on,
                                         full_bit_dir_enabled = options.dir_on,
                                         transitions_per_cycle = options.ports,
                                         ruby_system = ruby_system)

        if options.recycle_latency:
            dir_cntrl.recycle_latency = options.recycle_latency

        exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

        # Connect the directory controller to the network
        dir_cntrl.forwardFromDir = ruby_system.network.slave
        dir_cntrl.responseFromDir = ruby_system.network.slave
        dir_cntrl.dmaResponseFromDir = ruby_system.network.slave

        dir_cntrl.unblockToDir = ruby_system.network.master
        dir_cntrl.responseToDir = ruby_system.network.master
        dir_cntrl.requestToDir = ruby_system.network.master
        dir_cntrl.dmaRequestToDir = ruby_system.network.master

    for i, dma_port in enumerate(dma_ports):
        # Create the Ruby objects associated with the dma controller
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system,
                               slave = dma_port)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        if options.recycle_latency:
            dma_cntrl.recycle_latency = options.recycle_latency

        # Connect the dma controller to the network
        dma_cntrl.responseFromDir = ruby_system.network.master
        dma_cntrl.requestToDir = ruby_system.network.slave

    all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes

    # Create the io controller and the sequencer
    if full_system:
        io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the io controller to the network
        io_controller.responseFromDir = ruby_system.network.master
        io_controller.requestToDir = ruby_system.network.slave

        all_cntrls = all_cntrls + [io_controller]

    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, dir_cntrl_nodes, topology)