# MOESI_hammer.py -- reconstructed from a merged diff view of revisions
# 9826:014ff1fbff6d and 9841:69c158420c51 (the newer revision adds
# transitions_per_cycle to each controller).
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann

import math

import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology

36#
37# Note: the L1 Cache latency is only used by the sequencer on fast path hits
38#
39class L1Cache(RubyCache):
40 latency = 2
41
42#
43# Note: the L2 Cache latency is not currently used
44#
45class L2Cache(RubyCache):
46 latency = 10
47
48#
49# Probe filter is a cache, latency is not used
50#
51class ProbeFilter(RubyCache):
52 latency = 1
53
54def define_options(parser):
55 parser.add_option("--allow-atomic-migration", action="store_true",
56 help="allow migratory sharing for atomic only accessed blocks")
57 parser.add_option("--pf-on", action="store_true",
58 help="Hammer: enable Probe Filter")
59 parser.add_option("--dir-on", action="store_true",
60 help="Hammer: enable Full-bit Directory")
61
62def create_system(options, system, piobus, dma_ports, ruby_system):
63
64 if buildEnv['PROTOCOL'] != 'MOESI_hammer':
65 panic("This script requires the MOESI_hammer protocol to be built.")
66
67 cpu_sequencers = []
68
69 #
70 # The ruby network creation expects the list of nodes in the system to be
71 # consistent with the NetDest list. Therefore the l1 controller nodes must be
72 # listed before the directory nodes and directory nodes before dma nodes, etc.
73 #
74 l1_cntrl_nodes = []
75 dir_cntrl_nodes = []
76 dma_cntrl_nodes = []
77
78 #
79 # Must create the individual controllers before the network to ensure the
80 # controller constructors are called before the network constructor
81 #
82 block_size_bits = int(math.log(options.cacheline_size, 2))
83
84 cntrl_count = 0
85
86 for i in xrange(options.num_cpus):
87 #
88 # First create the Ruby objects associated with this cpu
89 #
90 l1i_cache = L1Cache(size = options.l1i_size,
91 assoc = options.l1i_assoc,
92 start_index_bit = block_size_bits,
93 is_icache = True)
94 l1d_cache = L1Cache(size = options.l1d_size,
95 assoc = options.l1d_assoc,
96 start_index_bit = block_size_bits)
97 l2_cache = L2Cache(size = options.l2_size,
98 assoc = options.l2_assoc,
99 start_index_bit = block_size_bits)
100
101 l1_cntrl = L1Cache_Controller(version = i,
102 cntrl_id = cntrl_count,
103 L1Icache = l1i_cache,
104 L1Dcache = l1d_cache,
105 L2cache = l2_cache,
106 no_mig_atomic = not \
107 options.allow_atomic_migration,
108 send_evictions = (
109 options.cpu_type == "detailed"),
1# Copyright (c) 2006-2007 The Regents of The University of Michigan
2# Copyright (c) 2009 Advanced Micro Devices, Inc.
3# All rights reserved.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Brad Beckmann
29
30import math
31import m5
32from m5.objects import *
33from m5.defines import buildEnv
34from Ruby import create_topology
35
36#
37# Note: the L1 Cache latency is only used by the sequencer on fast path hits
38#
39class L1Cache(RubyCache):
40 latency = 2
41
42#
43# Note: the L2 Cache latency is not currently used
44#
45class L2Cache(RubyCache):
46 latency = 10
47
48#
49# Probe filter is a cache, latency is not used
50#
51class ProbeFilter(RubyCache):
52 latency = 1
53
54def define_options(parser):
55 parser.add_option("--allow-atomic-migration", action="store_true",
56 help="allow migratory sharing for atomic only accessed blocks")
57 parser.add_option("--pf-on", action="store_true",
58 help="Hammer: enable Probe Filter")
59 parser.add_option("--dir-on", action="store_true",
60 help="Hammer: enable Full-bit Directory")
61
62def create_system(options, system, piobus, dma_ports, ruby_system):
63
64 if buildEnv['PROTOCOL'] != 'MOESI_hammer':
65 panic("This script requires the MOESI_hammer protocol to be built.")
66
67 cpu_sequencers = []
68
69 #
70 # The ruby network creation expects the list of nodes in the system to be
71 # consistent with the NetDest list. Therefore the l1 controller nodes must be
72 # listed before the directory nodes and directory nodes before dma nodes, etc.
73 #
74 l1_cntrl_nodes = []
75 dir_cntrl_nodes = []
76 dma_cntrl_nodes = []
77
78 #
79 # Must create the individual controllers before the network to ensure the
80 # controller constructors are called before the network constructor
81 #
82 block_size_bits = int(math.log(options.cacheline_size, 2))
83
84 cntrl_count = 0
85
86 for i in xrange(options.num_cpus):
87 #
88 # First create the Ruby objects associated with this cpu
89 #
90 l1i_cache = L1Cache(size = options.l1i_size,
91 assoc = options.l1i_assoc,
92 start_index_bit = block_size_bits,
93 is_icache = True)
94 l1d_cache = L1Cache(size = options.l1d_size,
95 assoc = options.l1d_assoc,
96 start_index_bit = block_size_bits)
97 l2_cache = L2Cache(size = options.l2_size,
98 assoc = options.l2_assoc,
99 start_index_bit = block_size_bits)
100
101 l1_cntrl = L1Cache_Controller(version = i,
102 cntrl_id = cntrl_count,
103 L1Icache = l1i_cache,
104 L1Dcache = l1d_cache,
105 L2cache = l2_cache,
106 no_mig_atomic = not \
107 options.allow_atomic_migration,
108 send_evictions = (
109 options.cpu_type == "detailed"),
110 transitions_per_cycle = options.ports,
110 ruby_system = ruby_system)
111
112 cpu_seq = RubySequencer(version = i,
113 icache = l1i_cache,
114 dcache = l1d_cache,
115 ruby_system = ruby_system)
116
117 l1_cntrl.sequencer = cpu_seq
118
119 if piobus != None:
120 cpu_seq.pio_port = piobus.slave
121
122 if options.recycle_latency:
123 l1_cntrl.recycle_latency = options.recycle_latency
124
125 exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
126 #
127 # Add controllers and sequencers to the appropriate lists
128 #
129 cpu_sequencers.append(cpu_seq)
130 l1_cntrl_nodes.append(l1_cntrl)
131
132 cntrl_count += 1
133
134 phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
135 assert(phys_mem_size % options.num_dirs == 0)
136 mem_module_size = phys_mem_size / options.num_dirs
137
138 #
139 # determine size and index bits for probe filter
140 # By default, the probe filter size is configured to be twice the
141 # size of the L2 cache.
142 #
143 pf_size = MemorySize(options.l2_size)
144 pf_size.value = pf_size.value * 2
145 dir_bits = int(math.log(options.num_dirs, 2))
146 pf_bits = int(math.log(pf_size.value, 2))
147 if options.numa_high_bit:
148 if options.pf_on or options.dir_on:
149 # if numa high bit explicitly set, make sure it does not overlap
150 # with the probe filter index
151 assert(options.numa_high_bit - dir_bits > pf_bits)
152
153 # set the probe filter start bit to just above the block offset
154 pf_start_bit = block_size_bits
155 else:
156 if dir_bits > 0:
157 pf_start_bit = dir_bits + block_size_bits - 1
158 else:
159 pf_start_bit = block_size_bits
160
161 # Run each of the ruby memory controllers at a ratio of the frequency of
162 # the ruby system
163 # clk_divider value is a fix to pass regression.
164 ruby_system.memctrl_clk_domain = DerivedClockDomain(
165 clk_domain=ruby_system.clk_domain,
166 clk_divider=3)
167
168 for i in xrange(options.num_dirs):
169 #
170 # Create the Ruby objects associated with the directory controller
171 #
172
173 mem_cntrl = RubyMemoryControl(
174 clk_domain = ruby_system.memctrl_clk_domain,
175 version = i,
176 ruby_system = ruby_system)
177
178 dir_size = MemorySize('0B')
179 dir_size.value = mem_module_size
180
181 pf = ProbeFilter(size = pf_size, assoc = 4,
182 start_index_bit = pf_start_bit)
183
184 dir_cntrl = Directory_Controller(version = i,
185 cntrl_id = cntrl_count,
186 directory = \
187 RubyDirectoryMemory( \
188 version = i,
189 size = dir_size,
190 use_map = options.use_map,
191 map_levels = \
192 options.map_levels,
193 numa_high_bit = \
194 options.numa_high_bit),
195 probeFilter = pf,
196 memBuffer = mem_cntrl,
197 probe_filter_enabled = options.pf_on,
198 full_bit_dir_enabled = options.dir_on,
111 ruby_system = ruby_system)
112
113 cpu_seq = RubySequencer(version = i,
114 icache = l1i_cache,
115 dcache = l1d_cache,
116 ruby_system = ruby_system)
117
118 l1_cntrl.sequencer = cpu_seq
119
120 if piobus != None:
121 cpu_seq.pio_port = piobus.slave
122
123 if options.recycle_latency:
124 l1_cntrl.recycle_latency = options.recycle_latency
125
126 exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
127 #
128 # Add controllers and sequencers to the appropriate lists
129 #
130 cpu_sequencers.append(cpu_seq)
131 l1_cntrl_nodes.append(l1_cntrl)
132
133 cntrl_count += 1
134
135 phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
136 assert(phys_mem_size % options.num_dirs == 0)
137 mem_module_size = phys_mem_size / options.num_dirs
138
139 #
140 # determine size and index bits for probe filter
141 # By default, the probe filter size is configured to be twice the
142 # size of the L2 cache.
143 #
144 pf_size = MemorySize(options.l2_size)
145 pf_size.value = pf_size.value * 2
146 dir_bits = int(math.log(options.num_dirs, 2))
147 pf_bits = int(math.log(pf_size.value, 2))
148 if options.numa_high_bit:
149 if options.pf_on or options.dir_on:
150 # if numa high bit explicitly set, make sure it does not overlap
151 # with the probe filter index
152 assert(options.numa_high_bit - dir_bits > pf_bits)
153
154 # set the probe filter start bit to just above the block offset
155 pf_start_bit = block_size_bits
156 else:
157 if dir_bits > 0:
158 pf_start_bit = dir_bits + block_size_bits - 1
159 else:
160 pf_start_bit = block_size_bits
161
162 # Run each of the ruby memory controllers at a ratio of the frequency of
163 # the ruby system
164 # clk_divider value is a fix to pass regression.
165 ruby_system.memctrl_clk_domain = DerivedClockDomain(
166 clk_domain=ruby_system.clk_domain,
167 clk_divider=3)
168
169 for i in xrange(options.num_dirs):
170 #
171 # Create the Ruby objects associated with the directory controller
172 #
173
174 mem_cntrl = RubyMemoryControl(
175 clk_domain = ruby_system.memctrl_clk_domain,
176 version = i,
177 ruby_system = ruby_system)
178
179 dir_size = MemorySize('0B')
180 dir_size.value = mem_module_size
181
182 pf = ProbeFilter(size = pf_size, assoc = 4,
183 start_index_bit = pf_start_bit)
184
185 dir_cntrl = Directory_Controller(version = i,
186 cntrl_id = cntrl_count,
187 directory = \
188 RubyDirectoryMemory( \
189 version = i,
190 size = dir_size,
191 use_map = options.use_map,
192 map_levels = \
193 options.map_levels,
194 numa_high_bit = \
195 options.numa_high_bit),
196 probeFilter = pf,
197 memBuffer = mem_cntrl,
198 probe_filter_enabled = options.pf_on,
199 full_bit_dir_enabled = options.dir_on,
200 transitions_per_cycle = options.ports,
199 ruby_system = ruby_system)
200
201 if options.recycle_latency:
202 dir_cntrl.recycle_latency = options.recycle_latency
203
204 exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
205 dir_cntrl_nodes.append(dir_cntrl)
206
207 cntrl_count += 1
208
209 for i, dma_port in enumerate(dma_ports):
210 #
211 # Create the Ruby objects associated with the dma controller
212 #
213 dma_seq = DMASequencer(version = i,
214 ruby_system = ruby_system)
215
216 dma_cntrl = DMA_Controller(version = i,
217 cntrl_id = cntrl_count,
218 dma_sequencer = dma_seq,
201 ruby_system = ruby_system)
202
203 if options.recycle_latency:
204 dir_cntrl.recycle_latency = options.recycle_latency
205
206 exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
207 dir_cntrl_nodes.append(dir_cntrl)
208
209 cntrl_count += 1
210
211 for i, dma_port in enumerate(dma_ports):
212 #
213 # Create the Ruby objects associated with the dma controller
214 #
215 dma_seq = DMASequencer(version = i,
216 ruby_system = ruby_system)
217
218 dma_cntrl = DMA_Controller(version = i,
219 cntrl_id = cntrl_count,
220 dma_sequencer = dma_seq,
221 transitions_per_cycle = options.ports,
219 ruby_system = ruby_system)
220
221 exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
222 exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
223 dma_cntrl_nodes.append(dma_cntrl)
224
225 if options.recycle_latency:
226 dma_cntrl.recycle_latency = options.recycle_latency
227
228 cntrl_count += 1
229
230 all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
231
232 topology = create_topology(all_cntrls, options)
233
234 return (cpu_sequencers, dir_cntrl_nodes, topology)
222 ruby_system = ruby_system)
223
224 exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
225 exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
226 dma_cntrl_nodes.append(dma_cntrl)
227
228 if options.recycle_latency:
229 dma_cntrl.recycle_latency = options.recycle_latency
230
231 cntrl_count += 1
232
233 all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
234
235 topology = create_topology(all_cntrls, options)
236
237 return (cpu_sequencers, dir_cntrl_nodes, topology)