MOESI_hammer.py (8931:7a1dfb191e3f) MOESI_hammer.py (9100:3caf131d7a95)
1# Copyright (c) 2006-2007 The Regents of The University of Michigan
2# Copyright (c) 2009 Advanced Micro Devices, Inc.
3# All rights reserved.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Brad Beckmann
29
30import math
31import m5
32from m5.objects import *
33from m5.defines import buildEnv
1# Copyright (c) 2006-2007 The Regents of The University of Michigan
2# Copyright (c) 2009 Advanced Micro Devices, Inc.
3# All rights reserved.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Brad Beckmann
29
30import math
31import m5
32from m5.objects import *
33from m5.defines import buildEnv
34from Ruby import create_topology
34
35#
36# Note: the L1 Cache latency is only used by the sequencer on fast path hits
37#
class L1Cache(RubyCache):
    """L1 cache organization for MOESI_hammer.

    Note: the L1 cache latency is only used by the sequencer on fast
    path hits.
    """
    latency = 2
40
41#
42# Note: the L2 Cache latency is not currently used
43#
class L2Cache(RubyCache):
    """L2 cache organization for MOESI_hammer.

    Note: the L2 cache latency is not currently used.
    """
    latency = 10
46
47#
48# Probe filter is a cache, latency is not used
49#
class ProbeFilter(RubyCache):
    """Probe filter modeled as a cache; the latency field is not used."""
    latency = 1
52
def define_options(parser):
    """Register the MOESI_hammer-specific command line flags on parser.

    All three flags are plain store_true booleans; registration order is
    preserved so --help output is unchanged.
    """
    hammer_flags = [
        ("--allow-atomic-migration",
         "allow migratory sharing for atomic only accessed blocks"),
        ("--pf-on", "Hammer: enable Probe Filter"),
        ("--dir-on", "Hammer: enable Full-bit Directory"),
    ]
    for flag, description in hammer_flags:
        parser.add_option(flag, action="store_true", help=description)
60
def create_system(options, system, piobus, dma_ports, ruby_system):
    """Instantiate the Ruby controllers for the MOESI_hammer protocol.

    Builds one L1 cache controller plus RubySequencer per CPU, one
    directory controller (with a probe filter) per directory, and one
    DMA controller per entry in dma_ports, attaching every controller
    to ruby_system and naming it on the system object.
    """

    if buildEnv['PROTOCOL'] != 'MOESI_hammer':
        panic("This script requires the MOESI_hammer protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list.  Therefore the l1 controller nodes must be
    # listed before the directory nodes and directory nodes before dma nodes, etc.
    #
    l1_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
    block_size_bits = int(math.log(options.cacheline_size, 2))

    # cntrl_id is a single running counter shared by the L1, directory,
    # and DMA loops so every controller gets a unique id.
    cntrl_count = 0

    for i in xrange(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        #
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits,
                            is_icache = True)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits)
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = block_size_bits)

        # Migratory sharing for atomics is off unless explicitly allowed;
        # eviction notifications are sent only for the "detailed" CPU type.
        l1_cntrl = L1Cache_Controller(version = i,
                                      cntrl_id = cntrl_count,
                                      L1IcacheMemory = l1i_cache,
                                      L1DcacheMemory = l1d_cache,
                                      L2cacheMemory = l2_cache,
                                      no_mig_atomic = not \
                                        options.allow_atomic_migration,
                                      send_evictions = (
                                          options.cpu_type == "detailed"),
                                      ruby_system = ruby_system)

        cpu_seq = RubySequencer(version = i,
                                icache = l1i_cache,
                                dcache = l1d_cache,
                                ruby_system = ruby_system)

        l1_cntrl.sequencer = cpu_seq

        if piobus != None:
            cpu_seq.pio_port = piobus.slave

        if options.recycle_latency:
            l1_cntrl.recycle_latency = options.recycle_latency

        # Expose the controller as system.l1_cntrl<i>
        exec("system.l1_cntrl%d = l1_cntrl" % i)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

        cntrl_count += 1

    # Split total physical memory evenly across the directories.
    # NOTE(review): Python 2 integer division here; assumes num_dirs
    # divides phys_mem_size — confirm.
    phys_mem_size = 0
    for mem in system.memories.unproxy(system):
        phys_mem_size += long(mem.range.second) - long(mem.range.first) + 1
    mem_module_size = phys_mem_size / options.num_dirs

    #
    # determine size and index bits for probe filter
    # By default, the probe filter size is configured to be twice the
    # size of the L2 cache.
    #
    pf_size = MemorySize(options.l2_size)
    pf_size.value = pf_size.value * 2
    dir_bits = int(math.log(options.num_dirs, 2))
    pf_bits = int(math.log(pf_size.value, 2))
    if options.numa_high_bit:
        if options.numa_high_bit > 0:
            # if numa high bit explicitly set, make sure it does not overlap
            # with the probe filter index
            assert(options.numa_high_bit - dir_bits > pf_bits)

        # set the probe filter start bit to just above the block offset
        # NOTE(review): the literal 6 is not derived from
        # options.cacheline_size (which sets block_size_bits above) —
        # presumably assumes 64B blocks; confirm.
        pf_start_bit = 6
    else:
        if dir_bits > 0:
            pf_start_bit = dir_bits + 5
        else:
            pf_start_bit = 6

    for i in xrange(options.num_dirs):
        #
        # Create the Ruby objects associated with the directory controller
        #

        mem_cntrl = RubyMemoryControl(version = i)

        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        pf = ProbeFilter(size = pf_size, assoc = 4,
                         start_index_bit = pf_start_bit)

        # Probe filter / full-bit directory enables come straight from the
        # --pf-on / --dir-on flags registered in define_options.
        dir_cntrl = Directory_Controller(version = i,
                                         cntrl_id = cntrl_count,
                                         directory = \
                                         RubyDirectoryMemory( \
                                                       version = i,
                                                       size = dir_size,
                                                       use_map = options.use_map,
                                                       map_levels = \
                                                       options.map_levels,
                                                       numa_high_bit = \
                                                         options.numa_high_bit),
                                         probeFilter = pf,
                                         memBuffer = mem_cntrl,
                                         probe_filter_enabled = options.pf_on,
                                         full_bit_dir_enabled = options.dir_on,
                                         ruby_system = ruby_system)

        if options.recycle_latency:
            dir_cntrl.recycle_latency = options.recycle_latency

        # Expose the controller as system.dir_cntrl<i>
        exec("system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

        cntrl_count += 1

    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system)

        dma_cntrl = DMA_Controller(version = i,
                                   cntrl_id = cntrl_count,
                                   dma_sequencer = dma_seq,
                                   ruby_system = ruby_system)

        # Expose the controller and hook its sequencer to the DMA port.
        exec("system.dma_cntrl%d = dma_cntrl" % i)
        exec("system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        if options.recycle_latency:
            dma_cntrl.recycle_latency = options.recycle_latency

        cntrl_count += 1

    # Concatenation order matters: L1s, then directories, then DMA
    # controllers (see the NetDest ordering comment above).
    all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
221
35
36#
37# Note: the L1 Cache latency is only used by the sequencer on fast path hits
38#
class L1Cache(RubyCache):
    """L1 cache organization for MOESI_hammer.

    Note: the L1 cache latency is only used by the sequencer on fast
    path hits.
    """
    latency = 2
41
42#
43# Note: the L2 Cache latency is not currently used
44#
class L2Cache(RubyCache):
    """L2 cache organization for MOESI_hammer.

    Note: the L2 cache latency is not currently used.
    """
    latency = 10
47
48#
49# Probe filter is a cache, latency is not used
50#
class ProbeFilter(RubyCache):
    """Probe filter modeled as a cache; the latency field is not used."""
    latency = 1
53
def define_options(parser):
    """Register the MOESI_hammer-specific command line flags on parser.

    All three flags are plain store_true booleans; registration order is
    preserved so --help output is unchanged.
    """
    hammer_flags = [
        ("--allow-atomic-migration",
         "allow migratory sharing for atomic only accessed blocks"),
        ("--pf-on", "Hammer: enable Probe Filter"),
        ("--dir-on", "Hammer: enable Full-bit Directory"),
    ]
    for flag, description in hammer_flags:
        parser.add_option(flag, action="store_true", help=description)
61
def create_system(options, system, piobus, dma_ports, ruby_system):
    """Instantiate the Ruby controllers for the MOESI_hammer protocol.

    Builds one L1 cache controller plus RubySequencer per CPU, one
    directory controller (with a probe filter) per directory, and one
    DMA controller per entry in dma_ports, attaching every controller
    to ruby_system and naming it on the system object.

    Returns a tuple (cpu_sequencers, dir_cntrl_nodes, topology) where
    topology is produced by create_topology over all controllers.
    """

    if buildEnv['PROTOCOL'] != 'MOESI_hammer':
        panic("This script requires the MOESI_hammer protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list.  Therefore the l1 controller nodes must be
    # listed before the directory nodes and directory nodes before dma nodes, etc.
    #
    l1_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
    block_size_bits = int(math.log(options.cacheline_size, 2))

    # cntrl_id is a single running counter shared by the L1, directory,
    # and DMA loops so every controller gets a unique id.
    cntrl_count = 0

    for i in xrange(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        #
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits,
                            is_icache = True)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits)
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = block_size_bits)

        # Migratory sharing for atomics is off unless explicitly allowed;
        # eviction notifications are sent only for the "detailed" CPU type.
        l1_cntrl = L1Cache_Controller(version = i,
                                      cntrl_id = cntrl_count,
                                      L1IcacheMemory = l1i_cache,
                                      L1DcacheMemory = l1d_cache,
                                      L2cacheMemory = l2_cache,
                                      no_mig_atomic = not \
                                        options.allow_atomic_migration,
                                      send_evictions = (
                                          options.cpu_type == "detailed"),
                                      ruby_system = ruby_system)

        cpu_seq = RubySequencer(version = i,
                                icache = l1i_cache,
                                dcache = l1d_cache,
                                ruby_system = ruby_system)

        l1_cntrl.sequencer = cpu_seq

        if piobus != None:
            cpu_seq.pio_port = piobus.slave

        if options.recycle_latency:
            l1_cntrl.recycle_latency = options.recycle_latency

        # Expose the controller as system.l1_cntrl<i>
        exec("system.l1_cntrl%d = l1_cntrl" % i)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

        cntrl_count += 1

    # Split total physical memory evenly across the directories.
    # NOTE(review): Python 2 integer division here; assumes num_dirs
    # divides phys_mem_size — confirm.
    phys_mem_size = 0
    for mem in system.memories.unproxy(system):
        phys_mem_size += long(mem.range.second) - long(mem.range.first) + 1
    mem_module_size = phys_mem_size / options.num_dirs

    #
    # determine size and index bits for probe filter
    # By default, the probe filter size is configured to be twice the
    # size of the L2 cache.
    #
    pf_size = MemorySize(options.l2_size)
    pf_size.value = pf_size.value * 2
    dir_bits = int(math.log(options.num_dirs, 2))
    pf_bits = int(math.log(pf_size.value, 2))
    if options.numa_high_bit:
        if options.numa_high_bit > 0:
            # if numa high bit explicitly set, make sure it does not overlap
            # with the probe filter index
            assert(options.numa_high_bit - dir_bits > pf_bits)

        # set the probe filter start bit to just above the block offset
        # NOTE(review): the literal 6 is not derived from
        # options.cacheline_size (which sets block_size_bits above) —
        # presumably assumes 64B blocks; confirm.
        pf_start_bit = 6
    else:
        if dir_bits > 0:
            pf_start_bit = dir_bits + 5
        else:
            pf_start_bit = 6

    for i in xrange(options.num_dirs):
        #
        # Create the Ruby objects associated with the directory controller
        #

        mem_cntrl = RubyMemoryControl(version = i)

        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        pf = ProbeFilter(size = pf_size, assoc = 4,
                         start_index_bit = pf_start_bit)

        # Probe filter / full-bit directory enables come straight from the
        # --pf-on / --dir-on flags registered in define_options.
        dir_cntrl = Directory_Controller(version = i,
                                         cntrl_id = cntrl_count,
                                         directory = \
                                         RubyDirectoryMemory( \
                                                       version = i,
                                                       size = dir_size,
                                                       use_map = options.use_map,
                                                       map_levels = \
                                                       options.map_levels,
                                                       numa_high_bit = \
                                                         options.numa_high_bit),
                                         probeFilter = pf,
                                         memBuffer = mem_cntrl,
                                         probe_filter_enabled = options.pf_on,
                                         full_bit_dir_enabled = options.dir_on,
                                         ruby_system = ruby_system)

        if options.recycle_latency:
            dir_cntrl.recycle_latency = options.recycle_latency

        # Expose the controller as system.dir_cntrl<i>
        exec("system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

        cntrl_count += 1

    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system)

        dma_cntrl = DMA_Controller(version = i,
                                   cntrl_id = cntrl_count,
                                   dma_sequencer = dma_seq,
                                   ruby_system = ruby_system)

        # Expose the controller and hook its sequencer to the DMA port.
        exec("system.dma_cntrl%d = dma_cntrl" % i)
        exec("system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        if options.recycle_latency:
            dma_cntrl.recycle_latency = options.recycle_latency

        cntrl_count += 1

    # Concatenation order matters: L1s, then directories, then DMA
    # controllers (see the NetDest ordering comment above).
    all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes

    topology = create_topology(all_cntrls, options)

    return (cpu_sequencers, dir_cntrl_nodes, topology)