# Diff view residue: MOESI_hammer.py (8923:820111f58fbb) vs MOESI_hammer.py (8929:4148f9af0b70)
1# Copyright (c) 2006-2007 The Regents of The University of Michigan
2# Copyright (c) 2009 Advanced Micro Devices, Inc.
3# All rights reserved.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Brad Beckmann
29
30import math
31import m5
32from m5.objects import *
33from m5.defines import buildEnv
34
35#
36# Note: the L1 Cache latency is only used by the sequencer on fast path hits
37#
class L1Cache(RubyCache):
    """Private L1 cache. Only `latency` is set here; it is used solely by
    the sequencer on fast-path hits (see note above)."""
    latency = 2
41#
42# Note: the L2 Cache latency is not currently used
43#
class L2Cache(RubyCache):
    """Private L2 cache. The latency value is not currently consumed by
    the protocol (see note above); it is kept for completeness."""
    latency = 10
47#
48# Probe filter is a cache, latency is not used
49#
class ProbeFilter(RubyCache):
    """Probe filter, modeled as a cache. Its latency is unused."""
    latency = 1
def define_options(parser):
    """Register MOESI_hammer-specific command line options on *parser*.

    All three options are boolean flags (store_true, default None):
      --allow-atomic-migration : allow migratory sharing for blocks that
                                 are only accessed atomically
      --pf-on                  : enable the Hammer probe filter
      --dir-on                 : enable the Hammer full-bit directory
    """
    parser.add_option("--allow-atomic-migration", action="store_true",
          help="allow migratory sharing for atomic only accessed blocks")
    parser.add_option("--pf-on", action="store_true",
          help="Hammer: enable Probe Filter")
    parser.add_option("--dir-on", action="store_true",
          help="Hammer: enable Full-bit Directory")
1# Copyright (c) 2006-2007 The Regents of The University of Michigan
2# Copyright (c) 2009 Advanced Micro Devices, Inc.
3# All rights reserved.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Brad Beckmann
29
30import math
31import m5
32from m5.objects import *
33from m5.defines import buildEnv
34
35#
36# Note: the L1 Cache latency is only used by the sequencer on fast path hits
37#
class L1Cache(RubyCache):
    """Private L1 cache. Only `latency` is set here; it is used solely by
    the sequencer on fast-path hits (see note above)."""
    latency = 2
41#
42# Note: the L2 Cache latency is not currently used
43#
class L2Cache(RubyCache):
    """Private L2 cache. The latency value is not currently consumed by
    the protocol (see note above); it is kept for completeness."""
    latency = 10
47#
48# Probe filter is a cache, latency is not used
49#
class ProbeFilter(RubyCache):
    """Probe filter, modeled as a cache. Its latency is unused."""
    latency = 1
def define_options(parser):
    """Register MOESI_hammer-specific command line options on *parser*.

    All three options are boolean flags (store_true, default None):
      --allow-atomic-migration : allow migratory sharing for blocks that
                                 are only accessed atomically
      --pf-on                  : enable the Hammer probe filter
      --dir-on                 : enable the Hammer full-bit directory
    """
    parser.add_option("--allow-atomic-migration", action="store_true",
          help="allow migratory sharing for atomic only accessed blocks")
    parser.add_option("--pf-on", action="store_true",
          help="Hammer: enable Probe Filter")
    parser.add_option("--dir-on", action="store_true",
          help="Hammer: enable Full-bit Directory")
def create_system(options, system, piobus, dma_ports, ruby_system):
    """Build the Ruby objects for the MOESI_hammer protocol.

    Creates one L1 controller (split L1I/L1D plus a private L2) and one
    RubySequencer per CPU, one directory controller (with probe filter
    and memory controller) per directory, and one DMA controller per
    entry in dma_ports.

    Returns (cpu_sequencers, dir_cntrl_nodes, all_cntrls), where
    all_cntrls lists L1, directory, and DMA controllers in that order.
    """

    if buildEnv['PROTOCOL'] != 'MOESI_hammer':
        panic("This script requires the MOESI_hammer protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to
    # be consistent with the NetDest list.  Therefore the l1 controller
    # nodes must be listed before the directory nodes and directory nodes
    # before dma nodes, etc.
    #
    l1_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure
    # the controller constructors are called before the network constructor
    #
    block_size_bits = int(math.log(options.cacheline_size, 2))

    cntrl_count = 0

    for i in xrange(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        #
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits,
                            is_icache = True)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits)
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = block_size_bits)

        l1_cntrl = L1Cache_Controller(version = i,
                                      cntrl_id = cntrl_count,
                                      L1IcacheMemory = l1i_cache,
                                      L1DcacheMemory = l1d_cache,
                                      L2cacheMemory = l2_cache,
                                      no_mig_atomic = not \
                                        options.allow_atomic_migration,
                                      send_evictions = (
                                          options.cpu_type == "detailed"),
                                      ruby_system = ruby_system)

        cpu_seq = RubySequencer(version = i,
                                icache = l1i_cache,
                                dcache = l1d_cache,
                                ruby_system = ruby_system)

        l1_cntrl.sequencer = cpu_seq

        if piobus != None:
            cpu_seq.pio_port = piobus.slave

        if options.recycle_latency:
            l1_cntrl.recycle_latency = options.recycle_latency

        exec("system.l1_cntrl%d = l1_cntrl" % i)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

        cntrl_count += 1

    phys_mem_size = long(system.physmem.range.second) - \
                    long(system.physmem.range.first) + 1
    mem_module_size = phys_mem_size / options.num_dirs

    #
    # determine size and index bits for probe filter
    # By default, the probe filter size is configured to be twice the
    # size of the L2 cache.
    #
    pf_size = MemorySize(options.l2_size)
    pf_size.value = pf_size.value * 2
    dir_bits = int(math.log(options.num_dirs, 2))
    pf_bits = int(math.log(pf_size.value, 2))
    if options.numa_high_bit:
        if options.numa_high_bit > 0:
            # if numa high bit explicitly set, make sure it does not overlap
            # with the probe filter index
            assert(options.numa_high_bit - dir_bits > pf_bits)

        # set the probe filter start bit to just above the block offset
        pf_start_bit = 6
    else:
        if dir_bits > 0:
            pf_start_bit = dir_bits + 5
        else:
            pf_start_bit = 6

    for i in xrange(options.num_dirs):
        #
        # Create the Ruby objects associated with the directory controller
        #

        mem_cntrl = RubyMemoryControl(version = i)

        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        pf = ProbeFilter(size = pf_size, assoc = 4,
                         start_index_bit = pf_start_bit)

        dir_cntrl = Directory_Controller(version = i,
                                         cntrl_id = cntrl_count,
                                         directory = \
                                         RubyDirectoryMemory( \
                                                    version = i,
                                                    size = dir_size,
                                                    use_map = options.use_map,
                                                    map_levels = \
                                                      options.map_levels,
                                                    numa_high_bit = \
                                                      options.numa_high_bit),
                                         probeFilter = pf,
                                         memBuffer = mem_cntrl,
                                         probe_filter_enabled = options.pf_on,
                                         full_bit_dir_enabled = options.dir_on,
                                         ruby_system = ruby_system)

        if options.recycle_latency:
            dir_cntrl.recycle_latency = options.recycle_latency

        exec("system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

        cntrl_count += 1

    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system)

        dma_cntrl = DMA_Controller(version = i,
                                   cntrl_id = cntrl_count,
                                   dma_sequencer = dma_seq,
                                   ruby_system = ruby_system)

        exec("system.dma_cntrl%d = dma_cntrl" % i)
        # Connect the DMA sequencer's slave port directly to the provided
        # DMA master port (newer interface; replaces the per-device-type
        # MemTest special case of the older revision).
        exec("system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        if options.recycle_latency:
            dma_cntrl.recycle_latency = options.recycle_latency

        cntrl_count += 1

    all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes

    return (cpu_sequencers, dir_cntrl_nodes, all_cntrls)