# MOESI_hammer.py -- reconstructed from a merged view of revisions
# 7917:d9afb18a5008 and 8180:d8587c913ccf (rev 8180 retained).
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann

import math
import m5
from m5.objects import *
from m5.defines import buildEnv

#
# Note: the L1 Cache latency is only used by the sequencer on fast path hits
#
class L1Cache(RubyCache):
    latency = 2

#
# Note: the L2 Cache latency is not currently used
#
class L2Cache(RubyCache):
    latency = 10

#
# Probe filter is a cache, latency is not used
#
class ProbeFilter(RubyCache):
    latency = 1

def define_options(parser):
    """Register the MOESI_hammer-specific command line options on *parser*.

    All three options are store_true flags (default None when absent).
    """
    parser.add_option("--allow-atomic-migration", action="store_true",
          help="allow migratory sharing for atomic only accessed blocks")
    parser.add_option("--pf-on", action="store_true",
          help="Hammer: enable Probe Filter")
    parser.add_option("--dir-on", action="store_true",
          help="Hammer: enable Full-bit Directory")

def create_system(options, system, piobus, dma_devices):
    """Build the Ruby objects for the MOESI_hammer protocol.

    Creates one L1I/L1D/L2 cache controller per CPU, one directory
    controller (with an optional probe filter / full-bit directory) per
    memory module, and one DMA controller per DMA device.

    Parameters
    ----------
    options : parsed command-line options (num_cpus, num_dirs, cache
        sizes/assocs, cacheline_size, pf_on, dir_on, ...)
    system : the m5 System; controllers are attached to it as
        system.l1_cntrlN / system.dir_cntrlN / system.dma_cntrlN
    piobus : pio bus to hook sequencers to, or None
    dma_devices : iterable of DMA-capable devices

    Returns
    -------
    (cpu_sequencers, dir_cntrl_nodes, all_cntrls) where all_cntrls is the
    L1, directory, and DMA controllers in network/NetDest order.
    """

    if buildEnv['PROTOCOL'] != 'MOESI_hammer':
        panic("This script requires the MOESI_hammer protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list.  Therefore the l1 controller nodes
    # must be listed before the directory nodes and directory nodes before
    # dma nodes, etc.
    #
    l1_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
    block_size_bits = int(math.log(options.cacheline_size, 2))

    for i in xrange(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        #
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits)
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = block_size_bits)

        cpu_seq = RubySequencer(version = i,
                                icache = l1i_cache,
                                dcache = l1d_cache,
                                physMemPort = system.physmem.port,
                                physmem = system.physmem)

        if piobus != None:
            cpu_seq.pio_port = piobus.port

        l1_cntrl = L1Cache_Controller(version = i,
                                      sequencer = cpu_seq,
                                      L1IcacheMemory = l1i_cache,
                                      L1DcacheMemory = l1d_cache,
                                      L2cacheMemory = l2_cache,
                                      no_mig_atomic = not \
                                        options.allow_atomic_migration)

        if options.recycle_latency:
            l1_cntrl.recycle_latency = options.recycle_latency

        exec("system.l1_cntrl%d = l1_cntrl" % i)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

    phys_mem_size = long(system.physmem.range.second) - \
                    long(system.physmem.range.first) + 1
    mem_module_size = phys_mem_size / options.num_dirs

    #
    # determine size and index bits for probe filter
    # By default, the probe filter size is configured to be twice the
    # size of the L2 cache.
    #
    pf_size = MemorySize(options.l2_size)
    pf_size.value = pf_size.value * 2
    dir_bits = int(math.log(options.num_dirs, 2))
    pf_bits = int(math.log(pf_size.value, 2))
    if options.numa_high_bit:
        if options.numa_high_bit > 0:
            # if numa high bit explicitly set, make sure it does not overlap
            # with the probe filter index
            assert(options.numa_high_bit - dir_bits > pf_bits)

        # set the probe filter start bit to just above the block offset
        pf_start_bit = 6
    else:
        if dir_bits > 0:
            pf_start_bit = dir_bits + 5
        else:
            pf_start_bit = 6

    for i in xrange(options.num_dirs):
        #
        # Create the Ruby objects associated with the directory controller
        #

        mem_cntrl = RubyMemoryControl(version = i)

        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        pf = ProbeFilter(size = pf_size, assoc = 4,
                         start_index_bit = pf_start_bit)

        dir_cntrl = Directory_Controller(version = i,
                                         directory = \
                                         RubyDirectoryMemory( \
                                                    version = i,
                                                    size = dir_size,
                                                    use_map = options.use_map,
                                                    map_levels = \
                                                      options.map_levels,
                                                    numa_high_bit = \
                                                      options.numa_high_bit),
                                         probeFilter = pf,
                                         memBuffer = mem_cntrl,
                                         probe_filter_enabled = options.pf_on,
                                         full_bit_dir_enabled = options.dir_on)

        if options.recycle_latency:
            dir_cntrl.recycle_latency = options.recycle_latency

        exec("system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

    for i, dma_device in enumerate(dma_devices):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               physMemPort = system.physmem.port,
                               physmem = system.physmem)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq)

        exec("system.dma_cntrl%d = dma_cntrl" % i)
        # MemTest devices expose their port as 'test'; real devices as 'dma'
        if dma_device.type == 'MemTest':
            exec("system.dma_cntrl%d.dma_sequencer.port = dma_device.test" % i)
        else:
            exec("system.dma_cntrl%d.dma_sequencer.port = dma_device.dma" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        if options.recycle_latency:
            dma_cntrl.recycle_latency = options.recycle_latency

    all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes

    return (cpu_sequencers, dir_cntrl_nodes, all_cntrls)