# MOESI_hammer.py — merged view of revisions 7662:b4d3a30575a1 and 7904:6f5299ff8260
1# Copyright (c) 2006-2007 The Regents of The University of Michigan
2# Copyright (c) 2009 Advanced Micro Devices, Inc.
3# All rights reserved.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Brad Beckmann
29
30import math
31import m5
32from m5.objects import *
33from m5.defines import buildEnv
34
35#
36# Note: the L1 Cache latency is only used by the sequencer on fast path hits
37#
class L1Cache(RubyCache):
    # The L1 cache latency is only used by the sequencer on fast-path hits.
    latency = 2
40
41#
42# Note: the L2 Cache latency is not currently used
43#
class L2Cache(RubyCache):
    # Note: the L2 cache latency is not currently used by the protocol.
    latency = 10
46
47#
48# Probe filter is a cache, latency is not used
49#
class ProbeFilter(RubyCache):
    # The probe filter is modeled as a cache; its latency is not used.
    latency = 1
52
def define_options(parser):
    """Register the MOESI_hammer-specific command-line options on parser.

    All options use action="store_true", so each defaults to None when the
    flag is absent and becomes True when given.
    """
    parser.add_option("--allow-atomic-migration", action="store_true",
          help="allow migratory sharing for atomic only accessed blocks")
    parser.add_option("--pf-on", action="store_true",
          help="Hammer: enable Probe Filter")
    # Added for consistency with the newer revision of this script, which
    # reads options.dir_on when constructing the directory controllers.
    parser.add_option("--dir-on", action="store_true",
          help="Hammer: enable Full-bit Directory")
1# Copyright (c) 2006-2007 The Regents of The University of Michigan
2# Copyright (c) 2009 Advanced Micro Devices, Inc.
3# All rights reserved.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Brad Beckmann
29
30import math
31import m5
32from m5.objects import *
33from m5.defines import buildEnv
34
35#
36# Note: the L1 Cache latency is only used by the sequencer on fast path hits
37#
class L1Cache(RubyCache):
    # The L1 cache latency is only used by the sequencer on fast-path hits.
    latency = 2
40
41#
42# Note: the L2 Cache latency is not currently used
43#
class L2Cache(RubyCache):
    # Note: the L2 cache latency is not currently used by the protocol.
    latency = 10
46
47#
48# Probe filter is a cache, latency is not used
49#
class ProbeFilter(RubyCache):
    # The probe filter is modeled as a cache; its latency is not used.
    latency = 1
52
def define_options(parser):
    """Register the MOESI_hammer-specific command-line options on parser.

    All options use action="store_true", so each defaults to None when the
    flag is absent and becomes True when given.
    """
    parser.add_option("--allow-atomic-migration", action="store_true",
          help="allow migratory sharing for atomic only accessed blocks")
    parser.add_option("--pf-on", action="store_true",
          help="Hammer: enable Probe Filter")
    parser.add_option("--dir-on", action="store_true",
          help="Hammer: enable Full-bit Directory")
60
def create_system(options, system, piobus, dma_devices):
    """Build the Ruby objects for a MOESI_hammer protocol system.

    Creates one L1 controller (with split L1I/L1D, a private L2, and a
    sequencer) per CPU, one directory controller (with probe filter and
    memory controller) per directory, and one DMA controller per device in
    dma_devices, attaching each to 'system'.

    Returns (cpu_sequencers, dir_cntrl_nodes, all_cntrls).
    """

    if buildEnv['PROTOCOL'] != 'MOESI_hammer':
        panic("This script requires the MOESI_hammer protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to
    # be consistent with the NetDest list.  Therefore the l1 controller
    # nodes must be listed before the directory nodes and directory nodes
    # before dma nodes, etc.
    #
    l1_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure
    # the controller constructors are called before the network constructor
    #

    for i in xrange(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        #
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc)
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc)

        cpu_seq = RubySequencer(version = i,
                                icache = l1i_cache,
                                dcache = l1d_cache,
                                physMemPort = system.physmem.port,
                                physmem = system.physmem)

        if piobus != None:
            cpu_seq.pio_port = piobus.port

        l1_cntrl = L1Cache_Controller(version = i,
                                      sequencer = cpu_seq,
                                      L1IcacheMemory = l1i_cache,
                                      L1DcacheMemory = l1d_cache,
                                      L2cacheMemory = l2_cache,
                                      no_mig_atomic = not \
                                        options.allow_atomic_migration)

        if options.recycle_latency:
            l1_cntrl.recycle_latency = options.recycle_latency

        exec("system.l1_cntrl%d = l1_cntrl" % i)

        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

    phys_mem_size = long(system.physmem.range.second) - \
                    long(system.physmem.range.first) + 1
    mem_module_size = phys_mem_size / options.num_dirs

    #
    # determine size and index bits for probe filter
    # By default, the probe filter size is configured to be twice the
    # size of the L2 cache.
    #
    pf_size = MemorySize(options.l2_size)
    pf_size.value = pf_size.value * 2
    dir_bits = int(math.log(options.num_dirs, 2))
    pf_bits = int(math.log(pf_size.value, 2))
    if options.numa_high_bit:
        if options.numa_high_bit > 0:
            # if numa high bit explicitly set, make sure it does not
            # overlap with the probe filter index
            assert(options.numa_high_bit - dir_bits > pf_bits)

        # set the probe filter start bit to just above the block offset
        pf_start_bit = 6
    else:
        if dir_bits > 0:
            pf_start_bit = dir_bits + 5
        else:
            pf_start_bit = 6

    for i in xrange(options.num_dirs):
        #
        # Create the Ruby objects associated with the directory controller
        #

        mem_cntrl = RubyMemoryControl(version = i)

        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        pf = ProbeFilter(size = pf_size, assoc = 4,
                         start_index_bit = pf_start_bit)

        dir_cntrl = Directory_Controller(version = i,
                                         directory = \
                                         RubyDirectoryMemory( \
                                                    version = i,
                                                    size = dir_size,
                                                    use_map = options.use_map,
                                                    map_levels = \
                                                      options.map_levels),
                                         probeFilter = pf,
                                         memBuffer = mem_cntrl,
                                         probe_filter_enabled = options.pf_on,
                                         full_bit_dir_enabled = options.dir_on)

        if options.recycle_latency:
            dir_cntrl.recycle_latency = options.recycle_latency

        exec("system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

    for i, dma_device in enumerate(dma_devices):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               physMemPort = system.physmem.port,
                               physmem = system.physmem)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq)

        exec("system.dma_cntrl%d = dma_cntrl" % i)
        # MemTest devices expose their port as 'test'; real DMA devices as 'dma'.
        if dma_device.type == 'MemTest':
            exec("system.dma_cntrl%d.dma_sequencer.port = dma_device.test" % i)
        else:
            exec("system.dma_cntrl%d.dma_sequencer.port = dma_device.dma" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        if options.recycle_latency:
            dma_cntrl.recycle_latency = options.recycle_latency

    all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes

    return (cpu_sequencers, dir_cntrl_nodes, all_cntrls)