MOESI_CMP_token.py (10917:c38f28fad4c3) MOESI_CMP_token.py (11019:fc1e41e88fd3)
1# Copyright (c) 2006-2007 The Regents of The University of Michigan
2# Copyright (c) 2009 Advanced Micro Devices, Inc.
3# All rights reserved.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Brad Beckmann
29
30import math
31import m5
32from m5.objects import *
33from m5.defines import buildEnv
34from Ruby import create_topology
35from Ruby import send_evicts
36
37#
1# Copyright (c) 2006-2007 The Regents of The University of Michigan
2# Copyright (c) 2009 Advanced Micro Devices, Inc.
3# All rights reserved.
4#
5# Redistribution and use in source and binary forms, with or without
6# modification, are permitted provided that the following conditions are
7# met: redistributions of source code must retain the above copyright
8# notice, this list of conditions and the following disclaimer;
9# redistributions in binary form must reproduce the above copyright
10# notice, this list of conditions and the following disclaimer in the
11# documentation and/or other materials provided with the distribution;
12# neither the name of the copyright holders nor the names of its
13# contributors may be used to endorse or promote products derived from
14# this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27#
28# Authors: Brad Beckmann
29
30import math
31import m5
32from m5.objects import *
33from m5.defines import buildEnv
34from Ruby import create_topology
35from Ruby import send_evicts
36
37#
38# Note: the L1 Cache latency is only used by the sequencer on fast path hits
38# Declare caches used by the protocol
39#
39#
class L1Cache(RubyCache):
    # L1 hit latency in cycles.  Per the note above, this value is only
    # consumed by the sequencer on fast-path hits.
    latency = 2
class L1Cache(RubyCache):
    """L1 cache for this protocol; geometry is supplied per instance."""
class L2Cache(RubyCache):
    """L2 cache for this protocol; geometry is supplied per instance."""
42
42
43#
44# Note: the L2 Cache latency is not currently used
45#
class L2Cache(RubyCache):
    # L2 latency in cycles.  Per the note above, this value is not
    # currently used by the protocol.
    latency = 10
48
def define_options(parser):
    """Register MOESI_CMP_token-specific command line options on *parser*."""
    parser.add_option("--l1-retries", type="int", default=1,
                      help="Token_CMP: # of l1 retries before going persistent")
    # Fixed: dropped a stray trailing semicolon on this call.
    parser.add_option("--timeout-latency", type="int", default=300,
                      help="Token_CMP: cycles until issuing again")
    # Fixed: "dyanimc" -> "dynamic" in the user-visible help text.
    parser.add_option("--disable-dyn-timeouts", action="store_true",
                      help="Token_CMP: disable dynamic timeouts, "
                           "use fixed latency instead")
    parser.add_option("--allow-atomic-migration", action="store_true",
                      help="allow migratory sharing for atomic only "
                           "accessed blocks")
def create_system(options, full_system, system, dma_ports, ruby_system):
    """Build the Ruby memory-system objects for the MOESI_CMP_token protocol.

    Creates, in order: per-CPU L1 controllers + sequencers, L2 controllers,
    directory controllers, and DMA controllers, connecting each one to the
    ruby network as it is created.

    NOTE: ordering here is load-bearing.  The network creation expects the
    node list to be consistent with the NetDest list, so L1 controllers must
    come before directories, and directories before DMA controllers; all
    controllers must be constructed before the network constructor runs.

    Returns a (cpu_sequencers, dir_cntrl_nodes, topology) tuple.
    """
    if buildEnv['PROTOCOL'] != 'MOESI_CMP_token':
        panic("This script requires the MOESI_CMP_token protocol to be built.")

    #
    # number of tokens that the owner passes to requests so that shared blocks
    # can respond to read requests
    #
    n_tokens = options.num_cpus + 1

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list.  Therefore the l1 controller nodes must
    # be listed before the directory nodes and directory nodes before dma
    # nodes, etc.
    #
    l1_cntrl_nodes = []
    l2_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
    # Cache/bank selection bit widths derived from the configured counts
    # (both are assumed to be powers of two — TODO confirm upstream checks).
    l2_bits = int(math.log(options.num_l2caches, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))

    for i in xrange(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        #
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits)

        l1_cntrl = L1Cache_Controller(version = i,
                                      L1Icache = l1i_cache,
                                      L1Dcache = l1d_cache,
                                      l2_select_num_bits = l2_bits,
                                      N_tokens = n_tokens,
                                      retry_threshold = \
                                        options.l1_retries,
                                      fixed_timeout_latency = \
                                        options.timeout_latency,
                                      dynamic_timeout_enabled = \
                                        not options.disable_dyn_timeouts,
                                      no_mig_atomic = not \
                                        options.allow_atomic_migration,
                                      send_evictions = send_evicts(options),
                                      transitions_per_cycle = options.ports,
                                      clk_domain=system.cpu[i].clk_domain,
                                      ruby_system = ruby_system)

        cpu_seq = RubySequencer(version = i,
                                icache = l1i_cache,
                                dcache = l1d_cache,
                                clk_domain=system.cpu[i].clk_domain,
                                ruby_system = ruby_system)

        l1_cntrl.sequencer = cpu_seq
        # exec gives each controller a numbered attribute (l1_cntrl0, ...) on
        # the ruby_system SimObject so it is owned by the config hierarchy.
        exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)

        # Add controllers and sequencers to the appropriate lists
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

        # Connect the L1 controllers and the network
        l1_cntrl.requestFromL1Cache = ruby_system.network.slave
        l1_cntrl.responseFromL1Cache = ruby_system.network.slave
        l1_cntrl.persistentFromL1Cache = ruby_system.network.slave

        l1_cntrl.requestToL1Cache = ruby_system.network.master
        l1_cntrl.responseToL1Cache = ruby_system.network.master
        l1_cntrl.persistentToL1Cache = ruby_system.network.master

    # L2 banks index above the block-offset and bank-select bits.
    l2_index_start = block_size_bits + l2_bits

    for i in xrange(options.num_l2caches):
        #
        # First create the Ruby objects associated with this L2 bank
        #
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = l2_index_start)

        l2_cntrl = L2Cache_Controller(version = i,
                                      L2cache = l2_cache,
                                      N_tokens = n_tokens,
                                      transitions_per_cycle = options.ports,
                                      ruby_system = ruby_system)

        exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
        l2_cntrl_nodes.append(l2_cntrl)

        # Connect the L2 controllers and the network
        l2_cntrl.GlobalRequestFromL2Cache = ruby_system.network.slave
        l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
        l2_cntrl.responseFromL2Cache = ruby_system.network.slave

        l2_cntrl.GlobalRequestToL2Cache = ruby_system.network.master
        l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
        l2_cntrl.responseToL2Cache = ruby_system.network.master
        l2_cntrl.persistentToL2Cache = ruby_system.network.master

    # Split physical memory evenly across the directories; the assert
    # requires an exact division (Python 2: '/' on ints is integer division).
    phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
    assert(phys_mem_size % options.num_dirs == 0)
    mem_module_size = phys_mem_size / options.num_dirs

    # Run each of the ruby memory controllers at a ratio of the frequency of
    # the ruby system
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
                                          clk_domain=ruby_system.clk_domain,
                                          clk_divider=3)

    for i in xrange(options.num_dirs):
        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        dir_cntrl = Directory_Controller(version = i,
                                         directory = RubyDirectoryMemory(
                                             version = i, size = dir_size),
                                         l2_select_num_bits = l2_bits,
                                         transitions_per_cycle = options.ports,
                                         ruby_system = ruby_system)

        exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

        # Connect the directory controllers and the network
        dir_cntrl.requestToDir = ruby_system.network.master
        dir_cntrl.responseToDir = ruby_system.network.master
        dir_cntrl.persistentToDir = ruby_system.network.master
        dir_cntrl.dmaRequestToDir = ruby_system.network.master

        dir_cntrl.requestFromDir = ruby_system.network.slave
        dir_cntrl.responseFromDir = ruby_system.network.slave
        dir_cntrl.persistentFromDir = ruby_system.network.slave
        dir_cntrl.dmaResponseFromDir = ruby_system.network.slave

    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system,
                               slave = dma_port)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        # Connect the dma controller to the network
        dma_cntrl.responseFromDir = ruby_system.network.master
        dma_cntrl.reqToDirectory = ruby_system.network.slave

    all_cntrls = l1_cntrl_nodes + \
                 l2_cntrl_nodes + \
                 dir_cntrl_nodes + \
                 dma_cntrl_nodes

    # Create the io controller and the sequencer (full-system mode only).
    # It reuses the DMA controller type with version = len(dma_ports), i.e.
    # the next free version number after the real DMA controllers.
    if full_system:
        io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the io controller to the network
        io_controller.responseFromDir = ruby_system.network.master
        io_controller.reqToDirectory = ruby_system.network.slave

        all_cntrls = all_cntrls + [io_controller]

    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, dir_cntrl_nodes, topology)
def define_options(parser):
    """Register MOESI_CMP_token-specific command line options on *parser*."""
    parser.add_option("--l1-retries", type="int", default=1,
                      help="Token_CMP: # of l1 retries before going persistent")
    # Fixed: dropped a stray trailing semicolon on this call.
    parser.add_option("--timeout-latency", type="int", default=300,
                      help="Token_CMP: cycles until issuing again")
    # Fixed: "dyanimc" -> "dynamic" in the user-visible help text.
    parser.add_option("--disable-dyn-timeouts", action="store_true",
                      help="Token_CMP: disable dynamic timeouts, "
                           "use fixed latency instead")
    parser.add_option("--allow-atomic-migration", action="store_true",
                      help="allow migratory sharing for atomic only "
                           "accessed blocks")
def create_system(options, full_system, system, dma_ports, ruby_system):
    """Build the Ruby memory-system objects for the MOESI_CMP_token protocol.

    Creates, in order: per-CPU L1 controllers + sequencers, L2 controllers,
    directory controllers, and DMA controllers, connecting each one to the
    ruby network as it is created.

    NOTE: ordering here is load-bearing.  The network creation expects the
    node list to be consistent with the NetDest list, so L1 controllers must
    come before directories, and directories before DMA controllers; all
    controllers must be constructed before the network constructor runs.

    Returns a (cpu_sequencers, dir_cntrl_nodes, topology) tuple.
    """
    if buildEnv['PROTOCOL'] != 'MOESI_CMP_token':
        panic("This script requires the MOESI_CMP_token protocol to be built.")

    #
    # number of tokens that the owner passes to requests so that shared blocks
    # can respond to read requests
    #
    n_tokens = options.num_cpus + 1

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list.  Therefore the l1 controller nodes must
    # be listed before the directory nodes and directory nodes before dma
    # nodes, etc.
    #
    l1_cntrl_nodes = []
    l2_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
    # Cache/bank selection bit widths derived from the configured counts
    # (both are assumed to be powers of two — TODO confirm upstream checks).
    l2_bits = int(math.log(options.num_l2caches, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))

    for i in xrange(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        #
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits)

        l1_cntrl = L1Cache_Controller(version = i,
                                      L1Icache = l1i_cache,
                                      L1Dcache = l1d_cache,
                                      l2_select_num_bits = l2_bits,
                                      N_tokens = n_tokens,
                                      retry_threshold = \
                                        options.l1_retries,
                                      fixed_timeout_latency = \
                                        options.timeout_latency,
                                      dynamic_timeout_enabled = \
                                        not options.disable_dyn_timeouts,
                                      no_mig_atomic = not \
                                        options.allow_atomic_migration,
                                      send_evictions = send_evicts(options),
                                      transitions_per_cycle = options.ports,
                                      clk_domain=system.cpu[i].clk_domain,
                                      ruby_system = ruby_system)

        cpu_seq = RubySequencer(version = i,
                                icache = l1i_cache,
                                dcache = l1d_cache,
                                clk_domain=system.cpu[i].clk_domain,
                                ruby_system = ruby_system)

        l1_cntrl.sequencer = cpu_seq
        # exec gives each controller a numbered attribute (l1_cntrl0, ...) on
        # the ruby_system SimObject so it is owned by the config hierarchy.
        exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)

        # Add controllers and sequencers to the appropriate lists
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

        # Connect the L1 controllers and the network
        l1_cntrl.requestFromL1Cache = ruby_system.network.slave
        l1_cntrl.responseFromL1Cache = ruby_system.network.slave
        l1_cntrl.persistentFromL1Cache = ruby_system.network.slave

        l1_cntrl.requestToL1Cache = ruby_system.network.master
        l1_cntrl.responseToL1Cache = ruby_system.network.master
        l1_cntrl.persistentToL1Cache = ruby_system.network.master

    # L2 banks index above the block-offset and bank-select bits.
    l2_index_start = block_size_bits + l2_bits

    for i in xrange(options.num_l2caches):
        #
        # First create the Ruby objects associated with this L2 bank
        #
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = l2_index_start)

        l2_cntrl = L2Cache_Controller(version = i,
                                      L2cache = l2_cache,
                                      N_tokens = n_tokens,
                                      transitions_per_cycle = options.ports,
                                      ruby_system = ruby_system)

        exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
        l2_cntrl_nodes.append(l2_cntrl)

        # Connect the L2 controllers and the network
        l2_cntrl.GlobalRequestFromL2Cache = ruby_system.network.slave
        l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
        l2_cntrl.responseFromL2Cache = ruby_system.network.slave

        l2_cntrl.GlobalRequestToL2Cache = ruby_system.network.master
        l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
        l2_cntrl.responseToL2Cache = ruby_system.network.master
        l2_cntrl.persistentToL2Cache = ruby_system.network.master

    # Split physical memory evenly across the directories; the assert
    # requires an exact division (Python 2: '/' on ints is integer division).
    phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
    assert(phys_mem_size % options.num_dirs == 0)
    mem_module_size = phys_mem_size / options.num_dirs

    # Run each of the ruby memory controllers at a ratio of the frequency of
    # the ruby system
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
                                          clk_domain=ruby_system.clk_domain,
                                          clk_divider=3)

    for i in xrange(options.num_dirs):
        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        dir_cntrl = Directory_Controller(version = i,
                                         directory = RubyDirectoryMemory(
                                             version = i, size = dir_size),
                                         l2_select_num_bits = l2_bits,
                                         transitions_per_cycle = options.ports,
                                         ruby_system = ruby_system)

        exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

        # Connect the directory controllers and the network
        dir_cntrl.requestToDir = ruby_system.network.master
        dir_cntrl.responseToDir = ruby_system.network.master
        dir_cntrl.persistentToDir = ruby_system.network.master
        dir_cntrl.dmaRequestToDir = ruby_system.network.master

        dir_cntrl.requestFromDir = ruby_system.network.slave
        dir_cntrl.responseFromDir = ruby_system.network.slave
        dir_cntrl.persistentFromDir = ruby_system.network.slave
        dir_cntrl.dmaResponseFromDir = ruby_system.network.slave

    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system,
                               slave = dma_port)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        # Connect the dma controller to the network
        dma_cntrl.responseFromDir = ruby_system.network.master
        dma_cntrl.reqToDirectory = ruby_system.network.slave

    all_cntrls = l1_cntrl_nodes + \
                 l2_cntrl_nodes + \
                 dir_cntrl_nodes + \
                 dma_cntrl_nodes

    # Create the io controller and the sequencer (full-system mode only).
    # It reuses the DMA controller type with version = len(dma_ports), i.e.
    # the next free version number after the real DMA controllers.
    if full_system:
        io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the io controller to the network
        io_controller.responseFromDir = ruby_system.network.master
        io_controller.reqToDirectory = ruby_system.network.slave

        all_cntrls = all_cntrls + [io_controller]

    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, dir_cntrl_nodes, topology)