MESI_Three_Level.py revision 12065:e3e51756dfef
1# Copyright (c) 2006-2007 The Regents of The University of Michigan 2# Copyright (c) 2009,2015 Advanced Micro Devices, Inc. 3# Copyright (c) 2013 Mark D. Hill and David A. Wood 4# All rights reserved. 5# 6# Redistribution and use in source and binary forms, with or without 7# modification, are permitted provided that the following conditions are 8# met: redistributions of source code must retain the above copyright 9# notice, this list of conditions and the following disclaimer; 10# redistributions in binary form must reproduce the above copyright 11# notice, this list of conditions and the following disclaimer in the 12# documentation and/or other materials provided with the distribution; 13# neither the name of the copyright holders nor the names of its 14# contributors may be used to endorse or promote products derived from 15# this software without specific prior written permission. 16# 17# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#
# Authors: Brad Beckmann
#          Nilay Vaish

import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology, create_directories
from Ruby import send_evicts

#
# Declare caches used by the protocol
#
class L0Cache(RubyCache): pass
class L1Cache(RubyCache): pass
class L2Cache(RubyCache): pass

def define_options(parser):
    """Add the MESI_Three_Level-specific command-line options to parser."""
    parser.add_option("--num-clusters", type = "int", default = 1,
                      help = "number of clusters in a design in which there "
                             "are shared caches private to clusters")
    return

def create_system(options, full_system, system, dma_ports, ruby_system):
    """Build the three-level MESI cache hierarchy for ruby_system.

    Creates per-cpu L0 (split I/D) and L1 controllers, per-cluster shared
    L2 controllers, directory controllers, and DMA controllers, wires all
    of their message buffers to the ruby network, and returns the tuple
    (cpu_sequencers, dir_cntrl_nodes, topology) expected by Ruby.py.
    """

    if buildEnv['PROTOCOL'] != 'MESI_Three_Level':
        fatal("This script requires the MESI_Three_Level protocol "
              "to be built.")

    cpu_sequencers = []

    # The ruby network creation expects the list of nodes in the system to
    # be consistent with the NetDest list. Therefore the l1 controller
    # nodes must be listed before the directory nodes and directory nodes
    # before dma nodes, etc.
    l0_cntrl_nodes = []
    l1_cntrl_nodes = []
    l2_cntrl_nodes = []
    dma_cntrl_nodes = []

    # Cpus and L2 caches are distributed evenly across clusters.
    assert (options.num_cpus % options.num_clusters == 0)
    num_cpus_per_cluster = options.num_cpus // options.num_clusters

    assert (options.num_l2caches % options.num_clusters == 0)
    num_l2caches_per_cluster = options.num_l2caches // options.num_clusters

    # Address bit layout: L2-bank select bits sit directly above the
    # cache-block offset bits.
    l2_bits = int(math.log(num_l2caches_per_cluster, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))
    l2_index_start = block_size_bits + l2_bits

    # Must create the individual controllers before the network to ensure
    # the controller constructors are called before the network constructor.
    for i in xrange(options.num_clusters):
        for j in xrange(num_cpus_per_cluster):
            # Flattened cpu index of cpu j in cluster i.
            cpu_id = i * num_cpus_per_cluster + j

            # Private split L0 instruction/data caches for this cpu.
            l0i_cache = L0Cache(size = '4096B', assoc = 1, is_icache = True,
                                start_index_bit = block_size_bits,
                                replacement_policy = LRUReplacementPolicy())

            l0d_cache = L0Cache(size = '4096B', assoc = 1, is_icache = False,
                                start_index_bit = block_size_bits,
                                replacement_policy = LRUReplacementPolicy())

            # the ruby random tester reuses num_cpus to specify the
            # number of cpu ports connected to the tester object, which
            # is stored in system.cpu. because there is only ever one
            # tester object, num_cpus is not necessarily equal to the
            # size of system.cpu; therefore if len(system.cpu) == 1
            # we use system.cpu[0] to set the clk_domain, thereby ensuring
            # we don't index off the end of the cpu list.
            if len(system.cpu) == 1:
                clk_domain = system.cpu[0].clk_domain
            else:
                # BUGFIX: use the flattened cpu index rather than the
                # cluster index so each controller inherits the clock
                # domain of the cpu it actually serves.
                clk_domain = system.cpu[cpu_id].clk_domain

            l0_cntrl = L0Cache_Controller(
                version = cpu_id, Icache = l0i_cache,
                Dcache = l0d_cache, send_evictions = send_evicts(options),
                clk_domain = clk_domain, ruby_system = ruby_system)

            cpu_seq = RubySequencer(version = cpu_id,
                                    icache = l0i_cache,
                                    clk_domain = clk_domain,
                                    dcache = l0d_cache,
                                    ruby_system = ruby_system)

            l0_cntrl.sequencer = cpu_seq

            # Private unified L1 backing the L0s.
            l1_cache = L1Cache(size = options.l1d_size,
                               assoc = options.l1d_assoc,
                               start_index_bit = block_size_bits,
                               is_icache = False)

            l1_cntrl = L1Cache_Controller(
                version = cpu_id,
                cache = l1_cache, l2_select_num_bits = l2_bits,
                cluster_id = i, ruby_system = ruby_system)

            # Attach the controllers to the ruby system by name so they
            # are SimObject children (setattr avoids dynamic exec).
            setattr(ruby_system, "l0_cntrl%d" % cpu_id, l0_cntrl)
            setattr(ruby_system, "l1_cntrl%d" % cpu_id, l1_cntrl)

            #
            # Add controllers and sequencers to the appropriate lists
            #
            cpu_sequencers.append(cpu_seq)
            l0_cntrl_nodes.append(l0_cntrl)
            l1_cntrl_nodes.append(l1_cntrl)

            # Connect the L0 and L1 controllers (point-to-point, ordered
            # buffers, not routed through the network).
            l0_cntrl.mandatoryQueue = MessageBuffer()
            l0_cntrl.bufferToL1 = MessageBuffer(ordered = True)
            l1_cntrl.bufferFromL0 = l0_cntrl.bufferToL1
            l0_cntrl.bufferFromL1 = MessageBuffer(ordered = True)
            l1_cntrl.bufferToL0 = l0_cntrl.bufferFromL1

            # Connect the L1 controllers and the network
            l1_cntrl.requestToL2 = MessageBuffer()
            l1_cntrl.requestToL2.master = ruby_system.network.slave
            l1_cntrl.responseToL2 = MessageBuffer()
            l1_cntrl.responseToL2.master = ruby_system.network.slave
            l1_cntrl.unblockToL2 = MessageBuffer()
            l1_cntrl.unblockToL2.master = ruby_system.network.slave

            l1_cntrl.requestFromL2 = MessageBuffer()
            l1_cntrl.requestFromL2.slave = ruby_system.network.master
            l1_cntrl.responseFromL2 = MessageBuffer()
            l1_cntrl.responseFromL2.slave = ruby_system.network.master

        for j in xrange(num_l2caches_per_cluster):
            # Shared L2 bank j of cluster i.
            l2_id = i * num_l2caches_per_cluster + j

            l2_cache = L2Cache(size = options.l2_size,
                               assoc = options.l2_assoc,
                               start_index_bit = l2_index_start)

            l2_cntrl = L2Cache_Controller(
                version = l2_id,
                L2cache = l2_cache, cluster_id = i,
                transitions_per_cycle = options.ports,
                ruby_system = ruby_system)

            setattr(ruby_system, "l2_cntrl%d" % l2_id, l2_cntrl)
            l2_cntrl_nodes.append(l2_cntrl)

            # Connect the L2 controllers and the network
            l2_cntrl.DirRequestFromL2Cache = MessageBuffer()
            l2_cntrl.DirRequestFromL2Cache.master = ruby_system.network.slave
            l2_cntrl.L1RequestFromL2Cache = MessageBuffer()
            l2_cntrl.L1RequestFromL2Cache.master = ruby_system.network.slave
            l2_cntrl.responseFromL2Cache = MessageBuffer()
            l2_cntrl.responseFromL2Cache.master = ruby_system.network.slave

            l2_cntrl.unblockToL2Cache = MessageBuffer()
            l2_cntrl.unblockToL2Cache.slave = ruby_system.network.master
            l2_cntrl.L1RequestToL2Cache = MessageBuffer()
            l2_cntrl.L1RequestToL2Cache.slave = ruby_system.network.master
            l2_cntrl.responseToL2Cache = MessageBuffer()
            l2_cntrl.responseToL2Cache.slave = ruby_system.network.master

    # Run each of the ruby memory controllers at a ratio of the frequency
    # of the ruby system.
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
        clk_domain = ruby_system.clk_domain, clk_divider = 3)

    dir_cntrl_nodes = create_directories(options, system.mem_ranges,
                                         ruby_system)
    for dir_cntrl in dir_cntrl_nodes:
        # Connect the directory controllers and the network
        dir_cntrl.requestToDir = MessageBuffer()
        dir_cntrl.requestToDir.slave = ruby_system.network.master
        dir_cntrl.responseToDir = MessageBuffer()
        dir_cntrl.responseToDir.slave = ruby_system.network.master
        dir_cntrl.responseFromDir = MessageBuffer()
        dir_cntrl.responseFromDir.master = ruby_system.network.slave
        # Internal buffer to the memory controller; not network-routed.
        dir_cntrl.responseFromMemory = MessageBuffer()

    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i, ruby_system = ruby_system)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        setattr(ruby_system, "dma_cntrl%d" % i, dma_cntrl)
        dma_cntrl.dma_sequencer.slave = dma_port
        dma_cntrl_nodes.append(dma_cntrl)

        # Connect the dma controller to the network
        dma_cntrl.mandatoryQueue = MessageBuffer()
        dma_cntrl.responseFromDir = MessageBuffer(ordered = True)
        dma_cntrl.responseFromDir.slave = ruby_system.network.master
        dma_cntrl.requestToDir = MessageBuffer()
        dma_cntrl.requestToDir.master = ruby_system.network.slave

    all_cntrls = l0_cntrl_nodes + \
                 l1_cntrl_nodes + \
                 l2_cntrl_nodes + \
                 dir_cntrl_nodes + \
                 dma_cntrl_nodes

    # Create the io controller and the sequencer
    if full_system:
        io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the dma controller to the network
        io_controller.mandatoryQueue = MessageBuffer()
        io_controller.responseFromDir = MessageBuffer(ordered = True)
        io_controller.responseFromDir.slave = ruby_system.network.master
        io_controller.requestToDir = MessageBuffer()
        io_controller.requestToDir.master = ruby_system.network.slave

        all_cntrls = all_cntrls + [io_controller]

    # This protocol uses three virtual networks (request/response/unblock).
    ruby_system.network.number_of_virtual_networks = 3
    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, dir_cntrl_nodes, topology)