# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# Copyright (c) 2013 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28# 29# Authors: Brad Beckmann 30# Nilay Vaish 31 32import math 33import m5 34from m5.objects import * 35from m5.defines import buildEnv 36from Ruby import create_topology 37 38# 39# Note: the L1 Cache latency is only used by the sequencer on fast path hits 40# 41class L0Cache(RubyCache): 42 latency = 1 43 44class L1Cache(RubyCache): 45 latency = 5 46 47# 48# Note: the L2 Cache latency is not currently used 49# 50class L2Cache(RubyCache): 51 latency = 15 52 53def define_options(parser): 54 parser.add_option("--num-clusters", type="int", default=1, 55 help="number of clusters in a design in which there are shared\ 56 caches private to clusters") 57 return 58 59def create_system(options, system, dma_ports, ruby_system): 60 61 if buildEnv['PROTOCOL'] != 'MESI_Three_Level': 62 fatal("This script requires the MESI_Three_Level protocol to be built.") 63 64 cpu_sequencers = [] 65 66 # 67 # The ruby network creation expects the list of nodes in the system to be 68 # consistent with the NetDest list. Therefore the l1 controller nodes must be 69 # listed before the directory nodes and directory nodes before dma nodes, etc. 
70 # 71 l0_cntrl_nodes = [] 72 l1_cntrl_nodes = [] 73 l2_cntrl_nodes = [] 74 dir_cntrl_nodes = [] 75 dma_cntrl_nodes = [] 76 77 assert (options.num_cpus % options.num_clusters == 0) 78 num_cpus_per_cluster = options.num_cpus / options.num_clusters 79 80 assert (options.num_l2caches % options.num_clusters == 0) 81 num_l2caches_per_cluster = options.num_l2caches / options.num_clusters 82 83 l2_bits = int(math.log(num_l2caches_per_cluster, 2)) 84 block_size_bits = int(math.log(options.cacheline_size, 2)) 85 l2_index_start = block_size_bits + l2_bits 86 87 # 88 # Must create the individual controllers before the network to ensure the 89 # controller constructors are called before the network constructor 90 # 91 for i in xrange(options.num_clusters): 92 for j in xrange(num_cpus_per_cluster): 93 # 94 # First create the Ruby objects associated with this cpu 95 # 96 l0i_cache = L0Cache(size = '4096B', assoc = 1, is_icache = True, 97 start_index_bit = block_size_bits, replacement_policy="LRU") 98 99 l0d_cache = L0Cache(size = '4096B', assoc = 1, is_icache = False, 100 start_index_bit = block_size_bits, replacement_policy="LRU") 101 102 l0_cntrl = L0Cache_Controller(version = i*num_cpus_per_cluster + j, 103 Icache = l0i_cache, Dcache = l0d_cache, 104 send_evictions = (options.cpu_type == "detailed"),
|
105 clk_domain=system.cpu[i].clk_domain, |
106 ruby_system = ruby_system) 107 108 cpu_seq = RubySequencer(version = i, icache = l0i_cache,
|
109 clk_domain=system.cpu[i].clk_domain, |
110 dcache = l0d_cache, ruby_system = ruby_system) 111 112 l0_cntrl.sequencer = cpu_seq 113 114 l1_cache = L1Cache(size = options.l1d_size, assoc = options.l1d_assoc, 115 start_index_bit = block_size_bits, is_icache = False) 116 117 l1_cntrl = L1Cache_Controller(version = i*num_cpus_per_cluster+j, 118 cache = l1_cache, l2_select_num_bits = l2_bits, 119 cluster_id = i, ruby_system = ruby_system) 120 121 exec("ruby_system.l0_cntrl%d = l0_cntrl" % ( 122 i*num_cpus_per_cluster+j)) 123 exec("ruby_system.l1_cntrl%d = l1_cntrl" % ( 124 i*num_cpus_per_cluster+j)) 125 126 # 127 # Add controllers and sequencers to the appropriate lists 128 # 129 cpu_sequencers.append(cpu_seq) 130 l0_cntrl_nodes.append(l0_cntrl) 131 l1_cntrl_nodes.append(l1_cntrl) 132 l0_cntrl.peer = l1_cntrl 133 134 for j in xrange(num_l2caches_per_cluster): 135 l2_cache = L2Cache(size = options.l2_size, 136 assoc = options.l2_assoc, 137 start_index_bit = l2_index_start) 138 139 l2_cntrl = L2Cache_Controller( 140 version = i * num_l2caches_per_cluster + j, 141 L2cache = l2_cache, cluster_id = i, 142 transitions_per_cycle=options.ports, 143 ruby_system = ruby_system) 144 145 exec("ruby_system.l2_cntrl%d = l2_cntrl" % ( 146 i * num_l2caches_per_cluster + j)) 147 l2_cntrl_nodes.append(l2_cntrl) 148 149 phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges)) 150 assert(phys_mem_size % options.num_dirs == 0) 151 mem_module_size = phys_mem_size / options.num_dirs 152 153 # Run each of the ruby memory controllers at a ratio of the frequency of 154 # the ruby system 155 # clk_divider value is a fix to pass regression. 
156 ruby_system.memctrl_clk_domain = DerivedClockDomain( 157 clk_domain=ruby_system.clk_domain, 158 clk_divider=3) 159 160 for i in xrange(options.num_dirs): 161 # 162 # Create the Ruby objects associated with the directory controller 163 # 164 165 mem_cntrl = RubyMemoryControl( 166 clk_domain = ruby_system.memctrl_clk_domain, 167 version = i, 168 ruby_system = ruby_system) 169 170 dir_size = MemorySize('0B') 171 dir_size.value = mem_module_size 172 173 dir_cntrl = Directory_Controller(version = i, 174 directory = \ 175 RubyDirectoryMemory(version = i, 176 size = dir_size, 177 use_map = 178 options.use_map), 179 memBuffer = mem_cntrl, 180 transitions_per_cycle = options.ports, 181 ruby_system = ruby_system) 182 183 exec("ruby_system.dir_cntrl%d = dir_cntrl" % i) 184 dir_cntrl_nodes.append(dir_cntrl) 185 186 for i, dma_port in enumerate(dma_ports): 187 # 188 # Create the Ruby objects associated with the dma controller 189 # 190 dma_seq = DMASequencer(version = i, 191 ruby_system = ruby_system) 192 193 dma_cntrl = DMA_Controller(version = i, 194 dma_sequencer = dma_seq, 195 transitions_per_cycle = options.ports, 196 ruby_system = ruby_system) 197 198 exec("ruby_system.dma_cntrl%d = dma_cntrl" % i) 199 exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i) 200 dma_cntrl_nodes.append(dma_cntrl) 201 202 all_cntrls = l0_cntrl_nodes + \ 203 l1_cntrl_nodes + \ 204 l2_cntrl_nodes + \ 205 dir_cntrl_nodes + \ 206 dma_cntrl_nodes 207 208 topology = create_topology(all_cntrls, options) 209 return (cpu_sequencers, dir_cntrl_nodes, topology)
|