# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann

import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from .Ruby import create_topology, create_directories
from .Ruby import send_evicts

#
# Declare caches used by the protocol
#
class L1Cache(RubyCache): pass

def define_options(parser):
    return

def create_system(options, full_system, system, dma_ports, bootmem,
                  ruby_system):

    if buildEnv['PROTOCOL'] != 'MI_example':
        panic("This script requires the MI_example protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to
    # be consistent with the NetDest list.  Therefore the l1 controller
    # nodes must be listed before the directory nodes, and the directory
    # nodes before the dma nodes, etc.
    #
    l1_cntrl_nodes = []
    dma_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure
    # the controller constructors are called before the network constructor.
    #
    block_size_bits = int(math.log(options.cacheline_size, 2))

    for i in range(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu.
        # Only one cache exists for this protocol, so by default use the L1D
        # config parameters.
        #
        cache = L1Cache(size = options.l1d_size,
                        assoc = options.l1d_assoc,
                        start_index_bit = block_size_bits)

        # The ruby random tester reuses num_cpus to specify the number of
        # cpu ports connected to the tester object, which is stored in
        # system.cpu.  Because there is only ever one tester object,
        # num_cpus is not necessarily equal to the size of system.cpu;
        # therefore, if len(system.cpu) == 1 we use system.cpu[0] to set
        # the clk_domain, ensuring we don't index off the end of the cpu
        # list.
        if len(system.cpu) == 1:
            clk_domain = system.cpu[0].clk_domain
        else:
            clk_domain = system.cpu[i].clk_domain

        # Only one unified L1 cache exists; it caches both instructions
        # and data.
        l1_cntrl = L1Cache_Controller(version=i, cacheMemory=cache,
                                      send_evictions=send_evicts(options),
                                      transitions_per_cycle=options.ports,
                                      clk_domain=clk_domain,
                                      ruby_system=ruby_system)

        cpu_seq = RubySequencer(version=i, icache=cache, dcache=cache,
                                clk_domain=clk_domain,
                                ruby_system=ruby_system)

        l1_cntrl.sequencer = cpu_seq
        exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)

        # Add controllers and sequencers to the appropriate lists
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

        # Connect the L1 controller and the network
        l1_cntrl.mandatoryQueue = MessageBuffer()
        l1_cntrl.requestFromCache = MessageBuffer(ordered = True)
        l1_cntrl.requestFromCache.master = ruby_system.network.slave
        l1_cntrl.responseFromCache = MessageBuffer(ordered = True)
        l1_cntrl.responseFromCache.master = ruby_system.network.slave
        l1_cntrl.forwardToCache = MessageBuffer(ordered = True)
        l1_cntrl.forwardToCache.slave = ruby_system.network.master
        l1_cntrl.responseToCache = MessageBuffer(ordered = True)
        l1_cntrl.responseToCache.slave = ruby_system.network.master

    phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
    assert(phys_mem_size % options.num_dirs == 0)
    mem_module_size = phys_mem_size // options.num_dirs

    # Run each of the ruby memory controllers at a ratio of the frequency
    # of the ruby system.
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
        clk_domain=ruby_system.clk_domain,
        clk_divider=3)

    mem_dir_cntrl_nodes, rom_dir_cntrl_node = create_directories(
        options, bootmem, ruby_system, system)
    dir_cntrl_nodes = mem_dir_cntrl_nodes[:]
    if rom_dir_cntrl_node is not None:
        dir_cntrl_nodes.append(rom_dir_cntrl_node)
    for dir_cntrl in dir_cntrl_nodes:
        # Connect the directory controllers and the network
        dir_cntrl.requestToDir = MessageBuffer(ordered = True)
        dir_cntrl.requestToDir.slave = ruby_system.network.master
        dir_cntrl.dmaRequestToDir = MessageBuffer(ordered = True)
        dir_cntrl.dmaRequestToDir.slave = ruby_system.network.master

        dir_cntrl.responseFromDir = MessageBuffer()
        dir_cntrl.responseFromDir.master = ruby_system.network.slave
        dir_cntrl.dmaResponseFromDir = MessageBuffer(ordered = True)
        dir_cntrl.dmaResponseFromDir.master = ruby_system.network.slave
        dir_cntrl.forwardFromDir = MessageBuffer()
        dir_cntrl.forwardFromDir.master = ruby_system.network.slave
        dir_cntrl.responseFromMemory = MessageBuffer()

    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        # Connect the dma controller to the network
        dma_cntrl.mandatoryQueue = MessageBuffer()
        dma_cntrl.requestToDir = MessageBuffer()
        dma_cntrl.requestToDir.master = ruby_system.network.slave
        dma_cntrl.responseFromDir = MessageBuffer(ordered = True)
        dma_cntrl.responseFromDir.slave = ruby_system.network.master

    all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes

    # Create the io controller and the sequencer
    if full_system:
        io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the dma controller to the network
        io_controller.mandatoryQueue = MessageBuffer()
        io_controller.requestToDir = MessageBuffer()
        io_controller.requestToDir.master = ruby_system.network.slave
        io_controller.responseFromDir = MessageBuffer(ordered = True)
        io_controller.responseFromDir.slave = ruby_system.network.master

        all_cntrls = all_cntrls + [io_controller]

    ruby_system.network.number_of_virtual_networks = 5
    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, mem_dir_cntrl_nodes, topology)
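
# Usage note (an assumption based on the relative imports above, not
# something enforced by this file): this module is not run directly.
# The companion Ruby.py script imports the protocol module selected by
# buildEnv['PROTOCOL'] and calls define_options() while the command-line
# options are being parsed, then create_system() once the rest of the
# simulated system has been constructed.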