# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann

import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
from Ruby import send_evicts

#
# Declare caches used by the protocol
#
class L1Cache(RubyCache): pass
class L2Cache(RubyCache): pass

#
# Probe filter is a cache
#
class ProbeFilter(RubyCache): pass


def define_options(parser):
    """Register the MOESI_hammer-specific command line options on *parser*.

    Adds switches controlling atomic-migration sharing, the probe
    filter, and the full-bit directory.
    """
    parser.add_option("--allow-atomic-migration", action="store_true",
          help="allow migratory sharing for atomic only accessed blocks")
    parser.add_option("--pf-on", action="store_true",
          help="Hammer: enable Probe Filter")
    parser.add_option("--dir-on", action="store_true",
          help="Hammer: enable Full-bit Directory")


def create_system(options, full_system, system, dma_ports, ruby_system):
    """Instantiate the Ruby controllers and sequencers for MOESI_hammer.

    Creates one L1 controller + sequencer per cpu, one directory
    controller (with probe filter) per memory module, one DMA controller
    per dma port (plus an io controller in full-system mode), wires every
    controller's message buffers to the network, and returns the tuple
    (cpu_sequencers, dir_cntrl_nodes, topology).
    """

    if buildEnv['PROTOCOL'] != 'MOESI_hammer':
        panic("This script requires the MOESI_hammer protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to
    # be consistent with the NetDest list.  Therefore the l1 controller
    # nodes must be listed before the directory nodes and directory nodes
    # before dma nodes, etc.
    #
    l1_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure
    # the controller constructors are called before the network constructor
    #
    block_size_bits = int(math.log(options.cacheline_size, 2))

    for i in xrange(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        #
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits,
                            is_icache = True)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits)
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = block_size_bits)

        # the ruby random tester reuses num_cpus to specify the
        # number of cpu ports connected to the tester object, which
        # is stored in system.cpu. because there is only ever one
        # tester object, num_cpus is not necessarily equal to the
        # size of system.cpu; therefore if len(system.cpu) == 1
        # we use system.cpu[0] to set the clk_domain, thereby ensuring
        # we don't index off the end of the cpu list.
        if len(system.cpu) == 1:
            clk_domain = system.cpu[0].clk_domain
        else:
            clk_domain = system.cpu[i].clk_domain

        l1_cntrl = L1Cache_Controller(version=i, L1Icache=l1i_cache,
                                      L1Dcache=l1d_cache, L2cache=l2_cache,
                                      no_mig_atomic=not \
                                        options.allow_atomic_migration,
                                      send_evictions=send_evicts(options),
                                      transitions_per_cycle=options.ports,
                                      clk_domain=clk_domain,
                                      ruby_system=ruby_system)

        cpu_seq = RubySequencer(version=i, icache=l1i_cache,
                                dcache=l1d_cache, clk_domain=clk_domain,
                                ruby_system=ruby_system)

        l1_cntrl.sequencer = cpu_seq
        if options.recycle_latency:
            l1_cntrl.recycle_latency = options.recycle_latency

        # Attach the controller to the ruby system under a unique name.
        # setattr() is the direct equivalent of the exec() string
        # assignment previously used here, without dynamic code execution.
        setattr(ruby_system, "l1_cntrl%d" % i, l1_cntrl)

        # Add controllers and sequencers to the appropriate lists
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

        # Connect the L1 controller and the network
        # Connect the buffers from the controller to network
        l1_cntrl.requestFromCache = MessageBuffer()
        l1_cntrl.requestFromCache.master = ruby_system.network.slave
        l1_cntrl.responseFromCache = MessageBuffer()
        l1_cntrl.responseFromCache.master = ruby_system.network.slave
        l1_cntrl.unblockFromCache = MessageBuffer()
        l1_cntrl.unblockFromCache.master = ruby_system.network.slave

        l1_cntrl.triggerQueue = MessageBuffer()

        # Connect the buffers from the network to the controller
        l1_cntrl.mandatoryQueue = MessageBuffer()
        l1_cntrl.forwardToCache = MessageBuffer()
        l1_cntrl.forwardToCache.slave = ruby_system.network.master
        l1_cntrl.responseToCache = MessageBuffer()
        l1_cntrl.responseToCache.slave = ruby_system.network.master

    phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
    assert(phys_mem_size % options.num_dirs == 0)
    mem_module_size = phys_mem_size / options.num_dirs

    #
    # determine size and index bits for probe filter
    # By default, the probe filter size is configured to be twice the
    # size of the L2 cache.
    #
    pf_size = MemorySize(options.l2_size)
    pf_size.value = pf_size.value * 2
    dir_bits = int(math.log(options.num_dirs, 2))
    pf_bits = int(math.log(pf_size.value, 2))
    if options.numa_high_bit:
        if options.pf_on or options.dir_on:
            # if numa high bit explicitly set, make sure it does not
            # overlap with the probe filter index
            assert(options.numa_high_bit - dir_bits > pf_bits)

        # set the probe filter start bit to just above the block offset
        pf_start_bit = block_size_bits
    else:
        if dir_bits > 0:
            pf_start_bit = dir_bits + block_size_bits - 1
        else:
            pf_start_bit = block_size_bits

    # Run each of the ruby memory controllers at a ratio of the frequency
    # of the ruby system
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
                                          clk_domain=ruby_system.clk_domain,
                                          clk_divider=3)

    for i in xrange(options.num_dirs):
        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        pf = ProbeFilter(size = pf_size, assoc = 4,
                         start_index_bit = pf_start_bit)

        dir_cntrl = Directory_Controller(version = i,
                                         directory = RubyDirectoryMemory(
                                             version = i, size = dir_size),
                                         probeFilter = pf,
                                         probe_filter_enabled = options.pf_on,
                                         full_bit_dir_enabled = options.dir_on,
                                         transitions_per_cycle = options.ports,
                                         ruby_system = ruby_system)

        if options.recycle_latency:
            dir_cntrl.recycle_latency = options.recycle_latency

        setattr(ruby_system, "dir_cntrl%d" % i, dir_cntrl)
        dir_cntrl_nodes.append(dir_cntrl)

        # Connect the directory controller to the network
        dir_cntrl.forwardFromDir = MessageBuffer()
        dir_cntrl.forwardFromDir.master = ruby_system.network.slave
        dir_cntrl.responseFromDir = MessageBuffer()
        dir_cntrl.responseFromDir.master = ruby_system.network.slave
        dir_cntrl.dmaResponseFromDir = MessageBuffer(ordered = True)
        dir_cntrl.dmaResponseFromDir.master = ruby_system.network.slave

        dir_cntrl.triggerQueue = MessageBuffer(ordered = True)

        dir_cntrl.unblockToDir = MessageBuffer()
        dir_cntrl.unblockToDir.slave = ruby_system.network.master
        dir_cntrl.responseToDir = MessageBuffer()
        dir_cntrl.responseToDir.slave = ruby_system.network.master
        dir_cntrl.requestToDir = MessageBuffer()
        dir_cntrl.requestToDir.slave = ruby_system.network.master
        dir_cntrl.dmaRequestToDir = MessageBuffer(ordered = True)
        dir_cntrl.dmaRequestToDir.slave = ruby_system.network.master
        dir_cntrl.responseFromMemory = MessageBuffer()

    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system,
                               slave = dma_port)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        setattr(ruby_system, "dma_cntrl%d" % i, dma_cntrl)
        dma_cntrl_nodes.append(dma_cntrl)

        if options.recycle_latency:
            dma_cntrl.recycle_latency = options.recycle_latency

        # Connect the dma controller to the network
        dma_cntrl.responseFromDir = MessageBuffer(ordered = True)
        dma_cntrl.responseFromDir.slave = ruby_system.network.master
        dma_cntrl.requestToDir = MessageBuffer()
        dma_cntrl.requestToDir.master = ruby_system.network.slave
        dma_cntrl.mandatoryQueue = MessageBuffer()

    all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes

    # Create the io controller and the sequencer
    if full_system:
        io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the dma controller to the network
        io_controller.responseFromDir = MessageBuffer(ordered = True)
        io_controller.responseFromDir.slave = ruby_system.network.master
        io_controller.requestToDir = MessageBuffer()
        io_controller.requestToDir.master = ruby_system.network.slave
        io_controller.mandatoryQueue = MessageBuffer()

        all_cntrls = all_cntrls + [io_controller]

    ruby_system.network.number_of_virtual_networks = 6
    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, dir_cntrl_nodes, topology)