# MOESI_hammer.py -- gem5 Ruby configuration for the MOESI_hammer protocol (revision 10529)
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann

import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
from Ruby import send_evicts

#
# Note: the L1 Cache latency is only used by the sequencer on fast path hits
#
class L1Cache(RubyCache):
    latency = 2  # cycles; consumed only by the Ruby sequencer fast path

#
# Note: the L2 Cache latency is not currently used
#
class L2Cache(RubyCache):
    latency = 10  # cycles; placeholder -- not read by the hammer protocol

#
# Probe filter is a cache, latency is not used
#
class ProbeFilter(RubyCache):
    latency = 1  # cycles; placeholder -- the probe filter is only a tag store
def define_options(parser):
    """Register MOESI_hammer-specific command line options on *parser*."""
    parser.add_option("--allow-atomic-migration", action="store_true",
          help="allow migratory sharing for atomic only accessed blocks")
    parser.add_option("--pf-on", action="store_true",
          help="Hammer: enable Probe Filter")
    parser.add_option("--dir-on", action="store_true",
          help="Hammer: enable Full-bit Directory")

def create_system(options, full_system, system, dma_ports, ruby_system):
    """Build the Ruby controllers/sequencers for the MOESI_hammer protocol.

    Creates one L1 cache controller + sequencer per cpu, one directory
    controller (with optional probe filter / full-bit directory) per
    memory module, and one DMA controller per dma port (plus an io
    controller in full-system mode), wiring each controller's message
    buffers to the ruby network.

    Returns (cpu_sequencers, dir_cntrl_nodes, topology).
    """
    if buildEnv['PROTOCOL'] != 'MOESI_hammer':
        panic("This script requires the MOESI_hammer protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list.  Therefore the l1 controller nodes must be
    # listed before the directory nodes and directory nodes before dma nodes, etc.
    #
    l1_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
    block_size_bits = int(math.log(options.cacheline_size, 2))

    for i in xrange(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        #
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits,
                            is_icache = True)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits)
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = block_size_bits)

        l1_cntrl = L1Cache_Controller(version = i,
                                      L1Icache = l1i_cache,
                                      L1Dcache = l1d_cache,
                                      L2cache = l2_cache,
                                      no_mig_atomic = not \
                                        options.allow_atomic_migration,
                                      send_evictions = send_evicts(options),
                                      transitions_per_cycle = options.ports,
                                      clk_domain=system.cpu[i].clk_domain,
                                      ruby_system = ruby_system)

        cpu_seq = RubySequencer(version = i,
                                icache = l1i_cache,
                                dcache = l1d_cache,
                                clk_domain=system.cpu[i].clk_domain,
                                ruby_system = ruby_system)

        l1_cntrl.sequencer = cpu_seq
        if options.recycle_latency:
            l1_cntrl.recycle_latency = options.recycle_latency

        # Attach the controller under a per-cpu name; setattr replaces the
        # old exec("ruby_system.l1_cntrl%d = l1_cntrl" % i) idiom.
        setattr(ruby_system, "l1_cntrl%d" % i, l1_cntrl)

        # Add controllers and sequencers to the appropriate lists
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

        # Connect the L1 controller and the network
        # Connect the buffers from the controller to network
        l1_cntrl.requestFromCache = ruby_system.network.slave
        l1_cntrl.responseFromCache = ruby_system.network.slave
        l1_cntrl.unblockFromCache = ruby_system.network.slave

        # Connect the buffers from the network to the controller
        l1_cntrl.forwardToCache = ruby_system.network.master
        l1_cntrl.responseToCache = ruby_system.network.master


    phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
    assert(phys_mem_size % options.num_dirs == 0)
    mem_module_size = phys_mem_size / options.num_dirs

    #
    # determine size and index bits for probe filter
    # By default, the probe filter size is configured to be twice the
    # size of the L2 cache.
    #
    pf_size = MemorySize(options.l2_size)
    pf_size.value = pf_size.value * 2
    dir_bits = int(math.log(options.num_dirs, 2))
    pf_bits = int(math.log(pf_size.value, 2))
    if options.numa_high_bit:
        if options.pf_on or options.dir_on:
            # if numa high bit explicitly set, make sure it does not overlap
            # with the probe filter index
            assert(options.numa_high_bit - dir_bits > pf_bits)

        # set the probe filter start bit to just above the block offset
        pf_start_bit = block_size_bits
    else:
        if dir_bits > 0:
            pf_start_bit = dir_bits + block_size_bits - 1
        else:
            pf_start_bit = block_size_bits

    # Run each of the ruby memory controllers at a ratio of the frequency of
    # the ruby system
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
                                          clk_domain=ruby_system.clk_domain,
                                          clk_divider=3)

    for i in xrange(options.num_dirs):
        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        pf = ProbeFilter(size = pf_size, assoc = 4,
                         start_index_bit = pf_start_bit)

        dir_cntrl = Directory_Controller(version = i,
                                         directory = RubyDirectoryMemory(
                                             version = i, size = dir_size),
                                         probeFilter = pf,
                                         probe_filter_enabled = options.pf_on,
                                         full_bit_dir_enabled = options.dir_on,
                                         transitions_per_cycle = options.ports,
                                         ruby_system = ruby_system)

        if options.recycle_latency:
            dir_cntrl.recycle_latency = options.recycle_latency

        setattr(ruby_system, "dir_cntrl%d" % i, dir_cntrl)
        dir_cntrl_nodes.append(dir_cntrl)

        # Connect the directory controller to the network
        dir_cntrl.forwardFromDir = ruby_system.network.slave
        dir_cntrl.responseFromDir = ruby_system.network.slave
        dir_cntrl.dmaResponseFromDir = ruby_system.network.slave

        dir_cntrl.unblockToDir = ruby_system.network.master
        dir_cntrl.responseToDir = ruby_system.network.master
        dir_cntrl.requestToDir = ruby_system.network.master
        dir_cntrl.dmaRequestToDir = ruby_system.network.master


    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system,
                               slave = dma_port)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        setattr(ruby_system, "dma_cntrl%d" % i, dma_cntrl)
        dma_cntrl_nodes.append(dma_cntrl)

        if options.recycle_latency:
            dma_cntrl.recycle_latency = options.recycle_latency

        # Connect the dma controller to the network
        dma_cntrl.responseFromDir = ruby_system.network.master
        dma_cntrl.requestToDir = ruby_system.network.slave

    all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes

    # Create the io controller and the sequencer
    if full_system:
        io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the dma controller to the network
        io_controller.responseFromDir = ruby_system.network.master
        io_controller.requestToDir = ruby_system.network.slave

        all_cntrls = all_cntrls + [io_controller]

    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, dir_cntrl_nodes, topology)