// mshr.cc, revision 10922
/*
 * Copyright (c) 2012-2013, 2015 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 */

/**
 * @file
 * Miss Status and Handling Register (MSHR) definitions.
 */

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "mem/cache/cache.hh"
#include "mem/cache/mshr.hh"
#include "sim/core.hh"

using namespace std;

MSHR::MSHR() : readyTime(0), _isUncacheable(false), downstreamPending(false),
               pendingDirty(false),
               postInvalidate(false), postDowngrade(false),
               queue(NULL), order(0), blkAddr(0),
               blkSize(0), isSecure(false), inService(false),
               isForward(false), threadNum(InvalidThreadID), data(NULL)
{
}


MSHR::TargetList::TargetList()
    : needsExclusive(false), hasUpgrade(false)
{}


inline void
MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
                      Counter order, Target::Source source, bool markPending)
{
    if (source != Target::FromSnoop) {
        if (pkt->needsExclusive()) {
            needsExclusive = true;
        }

        // StoreCondReq is effectively an upgrade if it's in an MSHR
        // since it would have been failed already if we didn't have a
        // read-only copy
        if (pkt->isUpgrade() || pkt->cmd == MemCmd::StoreCondReq) {
            hasUpgrade = true;
        }
    }

    if (markPending) {
        // Iterate over the SenderState stack and see if we find
        // an MSHR entry. If we do, set the downstreamPending
        // flag. Otherwise, do nothing.
        MSHR *mshr = pkt->findNextSenderState<MSHR>();
        if (mshr != NULL) {
            assert(!mshr->downstreamPending);
            mshr->downstreamPending = true;
        }
    }

    emplace_back(pkt, readyTime, order, source, markPending);
}


static void
replaceUpgrade(PacketPtr pkt)
{
    if (pkt->cmd == MemCmd::UpgradeReq) {
        pkt->cmd = MemCmd::ReadExReq;
        DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
    } else if (pkt->cmd == MemCmd::SCUpgradeReq) {
        pkt->cmd = MemCmd::SCUpgradeFailReq;
        DPRINTF(Cache, "Replacing SCUpgradeReq with SCUpgradeFailReq\n");
    } else if (pkt->cmd == MemCmd::StoreCondReq) {
        pkt->cmd = MemCmd::StoreCondFailReq;
        DPRINTF(Cache, "Replacing StoreCondReq with StoreCondFailReq\n");
    }
}


void
MSHR::TargetList::replaceUpgrades()
{
    if (!hasUpgrade)
        return;

    for (auto& t : *this) {
        replaceUpgrade(t.pkt);
    }

    hasUpgrade = false;
}


void
MSHR::TargetList::clearDownstreamPending()
{
    for (auto& t : *this) {
        if (t.markedPending) {
            // Iterate over the SenderState stack and see if we find
            // an MSHR entry. If we find one, clear the
            // downstreamPending flag by calling
            // clearDownstreamPending(). This recursively clears the
            // downstreamPending flag in all caches this packet has
            // passed through.
            MSHR *mshr = t.pkt->findNextSenderState<MSHR>();
            if (mshr != NULL) {
                mshr->clearDownstreamPending();
            }
        }
    }
}


bool
MSHR::TargetList::checkFunctional(PacketPtr pkt)
{
    for (auto& t : *this) {
        if (pkt->checkFunctional(t.pkt)) {
            return true;
        }
    }

    return false;
}


void
MSHR::TargetList::print(std::ostream &os, int verbosity,
                        const std::string &prefix) const
{
    for (auto& t : *this) {
        const char *s;
        switch (t.source) {
          case Target::FromCPU:
            s = "FromCPU";
            break;
          case Target::FromSnoop:
            s = "FromSnoop";
            break;
          case Target::FromPrefetcher:
            s = "FromPrefetcher";
            break;
          default:
            s = "";
            break;
        }
        ccprintf(os, "%s%s: ", prefix, s);
        t.pkt->print(os, verbosity, "");
    }
}


void
MSHR::allocate(Addr blk_addr, unsigned blk_size, PacketPtr target,
               Tick when_ready, Counter _order)
{
    blkAddr = blk_addr;
    blkSize = blk_size;
    isSecure = target->isSecure();
    readyTime = when_ready;
    order = _order;
    assert(target);
    isForward = false;
    _isUncacheable = target->req->isUncacheable();
    inService = false;
    downstreamPending = false;
    threadNum = 0;
    assert(targets.isReset());
    // Don't know of a case where we would allocate a new MSHR for a
    // snoop (mem-side request), so set source according to request here
    Target::Source source = (target->cmd == MemCmd::HardPFReq) ?
        Target::FromPrefetcher : Target::FromCPU;
    targets.add(target, when_ready, _order, source, true);
    assert(deferredTargets.isReset());
    data = NULL;
}


void
MSHR::clearDownstreamPending()
{
    assert(downstreamPending);
    downstreamPending = false;
    // recursively clear flag on any MSHRs we will be forwarding
    // responses to
    targets.clearDownstreamPending();
}

bool
MSHR::markInService(bool pending_dirty_resp)
{
    assert(!inService);
    if (isForwardNoResponse()) {
        // we just forwarded the request packet & don't expect a
        // response, so get rid of it
        assert(getNumTargets() == 1);
        popTarget();
        return true;
    }

    inService = true;
    pendingDirty = targets.needsExclusive || pending_dirty_resp;
    postInvalidate = postDowngrade = false;

    if (!downstreamPending) {
        // let upstream caches know that the request has made it to a
        // level where it's going to get a response
        targets.clearDownstreamPending();
    }
    return false;
}


void
MSHR::deallocate()
{
    assert(targets.empty());
    targets.resetFlags();
    assert(deferredTargets.isReset());
    inService = false;
}

/*
 * Adds a target to an MSHR
 */
void
MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order)
{
    // assume we'd never issue a prefetch when we've got an
    // outstanding miss
    assert(pkt->cmd != MemCmd::HardPFReq);

    // uncacheable accesses always allocate a new MSHR, and cacheable
    // accesses ignore any uncacheable MSHRs, thus we should never
    // have targets added if originally allocated uncacheable
    assert(!_isUncacheable);

    // if there's a request already in service for this MSHR, we will
    // have to defer the new target until after the response if any of
    // the following are true:
    // - there are other targets already deferred
    // - there's a pending invalidate to be applied after the response
    //   comes back (but before this target is processed)
    // - this target requires an exclusive block and either we're not
    //   getting an exclusive block back or we have already snooped
    //   another read request that will downgrade our exclusive block
    //   to shared
    if (inService &&
        (!deferredTargets.empty() || hasPostInvalidate() ||
         (pkt->needsExclusive() &&
          (!isPendingDirty() || hasPostDowngrade() || isForward)))) {
        // need to put on deferred list
        if (hasPostInvalidate())
            replaceUpgrade(pkt);
        deferredTargets.add(pkt, whenReady, _order, Target::FromCPU, true);
    } else {
        // No request outstanding, or still OK to append to
        // outstanding request: append to regular target list.  Only
        // mark pending if current request hasn't been issued yet
        // (isn't in service).
        targets.add(pkt, whenReady, _order, Target::FromCPU, !inService);
    }
}

bool
MSHR::handleSnoop(PacketPtr pkt, Counter _order)
{
    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
        // Request has not been issued yet, or it's been issued
        // locally but is buffered unissued at some downstream cache
        // which is forwarding us this snoop.  Either way, the packet
        // we're snooping logically precedes this MSHR's request, so
        // the snoop has no impact on the MSHR, but must be processed
        // in the standard way by the cache.  The only exception is
        // that if we're an L2+ cache buffering an UpgradeReq from a
        // higher-level cache, and the snoop is invalidating, then our
        // buffered upgrades must be converted to read exclusives,
        // since the upper-level cache no longer has a valid copy.
        // That is, even though the upper-level cache got out on its
        // local bus first, some other invalidating transaction
        // reached the global bus before the upgrade did.
        if (pkt->needsExclusive()) {
            targets.replaceUpgrades();
            deferredTargets.replaceUpgrades();
        }

        return false;
    }

    // From here on down, the request issued by this MSHR logically
    // precedes the request we're snooping.
    if (pkt->needsExclusive()) {
        // snooped request still precedes the re-request we'll have to
        // issue for deferred targets, if any...
        deferredTargets.replaceUpgrades();
    }

    if (hasPostInvalidate()) {
        // a prior snoop has already appended an invalidation, so
        // logically we don't have the block anymore; no need for
        // further snooping.
        return true;
    }

    if (isPendingDirty() || pkt->isInvalidate()) {
        // We need to save and replay the packet in two cases:
        // 1. We're awaiting an exclusive copy, so ownership is pending,
        //    and we need to respond after we receive data.
        // 2. It's an invalidation (e.g., UpgradeReq), and we need
        //    to forward the snoop up the hierarchy after the current
        //    transaction completes.

        // Actual target device (typ. a memory) will delete the
        // packet on reception, so we need to save a copy here.

        // Clear flags and also allocate new data as the original
        // packet data storage may have been deleted by the time we
        // get to send this packet.
        PacketPtr cp_pkt = nullptr;

        if (isPendingDirty()) {
            // Case 1: The new packet will need to get the response from the
            // MSHR already queued up here
            cp_pkt = new Packet(pkt, true, true);
            pkt->assertMemInhibit();
            // in the case of an uncacheable request there is no need
            // to set the exclusive flag, but since the recipient does
            // not care there is no harm in doing so
            pkt->setSupplyExclusive();
        } else {
            // Case 2: We only need to buffer the packet for information
            // purposes; the original request can proceed without waiting
            // => Create a copy of the request, as that may get deallocated
            // as well
            cp_pkt = new Packet(new Request(*pkt->req), pkt->cmd);
            DPRINTF(Cache, "Copying packet %p -> %p and request %p -> %p\n",
                    pkt, cp_pkt, pkt->req, cp_pkt->req);
        }
        targets.add(cp_pkt, curTick(), _order, Target::FromSnoop,
                    downstreamPending && targets.needsExclusive);

        if (pkt->needsExclusive()) {
            // This transaction will take away our pending copy
            postInvalidate = true;
        }
    }

    if (!pkt->needsExclusive() && !pkt->req->isUncacheable()) {
        // This transaction will get a read-shared copy, downgrading
        // our copy if we had an exclusive one
        postDowngrade = true;
        pkt->assertShared();
    }

    return true;
}


bool
MSHR::promoteDeferredTargets()
{
    assert(targets.empty());
    if (deferredTargets.empty()) {
        return false;
    }

    // swap targets & deferredTargets lists
    std::swap(targets, deferredTargets);

    // clear deferredTargets flags
    deferredTargets.resetFlags();

    order = targets.front().order;
    readyTime = std::max(curTick(), targets.front().readyTime);
    return true;
}


void
MSHR::handleFill(PacketPtr pkt, CacheBlk *blk)
{
    if (!pkt->sharedAsserted()
        && !(hasPostInvalidate() || hasPostDowngrade())
        && deferredTargets.needsExclusive) {
        // We got an exclusive response, but we have deferred targets
        // which are waiting to request an exclusive copy (not because
        // of a pending invalidate).  This can happen if the original
        // request was for a read-only (non-exclusive) block, but we
        // got an exclusive copy anyway because of the E part of the
        // MOESI/MESI protocol.  Since we got the exclusive copy
        // there's no need to defer the targets, so move them up to
        // the regular target list.
        assert(!targets.needsExclusive);
        targets.needsExclusive = true;
        // if any of the deferred targets were upper-level cache
        // requests marked downstreamPending, need to clear that
        assert(!downstreamPending);  // not pending here anymore
        deferredTargets.clearDownstreamPending();
        // this clears out deferredTargets too
        targets.splice(targets.end(), deferredTargets);
        deferredTargets.resetFlags();
    }
}


bool
MSHR::checkFunctional(PacketPtr pkt)
{
    // For printing, we treat the MSHR as a whole as a single entity.
    // For other requests, we iterate over the individual targets
    // since that's where the actual data lies.
    if (pkt->isPrint()) {
        pkt->checkFunctional(this, blkAddr, isSecure, blkSize, NULL);
        return false;
    } else {
        return (targets.checkFunctional(pkt) ||
                deferredTargets.checkFunctional(pkt));
    }
}


void
MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
{
    ccprintf(os, "%s[%#llx:%#llx](%s) %s %s %s state: %s %s %s %s %s\n",
             prefix, blkAddr, blkAddr + blkSize - 1,
             isSecure ? "s" : "ns",
             isForward ? "Forward" : "",
             isForwardNoResponse() ? "ForwNoResp" : "",
             needsExclusive() ? "Excl" : "",
             _isUncacheable ? "Unc" : "",
             inService ? "InSvc" : "",
             downstreamPending ? "DwnPend" : "",
             hasPostInvalidate() ? "PostInv" : "",
             hasPostDowngrade() ? "PostDowngr" : "");

    ccprintf(os, "%s  Targets:\n", prefix);
    targets.print(os, verbosity, prefix + "    ");
    if (!deferredTargets.empty()) {
        ccprintf(os, "%s  Deferred Targets:\n", prefix);
        deferredTargets.print(os, verbosity, prefix + "      ");
    }
}

std::string
MSHR::print() const
{
    ostringstream str;
    print(str);
    return str.str();
}