/*
 * Copyright (c) 2012-2013, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "mem/cache/base.hh"

#include "base/compiler.hh"
#include "base/logging.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheRepl.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "mem/cache/queue_entry.hh"
#include "params/BaseCache.hh"
#include "params/WriteAllocator.hh"
#include "sim/core.hh"

class BaseMasterPort;
class BaseSlavePort;

using namespace std;

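// The CacheSlavePort is the CPU-facing side of the cache: a queued
// port that buffers outgoing responses and implements the
// blocking/retry flow control used when the cache cannot accept new
// requests.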
BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
                                          BaseCache *_cache,
                                          const std::string &_label)
    : QueuedSlavePort(_name, _cache, queue),
      queue(*_cache, *this, true, _label),
      blocked(false), mustSendRetry(false),
      sendRetryEvent([this]{ processSendRetry(); }, _name)
{
}

BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
    : MemObject(p),
      cpuSidePort(p->name + ".cpu_side", this, "CpuSidePort"),
      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
      tags(p->tags),
      prefetcher(p->prefetcher),
      writeAllocator(p->write_allocator),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri),
      blkSize(blk_size),
      lookupLatency(p->tag_latency),
      dataLatency(p->data_latency),
      forwardLatency(p->tag_latency),
      fillLatency(p->data_latency),
      responseLatency(p->response_latency),
      sequentialAccess(p->sequential_access),
      numTarget(p->tgts_per_mshr),
      forwardSnoops(true),
      clusivity(p->clusivity),
      isReadOnly(p->is_read_only),
      blocked(0),
      order(0),
      noTargetMSHR(nullptr),
      missCount(p->max_miss_count),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
      system(p->system)
{
    // the MSHR queue has no reserve entries as we check the MSHR
    // queue on every single allocation, whereas the write queue has
    // as many reserve entries as we have MSHRs, since every MSHR may
    // eventually require a writeback, and we do not check the write
    // buffer before committing to an MSHR

    // forward snoops is overridden in init() once we can query
    // whether the connected master is actually snooping or not

    tempBlock = new TempCacheBlk(blkSize);

    tags->tagsInit();
    if (prefetcher)
        prefetcher->setCache(this);
}

BaseCache::~BaseCache()
{
    delete tempBlock;
}

void
BaseCache::CacheSlavePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(CachePort, "Port is blocking new requests\n");
    blocked = true;
    // if we already scheduled a retry in this cycle, but it has not yet
    // happened, cancel it
    if (sendRetryEvent.scheduled()) {
        owner.deschedule(sendRetryEvent);
        DPRINTF(CachePort, "Port descheduled retry\n");
        mustSendRetry = true;
    }
}

void
BaseCache::CacheSlavePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(CachePort, "Port is accepting new requests\n");
    blocked = false;
    if (mustSendRetry) {
        // @TODO: need to find a better time (next cycle?)
        owner.schedule(sendRetryEvent, curTick() + 1);
    }
}

void
BaseCache::CacheSlavePort::processSendRetry()
{
    DPRINTF(CachePort, "Port is sending retry\n");

    // reset the flag and call retry
    mustSendRetry = false;
    sendRetryReq();
}

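// Regenerate a block's address: the tags can reconstruct it from the
// block's set and way, while the single tempBlock (used when no
// replaceable block was available) stores its address explicitly.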
Addr
BaseCache::regenerateBlkAddr(CacheBlk* blk)
{
    if (blk != tempBlock) {
        return tags->regenerateBlkAddr(blk);
    } else {
        return tempBlock->getAddr();
    }
}

void
BaseCache::init()
{
    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
        fatal("Cache ports on %s are not connected\n", name());
    cpuSidePort.sendRangeChange();
    forwardSnoops = cpuSidePort.isSnooping();
}

BaseMasterPort &
BaseCache::getMasterPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_side") {
        return memSidePort;
    } else {
        return MemObject::getMasterPort(if_name, idx);
    }
}

BaseSlavePort &
BaseCache::getSlavePort(const std::string &if_name, PortID idx)
{
    if (if_name == "cpu_side") {
        return cpuSidePort;
    } else {
        return MemObject::getSlavePort(if_name, idx);
    }
}

bool
BaseCache::inRange(Addr addr) const
{
    for (const auto& r : addrRanges) {
        if (r.contains(addr)) {
            return true;
        }
    }
    return false;
}

void
BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    if (pkt->needsResponse()) {
        pkt->makeTimingResponse();
        // @todo: Make someone pay for this
        pkt->headerDelay = pkt->payloadDelay = 0;

        // In this case request_time already takes into account the
        // delay of the xbar, if any, plus lat; responseLatency is
        // neglected, so the hit latency is modelled simply as the
        // value of lat overridden by access(), which calls the
        // calculateAccessLatency() function.
        cpuSidePort.schedTimingResp(pkt, request_time);
    } else {
        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                pkt->print());

        // queue the packet for deletion, as the sending cache is
        // still relying on it; if the block is found in access(),
        // CleanEvict and Writeback messages will be deleted
        // here as well
        pendingDelete.reset(pkt);
    }
}

void
BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                               Tick forward_time, Tick request_time)
{
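    // Let the (optional) write allocator observe the write stream so
    // that it can decide whether subsequent writes should allocate in
    // the cache or be coalesced and sent downstream without allocating.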
    if (writeAllocator &&
        pkt && pkt->isWrite() && !pkt->req->isUncacheable()) {
        writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
                                   pkt->getBlockAddr(blkSize));
    }

    if (mshr) {
        /// MSHR hit
        /// @note writebacks will be checked in getNextMSHR()
        /// for any conflicting requests to the same block

        //@todo remove hw_pf here

        // Coalesce unless it was a software prefetch (see above).
        if (pkt) {
            assert(!pkt->isWriteback());
            // CleanEvicts corresponding to blocks which have
            // outstanding requests in MSHRs are simply sunk here
            if (pkt->cmd == MemCmd::CleanEvict) {
                pendingDelete.reset(pkt);
            } else if (pkt->cmd == MemCmd::WriteClean) {
                // A WriteClean should never coalesce with any
                // outstanding cache maintenance requests.

                // We use forward_time here because this is an
                // uncached memory write, forwarded to the WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                        pkt->print());

                assert(pkt->req->masterId() < system->maxMasters());
                mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // We use forward_time here because it is the same for
                // all new targets: we have multiple requests for the
                // same address here. It covers the latency to allocate
                // an internal buffer and to schedule an event to the
                // queued port, and also accounts for the additional
                // delay of the xbar.
                mshr->allocateTarget(pkt, forward_time, order++,
                                     allocOnFill(pkt->cmd));
                if (mshr->getNumTargets() == numTarget) {
                    noTargetMSHR = mshr;
                    setBlocked(Blocked_NoTargets);
                    // need to be careful with this... if this mshr isn't
                    // ready yet (i.e. time > curTick()), we don't want to
                    // move it ahead of mshrs that are ready
                    // mshrQueue.moveToFront(mshr);
                }
            }
        }
    } else {
        // no MSHR
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
            // We use forward_time here because this is a
            // writeback or writeclean, forwarded to the WriteBuffer.
            allocateWriteBuffer(pkt, forward_time);
        } else {
            if (blk && blk->isValid()) {
                // If we have a write miss to a valid block, we
                // need to mark the block non-readable.  Otherwise
                // if we allow reads while there's an outstanding
                // write miss, the read could return stale data
                // out of the cache block... a more aggressive
                // system could detect the overlap (if any) and
                // forward data out of the MSHRs, but we don't do
                // that yet.  Note that we do need to leave the
                // block valid so that it stays in the cache, in
                // case we get an upgrade response (and hence no
                // new data) when the write miss completes.
                // As long as CPUs do proper store/load forwarding
                // internally, and have a sufficiently weak memory
                // model, this is probably unnecessary, but at some
                // point it must have seemed like we needed it...
                assert((pkt->needsWritable() && !blk->isWritable()) ||
                       pkt->req->isCacheMaintenance());
                blk->status &= ~BlkReadable;
            }
            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }
    }
}

void
BaseCache::recvTimingReq(PacketPtr pkt)
{
    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    Cycles lat;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() will set the lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // copy writebacks to write buffer here to ensure they logically
        // precede anything happening below
        doWritebacks(writebacks, forward_time);
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just the value set by the access() function.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    if (satisfied) {
        // notify before anything else, as handleTimingReqHit below
        // might turn the packet into a response
        ppHit->notify(pkt);

        if (prefetcher && blk && blk->wasPrefetched()) {
            blk->status &= ~BlkHWPrefetched;
        }

        handleTimingReqHit(pkt, blk, request_time);
    } else {
        handleTimingReqMiss(pkt, blk, forward_time, request_time);

        ppMiss->notify(pkt);
    }

    if (prefetcher) {
        // track time of availability of next prefetch, if any
        Tick next_pf_time = prefetcher->nextPrefetchReadyTime();
        if (next_pf_time != MaxTick) {
            schedMemSideSendEvent(next_pf_time);
        }
    }
}

void
BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
{
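    // An uncacheable write has completed below: pay the response
    // latency plus any delays accumulated on the way back up, then
    // forward the response to the CPU side.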
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort.schedTimingResp(pkt, completion_time);
}

void
BaseCache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    const bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    MSHR::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    PacketList writebacks;

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
         mshr->wasWholeLineWrite);

    // make sure that if the mshr was due to a whole line write then
    // the response is an invalidation
    assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
            writeAllocator->allocate() : mshr->allocOnFill();
        blk = handleFill(pkt, blk, writebacks, allocate);
        assert(blk != nullptr);
    }

    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
        // The block was marked not readable while there was a pending
        // cache maintenance operation, restore its flag.
        blk->status |= BlkReadable;

        // This was a cache clean operation (without invalidate)
        // and we have a copy of the block already. Since there
        // is no invalidation, we can promote targets that don't
        // require a writable copy
        mshr->promoteReadable();
    }

    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
        // If at this point the referenced block is writable and the
        // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
        mshr->promoteWritable();
    }

    serviceMSHRTargets(mshr, pkt, blk);

    if (mshr->promoteDeferredTargets()) {
        // avoid a later read getting stale data while the write miss is
        // outstanding... see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        // while we deallocate an mshr from the queue we still have to
        // check the isFull condition before and after as we might
        // have been using the reserved entries already
        const bool was_full = mshrQueue.isFull();
        mshrQueue.deallocate(mshr);
        if (was_full && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }

    // if we used the temp block, check to see if it's valid and then
    // clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        evictBlock(blk, writebacks);
    }

    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}


Tick
BaseCache::recvAtomic(PacketPtr pkt)
{
    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    // We use lookupLatency here because it specifies the latency of
    // the tag lookup that any access pays.
    Cycles lat = lookupLatency;

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty
        // block. If a dirty block is encountered a WriteClean
        // will update any copies on the path to memory
        // until the point of reference.
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);
    assert(writebacks.empty());

    if (!satisfied) {
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention.  If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used the temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter), first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = evictBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

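    // lat is in cycles; scale by the clock period to return Ticks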
    return lat * clockPeriod();
}

void
BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found.  As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                     blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty ||
        cpuSidePort.trySatisfyFunctional(pkt) ||
        mshrQueue.trySatisfyFunctional(pkt, blk_addr) ||
        writeBuffer.trySatisfyFunctional(pkt, blk_addr) ||
        memSidePort.trySatisfyFunctional(pkt);

    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (from_cpu_side) {
            memSidePort.sendFunctional(pkt);
        } else if (cpuSidePort.isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort.sendFunctionalSnoop(pkt);
        }
    }
}


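// Atomically swap (or conditionally swap) the packet's data with the
// block's data: the old memory value is copied into the packet as the
// return value, and the new value is written only if the optional
// condition value matches the current contents.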
void
BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = pkt->getOffset(blkSize);
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}

QueueEntry*
BaseCache::getNextQueueEntry()
{
    // Check both MSHR queue and write buffer for potential requests,
    // note that null does not mean there is no request, it could
    // simply be that it is not ready
    MSHR *miss_mshr = mshrQueue.getNext();
    WriteQueueEntry *wq_entry = writeBuffer.getNext();

    // If we got a write buffer request ready, first priority is a
    // full write buffer, otherwise we favour the miss requests
    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
        // need to search MSHR queue for conflicting earlier miss.
        MSHR *conflict_mshr =
            mshrQueue.findPending(wq_entry->blkAddr,
                                  wq_entry->isSecure);

        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
            // Service misses in order until conflict is cleared.
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue write
        return wq_entry;
    } else if (miss_mshr) {
        // need to check for conflicting earlier writeback
        WriteQueueEntry *conflict_mshr =
            writeBuffer.findPending(miss_mshr->blkAddr,
                                    miss_mshr->isSecure);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are
            // doing a write and we didn't have permissions
            // then subsequently saw a writeback (owned got evicted).
            // We need to make sure to perform the writeback first
            // to preserve the dirty data, then we can issue the write.

            // should we return wq_entry here instead?  I.e. do we
            // have to flush writes in order?  I don't think so... not
            // for Alpha anyway.  Maybe for x86?
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests.  Try a prefetch.
    assert(!miss_mshr && !wq_entry);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = pkt->getBlockAddr(blkSize);
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt;
            }
        }
    }

    return nullptr;
}

void
BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
{
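    // The two unnamed bool parameters are only meaningful in derived
    // classes (deferred response and pending downgrade in the coherent
    // cache); the base implementation ignores them.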
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            // extract data from cache and save it into the data field in
            // the packet as a return value from this atomic op
            int offset = tags->extractBlkOffset(pkt->getAddr());
            uint8_t *blk_data = blk->data + offset;
            pkt->setData(blk_data);

            // execute AMO operation
            (*(pkt->getAtomicOp()))(blk_data);

            // set block status to dirty
            blk->status |= BlkDirty;
        } else {
            cmpAndSwap(blk, pkt);
        }
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);
    } else if (pkt->isUpgrade()) {
        // sanity check
        assert(!pkt->hasSharers());

        if (blk->isDirty()) {
            // we were in the Owned state, and a cache above us that
            // has the line in Shared state needs to be made aware
            // that the data it already has is in fact dirty
            pkt->setCacheResponding();
            blk->status &= ~BlkDirty;
        }
    } else if (pkt->isClean()) {
        blk->status &= ~BlkDirty;
    } else {
        assert(pkt->isInvalidate());
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
                pkt->print());
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////
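// Compute the latency of a single tag+data access. As a worked
// example (values assumed purely for illustration): with lookup_lat =
// 2 cycles and dataLatency = 4 cycles, a sequential-access cache pays
// 2 + 4 = 6 cycles on a hit, whereas a parallel lookup pays
// max(2, 4) = 4 cycles.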
Cycles
BaseCache::calculateAccessLatency(const CacheBlk* blk,
                                  const Cycles lookup_lat) const
{
    Cycles lat(lookup_lat);

    if (blk != nullptr) {
        // First access tags, then data
        if (sequentialAccess) {
            lat += dataLatency;
        // Latency is dictated by the slowest of tag and data latencies
        } else {
            lat = std::max(lookup_lat, dataLatency);
        }

        // Check if the block to be accessed is available. If not, apply the
        // access latency on top of when the block is ready to be accessed.
        const Tick when_ready = blk->getWhenReady();
        if (when_ready > curTick() &&
            ticksToCycles(when_ready - curTick()) > lat) {
            lat += ticksToCycles(when_ready - curTick());
        }
    }

    return lat;
}

bool
BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                  PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    // Access block in the tags
    Cycles tag_latency(0);
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), tag_latency);

    // Calculate access latency
    lat = calculateAccessLatency(blk, tag_latency);

    DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
            blk ? "hit " + blk->print() : "miss");

    if (pkt->req->isCacheMaintenance()) {
        // A cache maintenance operation is always forwarded to the
        // memory below even if the block is found in dirty state.

        // We defer any changes to the state of the block until we
        // create and mark as in service the mshr for the downstream
        // packet.
        return false;
    }

    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible case is a CleanEvict packet coming from above
        // encountering a Writeback generated in a peer cache at this
        // level and waiting in the write buffer. Cases of upper level
        // peer caches generating CleanEvict and Writeback or simply
        // CleanEvict and CleanEvict almost simultaneously will be
        // caught by snoops sent out by the crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // The CleanEvict and WritebackClean snoop into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();
                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is special case.  We can write the block into
    // the cache without having a writable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());
            return true;
        }

        if (!blk) {
            // need to do a replacement
            blk = allocateBlock(pkt, writebacks);
            if (!blk) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }

            blk->status |= BlkReadable;
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            // TODO: the coherent cache can assert(!blk->isDirty());
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing
        // writable, and we got the writeback in Modified or Exclusive
        // state, if not we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);
        // populate the time when the block will be ready to access.
        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
            pkt->payloadDelay);
        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        if (blk) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if (pkt->cmd == MemCmd::WriteClean) {
        // WriteClean handling is a special case. We can allocate a
        // block directly if it doesn't exist and we can update the
        // block immediately. The WriteClean transfers the ownership
        // of the block as well.
        assert(blkSize == pkt->getSize());

        if (!blk) {
            if (pkt->writeThrough()) {
                // if this is a write through packet, we don't try to
                // allocate if the block is not present
                return false;
            } else {
                // a writeback that misses needs to allocate a new block
                blk = allocateBlock(pkt, writebacks);
                if (!blk) {
                    // no replaceable block available: give up, fwd to
                    // next level.
                    incMissCount(pkt);
                    return false;
                }

                blk->status |= BlkReadable;
            }
        }

        // at this point either this is a writeback or a write-through
        // write clean operation and the block is already in this
        // cache, we need to update the data and the block flags
        assert(blk);
        // TODO: the coherent cache can assert(!blk->isDirty());
        if (!pkt->writeThrough()) {
            blk->status |= BlkDirty;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());

        incHitCount(pkt);
        // populate the time when the block will be ready to access.
        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
            pkt->payloadDelay);
        // if this is a write-through packet it will be sent to the
        // cache below
        return !pkt->writeThrough();
    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
                       blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);
        satisfyRequest(pkt, blk);
        maintainClusivity(pkt->fromCache(), blk);

        return true;
    }

    // Can't satisfy access normally... either no block (blk == nullptr)
    // or have block but need writable

    incMissCount(pkt);

    if (!blk && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

void
BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
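    // e.g. a mostly-exclusive L2: once it has supplied a clean block
    // to a cache above, it drops its own copy so that capacity is not
    // duplicated across levels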
    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
        clusivity == Enums::mostly_excl) {
        // if we have responded to a cache, and our block is still
        // valid, but not dirty, and this cache is mostly exclusive
        // with respect to the cache above, drop the block
        invalidateBlock(blk);
    }
}

CacheBlk*
BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
                      bool allocate)
{
    assert(pkt->isResponse());
    Addr addr = pkt->getAddr();
    bool is_secure = pkt->isSecure();
#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    // When handling a fill, we should have no writes to this line.
    assert(addr == pkt->getBlockAddr(blkSize));
    assert(!writeBuffer.findMatch(addr, is_secure));

    if (!blk) {
        // better have read new data...
        assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);

        // need to do a replacement if allocating, otherwise we stick
        // with the temporary storage
        blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;

        if (!blk) {
            // No replaceable block or a mostly exclusive
            // cache... just use temporary storage to complete the
            // current request and then get rid of it
            blk = tempBlock;
            tempBlock->insert(addr, is_secure);
            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
                    is_secure ? "s" : "ns");
        }
    } else {
        // existing block... probably an upgrade
        // don't clear block status... if block is already dirty we
        // don't want to lose that
    }

    // Block is guaranteed to be valid at this point
    assert(blk->isValid());
    assert(blk->isSecure() == is_secure);
    assert(regenerateBlkAddr(blk) == addr);

    blk->status |= BlkReadable;

    // sanity check for whole-line writes, which should always be
    // marked as writable as part of the fill, and then later marked
    // dirty as part of satisfyRequest
    if (pkt->cmd == MemCmd::InvalidateResp) {
        assert(!pkt->hasSharers());
    }

    // here we deal with setting the appropriate state of the line,
    // and we start by looking at the hasSharers flag, and ignore the
    // cacheResponding flag (normally signalling dirty data) if the
    // packet has sharers, thus the line is never allocated as Owned
    // (dirty but not writable), and always ends up being either
    // Shared, Exclusive or Modified, see Packet::setCacheResponding
    // for more details
    if (!pkt->hasSharers()) {
        // we could get a writable line from memory (rather than a
        // cache) even in a read-only cache, note that we set this bit
        // even for a read-only cache, possibly revisit this decision
        blk->status |= BlkWritable;

        // check if we got this via cache-to-cache transfer (i.e., from a
        // cache that had the block in Modified or Owned state)
        if (pkt->cacheResponding()) {
            // we got the block in Modified state, and invalidated the
            // owner's copy
            blk->status |= BlkDirty;

            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
                          "in read-only cache %s\n", name());
        }
    }

    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
            addr, is_secure ? "s" : "ns", old_state, blk->print());

    // if we got new data, copy it in (checking for a read response
    // and a response that has data is the same in the end)
    if (pkt->isRead()) {
        // sanity checks
        assert(pkt->hasData());
        assert(pkt->getSize() == blkSize);

        pkt->writeDataToBlock(blk->data, blkSize);
    }
    // We pay for fillLatency here.
    blk->setWhenReady(clockEdge(fillLatency) + pkt->payloadDelay);

    return blk;
}

CacheBlk*
BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
{
    // Get address
    const Addr addr = pkt->getAddr();

    // Get secure bit
    const bool is_secure = pkt->isSecure();

    // Find replacement victim
    std::vector<CacheBlk*> evict_blks;
    CacheBlk *victim = tags->findVictim(addr, is_secure, evict_blks);

    // It is valid to return nullptr if there is no victim
    if (!victim)
        return nullptr;

    // Print victim block's information
    DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print());

    // Check for transient state allocations. If any of the entries listed
    // for eviction has a transient state, the allocation fails
    for (const auto& blk : evict_blks) {
        if (blk->isValid()) {
            Addr repl_addr = regenerateBlkAddr(blk);
            MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
            if (repl_mshr) {
                // must be an outstanding upgrade or clean request
                // on a block we're about to replace...
                assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
                       repl_mshr->isCleaning());

                // too hard to replace block with transient state
                // allocation failed, block not inserted
                return nullptr;
            }
        }
    }

    // The victim will be replaced by a new entry, so increase the replacement
    // counter if a valid block is being replaced
    if (victim->isValid()) {
        DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
                "(%s): %s\n", regenerateBlkAddr(victim),
                victim->isSecure() ? "s" : "ns",
                addr, is_secure ? "s" : "ns",
                victim->isDirty() ? "writeback" : "clean");

        replacements++;
    }

    // Evict valid blocks associated with this victim block
    for (const auto& blk : evict_blks) {
        if (blk->isValid()) {
            if (blk->wasPrefetched()) {
                unusedPrefetches++;
            }

            evictBlock(blk, writebacks);
        }
    }

    // Insert new block at victimized entry
    tags->insertBlock(addr, is_secure, pkt->req->masterId(),
                      pkt->req->taskId(), victim);

    return victim;
}
1290
1291void
1292BaseCache::invalidateBlock(CacheBlk *blk)
1293{
1294    // If handling a block present in the Tags, let it do its invalidation
1295    // process, which will update stats and invalidate the block itself
1296    if (blk != tempBlock) {
1297        tags->invalidate(blk);
1298    } else {
1299        tempBlock->invalidate();
1300    }
1301}
1302
1303void
1304BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
1305{
1306    PacketPtr pkt = evictBlock(blk);
1307    if (pkt) {
1308        writebacks.push_back(pkt);
1309    }
1310}
1311
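// Build a writeback packet for an evicted block: dirty blocks become
// WritebackDirty packets, while clean blocks (only when writebackClean
// is set) become WritebackClean packets.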
PacketPtr
BaseCache::writebackBlk(CacheBlk *blk)
{
    chatty_assert(!isReadOnly || writebackClean,
                  "Writeback from read-only cache");
    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));

    writebacks[Request::wbMasterId]++;

    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt =
        new Packet(req, blk->isDirty() ?
                   MemCmd::WritebackDirty : MemCmd::WritebackClean);

    DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
            pkt->print(), blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writeable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}

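// Build a WriteClean packet that pushes the block's data towards
// memory without evicting it; if a destination is specified, the
// packet is additionally marked as write-through to that destination.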
PacketPtr
BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
{
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure()) {
        req->setFlags(Request::SECURE);
    }
    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);

    if (dest) {
        req->setFlags(dest);
        pkt->setWriteThrough();
    }

    DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
            blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writeable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}


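// Walk all blocks and functionally write back any dirty data, e.g. so
// that a checkpoint sees a consistent memory image.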
void
BaseCache::memWriteback()
{
    tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
}

void
BaseCache::memInvalidate()
{
    tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
}

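// The cache is dirty if any block holds modified data that has not yet
// been written back.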
bool
BaseCache::isDirty() const
{
    return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
}

bool
BaseCache::coalesce() const
{
    return writeAllocator && writeAllocator->coalesce();
}

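// Write a dirty block's data to memory with a functional access and
// clear its dirty bit; the block itself remains valid.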
void
BaseCache::writebackVisitor(CacheBlk &blk)
{
    if (blk.isDirty()) {
        assert(blk.isValid());

        RequestPtr request = std::make_shared<Request>(
            regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);

        request->taskId(blk.task_id);
        if (blk.isSecure()) {
            request->setFlags(Request::SECURE);
        }

        Packet packet(request, MemCmd::WriteReq);
        packet.dataStatic(blk.data);

        memSidePort.sendFunctional(&packet);

        blk.status &= ~BlkDirty;
    }
}

void
BaseCache::invalidateVisitor(CacheBlk &blk)
{
    if (blk.isDirty())
        warn_once("Invalidating dirty cache lines. " \
                  "Expect things to break.\n");

    if (blk.isValid()) {
        assert(!blk.isDirty());
        invalidateBlock(&blk);
    }
}

Tick
BaseCache::nextQueueReadyTime() const
{
    Tick nextReady = std::min(mshrQueue.nextReadyTime(),
                              writeBuffer.nextReadyTime());

    // Don't signal prefetch ready time if no MSHRs available
    // Will signal once enough MSHRs are deallocated
    if (prefetcher && mshrQueue.canPrefetch()) {
        nextReady = std::min(nextReady,
                             prefetcher->nextPrefetchReadyTime());
    }

    return nextReady;
}


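// Take the first target of an MSHR, build the corresponding downstream
// packet, and try to send it. Returns true if the cache now has to
// wait for a retry from the memory side.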
bool
BaseCache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

    // if the cache is in write coalescing mode or (additionally) in
    // no allocation mode, and we have a write packet with an MSHR
    // that is not a whole-line write (due to incompatible flags etc),
    // then reset the write mode
    if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) {
        if (!mshr->isWholeLineWrite()) {
            // if we are currently write coalescing, hold on to the
            // MSHR for as many extra cycles as we need to completely
            // write a cache line
            if (writeAllocator->delay(mshr->blkAddr)) {
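                // estimate the time needed to receive the rest of the
                // line, assuming the remaining writes are the same
                // size as this one and arrive one per cycle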
                Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
                DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
                        "for write coalescing\n", tgt_pkt->print(), delay);
                mshrQueue.delay(mshr, delay);
                return false;
            } else {
                writeAllocator->reset();
            }
        } else {
            writeAllocator->resetDelay(mshr->blkAddr);
        }
    }

    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);

    // either a prefetch that is not present upstream, or a normal
    // MSHR request, proceed to get the packet to send downstream
    PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(),
                                     mshr->isWholeLineWrite());

    mshr->isForward = (pkt == nullptr);

    if (mshr->isForward) {
        // not a cache block request, but a response is expected
        // make copy of current packet to forward, keep current
        // copy for response handling
        pkt = new Packet(tgt_pkt, false, true);
        assert(!pkt->isWrite());
    }

    // play it safe and append (rather than set) the sender state,
    // as forwarded packets may already have existing state
    pkt->pushSenderState(mshr);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty block. Mark
        // the packet so that the destination xbar can determine that
        // there will be a follow-up write packet as well.
        pkt->setSatisfied();
    }

    if (!memSidePort.sendTimingReq(pkt)) {
        // we are awaiting a retry; delete the packet and create a
        // new one when we get the opportunity
        delete pkt;

        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        // As part of the call to sendTimingReq the packet is
        // forwarded to all neighbouring caches (and any caches
        // above them) as a snoop. Thus at this point we know if
        // any of the neighbouring caches are responding, and if
        // so, we know it is dirty, and we can determine if it is
        // being passed as Modified, making our MSHR the ordering
        // point
        bool pending_modified_resp = !pkt->hasSharers() &&
            pkt->cacheResponding();
        markInService(mshr, pending_modified_resp);

        if (pkt->isClean() && blk && blk->isDirty()) {
            // A cache clean operation is looking for a dirty
            // block. If a dirty block is encountered, a WriteClean
            // will update any copies on the path to memory up to
            // the point of reference.
            DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
                                             pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);
            doWritebacks(writebacks, 0);
        }

        return false;
    }
}

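// Send the single target of a write-queue entry (an eviction or an
// uncacheable write) downstream. Returns true if the cache now has to
// wait for a retry from the memory side.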
bool
BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
{
    assert(wq_entry);

    // always a single target for write queue entries
    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;

    DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());

    // forward as is, both for evictions and uncacheable writes
    if (!memSidePort.sendTimingReq(tgt_pkt)) {
        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        markInService(wq_entry);
        return false;
    }
}

void
BaseCache::serialize(CheckpointOut &cp) const
{
    bool dirty(isDirty());

    if (dirty) {
        warn("*** The cache still contains dirty data. ***\n");
        warn("    Make sure to drain the system using the correct flags.\n");
        warn("    This checkpoint will not restore correctly " \
             "and dirty data in the cache will be lost!\n");
    }

    // Since we don't checkpoint the data in the cache, any dirty data
    // will be lost when restoring from a checkpoint of a system that
    // wasn't drained properly. Flag the checkpoint as invalid if the
    // cache contains dirty data.
    bool bad_checkpoint(dirty);
    SERIALIZE_SCALAR(bad_checkpoint);
}

void
BaseCache::unserialize(CheckpointIn &cp)
{
    bool bad_checkpoint;
    UNSERIALIZE_SCALAR(bad_checkpoint);
    if (bad_checkpoint) {
        fatal("Restoring from checkpoints with dirty caches is not "
              "supported in the classic memory system. Please remove any "
              "caches or drain them properly before taking checkpoints.\n");
    }
}

void
BaseCache::regStats()
{
    MemObject::regStats();

    using namespace Stats;

    // Hit statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        hits[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_hits")
            .desc("number of " + cstr + " hits")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            hits[access_idx].subname(i, system->getMasterName(i));
        }
    }

// These macros make it easier to sum the right subset of commands and
// to change the subset of commands that are considered "demand" vs
// "non-demand"
#define SUM_DEMAND(s) \
    (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \
     s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq])

// should writebacks be included here?  prior code was inconsistent...
#define SUM_NON_DEMAND(s) \
    (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq])

    demandHits
        .name(name() + ".demand_hits")
        .desc("number of demand (read+write) hits")
        .flags(total | nozero | nonan)
        ;
    demandHits = SUM_DEMAND(hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandHits.subname(i, system->getMasterName(i));
    }

    overallHits
        .name(name() + ".overall_hits")
        .desc("number of overall hits")
        .flags(total | nozero | nonan)
        ;
    overallHits = demandHits + SUM_NON_DEMAND(hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallHits.subname(i, system->getMasterName(i));
    }

    // Miss statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        misses[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_misses")
            .desc("number of " + cstr + " misses")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            misses[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMisses
        .name(name() + ".demand_misses")
        .desc("number of demand (read+write) misses")
        .flags(total | nozero | nonan)
        ;
    demandMisses = SUM_DEMAND(misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMisses.subname(i, system->getMasterName(i));
    }

    overallMisses
        .name(name() + ".overall_misses")
        .desc("number of overall misses")
        .flags(total | nozero | nonan)
        ;
    overallMisses = demandMisses + SUM_NON_DEMAND(misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMisses.subname(i, system->getMasterName(i));
    }

    // Miss latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        missLatency[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_miss_latency")
            .desc("number of " + cstr + " miss cycles")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            missLatency[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMissLatency
        .name(name() + ".demand_miss_latency")
        .desc("number of demand (read+write) miss cycles")
        .flags(total | nozero | nonan)
        ;
    demandMissLatency = SUM_DEMAND(missLatency);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMissLatency.subname(i, system->getMasterName(i));
    }

    overallMissLatency
        .name(name() + ".overall_miss_latency")
        .desc("number of overall miss cycles")
        .flags(total | nozero | nonan)
        ;
    overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMissLatency.subname(i, system->getMasterName(i));
    }

    // access formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        accesses[access_idx]
            .name(name() + "." + cstr + "_accesses")
            .desc("number of " + cstr + " accesses (hits+misses)")
            .flags(total | nozero | nonan)
            ;
        accesses[access_idx] = hits[access_idx] + misses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            accesses[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandAccesses
        .name(name() + ".demand_accesses")
        .desc("number of demand (read+write) accesses")
        .flags(total | nozero | nonan)
        ;
    demandAccesses = demandHits + demandMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandAccesses.subname(i, system->getMasterName(i));
    }

    overallAccesses
        .name(name() + ".overall_accesses")
        .desc("number of overall (read+write) accesses")
        .flags(total | nozero | nonan)
        ;
    overallAccesses = overallHits + overallMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallAccesses.subname(i, system->getMasterName(i));
    }

    // miss rate formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        missRate[access_idx]
            .name(name() + "." + cstr + "_miss_rate")
            .desc("miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;
        missRate[access_idx] = misses[access_idx] / accesses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            missRate[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMissRate
        .name(name() + ".demand_miss_rate")
        .desc("miss rate for demand accesses")
        .flags(total | nozero | nonan)
        ;
    demandMissRate = demandMisses / demandAccesses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMissRate.subname(i, system->getMasterName(i));
    }

    overallMissRate
        .name(name() + ".overall_miss_rate")
        .desc("miss rate for overall accesses")
        .flags(total | nozero | nonan)
        ;
    overallMissRate = overallMisses / overallAccesses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMissRate.subname(i, system->getMasterName(i));
    }

    // miss latency formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        avgMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_miss_latency")
            .desc("average " + cstr + " miss latency")
            .flags(total | nozero | nonan)
            ;
        avgMissLatency[access_idx] =
            missLatency[access_idx] / misses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            avgMissLatency[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandAvgMissLatency
        .name(name() + ".demand_avg_miss_latency")
        .desc("average demand miss latency")
        .flags(total | nozero | nonan)
        ;
    demandAvgMissLatency = demandMissLatency / demandMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandAvgMissLatency.subname(i, system->getMasterName(i));
    }

    overallAvgMissLatency
        .name(name() + ".overall_avg_miss_latency")
        .desc("average overall miss latency")
        .flags(total | nozero | nonan)
        ;
    overallAvgMissLatency = overallMissLatency / overallMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallAvgMissLatency.subname(i, system->getMasterName(i));
    }

    blocked_cycles.init(NUM_BLOCKED_CAUSES);
    blocked_cycles
        .name(name() + ".blocked_cycles")
        .desc("number of cycles access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;


    blocked_causes.init(NUM_BLOCKED_CAUSES);
    blocked_causes
        .name(name() + ".blocked")
        .desc("number of times access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked
        .name(name() + ".avg_blocked_cycles")
        .desc("average number of cycles each access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked = blocked_cycles / blocked_causes;

    unusedPrefetches
        .name(name() + ".unused_prefetches")
        .desc("number of HardPF blocks evicted w/o reference")
        .flags(nozero)
        ;

    writebacks
        .init(system->maxMasters())
        .name(name() + ".writebacks")
        .desc("number of writebacks")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system->maxMasters(); i++) {
        writebacks.subname(i, system->getMasterName(i));
    }

    // MSHR statistics
    // MSHR hit statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_hits[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_hits")
            .desc("number of " + cstr + " MSHR hits")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_hits[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMshrHits
        .name(name() + ".demand_mshr_hits")
        .desc("number of demand (read+write) MSHR hits")
        .flags(total | nozero | nonan)
        ;
    demandMshrHits = SUM_DEMAND(mshr_hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMshrHits.subname(i, system->getMasterName(i));
    }

    overallMshrHits
        .name(name() + ".overall_mshr_hits")
        .desc("number of overall MSHR hits")
        .flags(total | nozero | nonan)
        ;
    overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrHits.subname(i, system->getMasterName(i));
    }

    // MSHR miss statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_misses[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_misses")
            .desc("number of " + cstr + " MSHR misses")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_misses[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMshrMisses
        .name(name() + ".demand_mshr_misses")
        .desc("number of demand (read+write) MSHR misses")
        .flags(total | nozero | nonan)
        ;
    demandMshrMisses = SUM_DEMAND(mshr_misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMshrMisses.subname(i, system->getMasterName(i));
    }

    overallMshrMisses
        .name(name() + ".overall_mshr_misses")
        .desc("number of overall MSHR misses")
        .flags(total | nozero | nonan)
        ;
    overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrMisses.subname(i, system->getMasterName(i));
    }

    // MSHR miss latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_miss_latency[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_miss_latency")
            .desc("number of " + cstr + " MSHR miss cycles")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_miss_latency[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMshrMissLatency
        .name(name() + ".demand_mshr_miss_latency")
        .desc("number of demand (read+write) MSHR miss cycles")
        .flags(total | nozero | nonan)
        ;
    demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMshrMissLatency.subname(i, system->getMasterName(i));
    }

    overallMshrMissLatency
        .name(name() + ".overall_mshr_miss_latency")
        .desc("number of overall MSHR miss cycles")
        .flags(total | nozero | nonan)
        ;
    overallMshrMissLatency =
        demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrMissLatency.subname(i, system->getMasterName(i));
    }

    // MSHR uncacheable statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_uncacheable[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_uncacheable")
            .desc("number of " + cstr + " MSHR uncacheable")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_uncacheable[access_idx].subname(i, system->getMasterName(i));
        }
    }

    overallMshrUncacheable
        .name(name() + ".overall_mshr_uncacheable_misses")
        .desc("number of overall MSHR uncacheable misses")
        .flags(total | nozero | nonan)
        ;
    overallMshrUncacheable =
        SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrUncacheable.subname(i, system->getMasterName(i));
    }

    // MSHR uncacheable latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_uncacheable_lat[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_uncacheable_latency")
            .desc("number of " + cstr + " MSHR uncacheable cycles")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_uncacheable_lat[access_idx].subname(
                i, system->getMasterName(i));
        }
    }

    overallMshrUncacheableLatency
        .name(name() + ".overall_mshr_uncacheable_latency")
        .desc("number of overall MSHR uncacheable cycles")
        .flags(total | nozero | nonan)
        ;
    overallMshrUncacheableLatency =
        SUM_DEMAND(mshr_uncacheable_lat) +
        SUM_NON_DEMAND(mshr_uncacheable_lat);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrUncacheableLatency.subname(i, system->getMasterName(i));
    }

#if 0
    // MSHR access formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshrAccesses[access_idx]
            .name(name() + "." + cstr + "_mshr_accesses")
            .desc("number of " + cstr + " mshr accesses (hits+misses)")
            .flags(total | nozero | nonan)
            ;
        mshrAccesses[access_idx] =
            mshr_hits[access_idx] + mshr_misses[access_idx]
            + mshr_uncacheable[access_idx];
    }

    demandMshrAccesses
        .name(name() + ".demand_mshr_accesses")
        .desc("number of demand (read+write) mshr accesses")
        .flags(total | nozero | nonan)
        ;
    demandMshrAccesses = demandMshrHits + demandMshrMisses;

    overallMshrAccesses
        .name(name() + ".overall_mshr_accesses")
        .desc("number of overall (read+write) mshr accesses")
        .flags(total | nozero | nonan)
        ;
    overallMshrAccesses = overallMshrHits + overallMshrMisses
        + overallMshrUncacheable;
#endif

    // MSHR miss rate formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshrMissRate[access_idx]
            .name(name() + "." + cstr + "_mshr_miss_rate")
            .desc("mshr miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;
        mshrMissRate[access_idx] =
            mshr_misses[access_idx] / accesses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            mshrMissRate[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMshrMissRate
        .name(name() + ".demand_mshr_miss_rate")
        .desc("mshr miss rate for demand accesses")
        .flags(total | nozero | nonan)
        ;
    demandMshrMissRate = demandMshrMisses / demandAccesses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMshrMissRate.subname(i, system->getMasterName(i));
    }

    overallMshrMissRate
        .name(name() + ".overall_mshr_miss_rate")
        .desc("mshr miss rate for overall accesses")
        .flags(total | nozero | nonan)
        ;
    overallMshrMissRate = overallMshrMisses / overallAccesses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrMissRate.subname(i, system->getMasterName(i));
    }

    // mshrMiss latency formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        avgMshrMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_mshr_miss_latency")
            .desc("average " + cstr + " mshr miss latency")
            .flags(total | nozero | nonan)
            ;
        avgMshrMissLatency[access_idx] =
            mshr_miss_latency[access_idx] / mshr_misses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            avgMshrMissLatency[access_idx].subname(
                i, system->getMasterName(i));
        }
    }

    demandAvgMshrMissLatency
        .name(name() + ".demand_avg_mshr_miss_latency")
        .desc("average demand mshr miss latency")
        .flags(total | nozero | nonan)
        ;
    demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandAvgMshrMissLatency.subname(i, system->getMasterName(i));
    }

    overallAvgMshrMissLatency
        .name(name() + ".overall_avg_mshr_miss_latency")
        .desc("average overall mshr miss latency")
        .flags(total | nozero | nonan)
        ;
    overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallAvgMshrMissLatency.subname(i, system->getMasterName(i));
    }

    // mshrUncacheable latency formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        avgMshrUncacheableLatency[access_idx]
            .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency")
            .desc("average " + cstr + " mshr uncacheable latency")
            .flags(total | nozero | nonan)
            ;
        avgMshrUncacheableLatency[access_idx] =
            mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            avgMshrUncacheableLatency[access_idx].subname(
                i, system->getMasterName(i));
        }
    }

    overallAvgMshrUncacheableLatency
        .name(name() + ".overall_avg_mshr_uncacheable_latency")
        .desc("average overall mshr uncacheable latency")
        .flags(total | nozero | nonan)
        ;
    overallAvgMshrUncacheableLatency =
        overallMshrUncacheableLatency / overallMshrUncacheable;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i));
    }

    replacements
        .name(name() + ".replacements")
        .desc("number of replacements")
        ;
}

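// Register probe points so that external observers (e.g. prefetchers
// or listeners) can be notified of cache hits and misses.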
void
BaseCache::regProbePoints()
{
    ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit");
    ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss");
}

///////////////
//
// CpuSidePort
//
///////////////
bool
BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    assert(pkt->isResponse());

    // Express snoop responses from master to slave, e.g., from L1 to L2
    cache->recvTimingSnoopResp(pkt);
    return true;
}


bool
BaseCache::CpuSidePort::tryTiming(PacketPtr pkt)
{
    if (cache->system->bypassCaches() || pkt->isExpressSnoop()) {
        // always let express snoop packets through even if blocked
        return true;
    } else if (blocked || mustSendRetry) {
        // either already committed to send a retry, or blocked
        mustSendRetry = true;
        return false;
    }
    mustSendRetry = false;
    return true;
}

bool
BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
{
    assert(pkt->isRequest());

    if (cache->system->bypassCaches()) {
        // Just forward the packet if caches are disabled.
        // @todo This should really enqueue the packet rather than
        // forwarding it directly
        bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
        assert(success);
        return true;
    } else if (tryTiming(pkt)) {
        cache->recvTimingReq(pkt);
        return true;
    }
    return false;
}

Tick
BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
{
    if (cache->system->bypassCaches()) {
        // Forward the request if the system is in cache bypass mode.
        return cache->memSidePort.sendAtomic(pkt);
    } else {
        return cache->recvAtomic(pkt);
    }
}

void
BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
{
    if (cache->system->bypassCaches()) {
        // The cache should be flushed if we are in cache bypass mode,
        // so we don't need to check if we need to update anything.
        cache->memSidePort.sendFunctional(pkt);
        return;
    }

    // functional request
    cache->functionalAccess(pkt, true);
}

AddrRangeList
BaseCache::CpuSidePort::getAddrRanges() const
{
    return cache->getAddrRanges();
}


BaseCache::
CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
                         const std::string &_label)
    : CacheSlavePort(_name, _cache, _label), cache(_cache)
{
}

///////////////
//
// MemSidePort
//
///////////////
bool
BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
{
    cache->recvTimingResp(pkt);
    return true;
}

// Express snooping requests to memside port
void
BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    // handle snooping requests
    cache->recvTimingSnoopReq(pkt);
}

Tick
BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    return cache->recvAtomicSnoop(pkt);
}

void
BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    // functional snoop (note that in contrast to atomic we don't have
    // a specific functionalSnoop method, as they have the same
    // behaviour regardless)
    cache->functionalAccess(pkt, false);
}

void
BaseCache::CacheReqPacketQueue::sendDeferredPacket()
{
    // sanity check
    assert(!waitingOnRetry);

    // there should never be any deferred request packets in the
    // queue; instead we rely on the cache to provide the packets
    // from the MSHR queue or write queue
    assert(deferredPacketReadyTime() == MaxTick);

    // check for request packets (requests & writebacks)
    QueueEntry* entry = cache.getNextQueueEntry();

    if (!entry) {
        // can happen if e.g. we attempt a writeback and fail, but
        // before the retry, the writeback is eliminated because
        // we snoop another cache's ReadEx.
    } else {
        // let our snoop responses go first if there are responses to
        // the same addresses
        if (checkConflictingSnoop(entry->blkAddr)) {
            return;
        }
        waitingOnRetry = entry->sendPacket(cache);
    }

    // if we succeeded and are not waiting for a retry, schedule the
    // next send considering when the next queue is ready; note that
    // snoop responses have their own packet queue and thus schedule
    // their own events
    if (!waitingOnRetry) {
        schedSendEvent(cache.nextQueueReadyTime());
    }
}

BaseCache::MemSidePort::MemSidePort(const std::string &_name,
                                    BaseCache *_cache,
                                    const std::string &_label)
    : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
      _snoopRespQueue(*_cache, *this, true, _label), cache(_cache)
{
}

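// Track streams of sequential writes and move between allocation
// modes: consecutive writes accumulate in byteCount, promoting the
// allocator from ALLOCATE to COALESCE once byteCount exceeds
// coalesceLimit, and on to NO_ALLOCATE once it exceeds noAllocateLimit;
// a non-sequential write resets the state machine to ALLOCATE.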
void
WriteAllocator::updateMode(Addr write_addr, unsigned write_size,
                           Addr blk_addr)
{
    // check if we are continuing where the last write ended
    if (nextAddr == write_addr) {
        delayCtr[blk_addr] = delayThreshold;
        // stop if we have already saturated
        if (mode != WriteMode::NO_ALLOCATE) {
            byteCount += write_size;
            // switch to write-coalescing mode if we have passed the
            // lower threshold
            if (mode == WriteMode::ALLOCATE &&
                byteCount > coalesceLimit) {
                mode = WriteMode::COALESCE;
                DPRINTF(Cache, "Switched to write coalescing\n");
            } else if (mode == WriteMode::COALESCE &&
                       byteCount > noAllocateLimit) {
                // and switch to non-allocating mode if we pass the
                // upper threshold
                mode = WriteMode::NO_ALLOCATE;
                DPRINTF(Cache, "Switched to write-no-allocate\n");
            }
        }
    } else {
        // we did not see a write matching the previous one, start
        // over again
        byteCount = write_size;
        mode = WriteMode::ALLOCATE;
        resetDelay(blk_addr);
    }
    nextAddr = write_addr + write_size;
}

WriteAllocator*
WriteAllocatorParams::create()
{
    return new WriteAllocator(this);
}
