base.cc revision 14035:60068a2d56e0
1/*
2 * Copyright (c) 2012-2013, 2018-2019 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder.  You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2003-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Erik Hallnor
41 *          Nikos Nikoleris
42 */
43
44/**
45 * @file
46 * Definition of BaseCache functions.
47 */
48
49#include "mem/cache/base.hh"
50
51#include "base/compiler.hh"
52#include "base/logging.hh"
53#include "debug/Cache.hh"
54#include "debug/CacheComp.hh"
55#include "debug/CachePort.hh"
56#include "debug/CacheRepl.hh"
57#include "debug/CacheVerbose.hh"
58#include "mem/cache/compressors/base.hh"
59#include "mem/cache/mshr.hh"
60#include "mem/cache/prefetch/base.hh"
61#include "mem/cache/queue_entry.hh"
62#include "mem/cache/tags/super_blk.hh"
63#include "params/BaseCache.hh"
64#include "params/WriteAllocator.hh"
65#include "sim/core.hh"
66
67class BaseMasterPort;
68class BaseSlavePort;
69
70using namespace std;
71
72BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
73                                          BaseCache *_cache,
74                                          const std::string &_label)
75    : QueuedSlavePort(_name, _cache, queue),
76      queue(*_cache, *this, true, _label),
77      blocked(false), mustSendRetry(false),
78      sendRetryEvent([this]{ processSendRetry(); }, _name)
79{
80}
81
82BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
83    : ClockedObject(p),
84      cpuSidePort (p->name + ".cpu_side", this, "CpuSidePort"),
85      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
86      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
87      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
88      tags(p->tags),
89      compressor(p->compressor),
90      prefetcher(p->prefetcher),
91      writeAllocator(p->write_allocator),
92      writebackClean(p->writeback_clean),
93      tempBlockWriteback(nullptr),
94      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
95                                    name(), false,
96                                    EventBase::Delayed_Writeback_Pri),
97      blkSize(blk_size),
98      lookupLatency(p->tag_latency),
99      dataLatency(p->data_latency),
100      forwardLatency(p->tag_latency),
101      fillLatency(p->data_latency),
102      responseLatency(p->response_latency),
103      sequentialAccess(p->sequential_access),
104      numTarget(p->tgts_per_mshr),
105      forwardSnoops(true),
106      clusivity(p->clusivity),
107      isReadOnly(p->is_read_only),
108      blocked(0),
109      order(0),
110      noTargetMSHR(nullptr),
111      missCount(p->max_miss_count),
112      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
113      system(p->system)
114{
115    // the MSHR queue has no reserve entries as we check the MSHR
116    // queue on every single allocation, whereas the write queue has
117    // as many reserve entries as we have MSHRs, since every MSHR may
118    // eventually require a writeback, and we do not check the write
119    // buffer before committing to an MSHR
120
121    // forward snoops is overridden in init() once we can query
122    // whether the connected master is actually snooping or not
123
124    tempBlock = new TempCacheBlk(blkSize);
125
126    tags->tagsInit();
127    if (prefetcher)
128        prefetcher->setCache(this);
129}
130
131BaseCache::~BaseCache()
132{
133    delete tempBlock;
134}
135
136void
137BaseCache::CacheSlavePort::setBlocked()
138{
139    assert(!blocked);
140    DPRINTF(CachePort, "Port is blocking new requests\n");
141    blocked = true;
142    // if we already scheduled a retry in this cycle, but it has not yet
143    // happened, cancel it
144    if (sendRetryEvent.scheduled()) {
145        owner.deschedule(sendRetryEvent);
146        DPRINTF(CachePort, "Port descheduled retry\n");
147        mustSendRetry = true;
148    }
149}
150
151void
152BaseCache::CacheSlavePort::clearBlocked()
153{
154    assert(blocked);
155    DPRINTF(CachePort, "Port is accepting new requests\n");
156    blocked = false;
157    if (mustSendRetry) {
158        // @TODO: need to find a better time (next cycle?)
159        owner.schedule(sendRetryEvent, curTick() + 1);
160    }
161}
162
163void
164BaseCache::CacheSlavePort::processSendRetry()
165{
166    DPRINTF(CachePort, "Port is sending retry\n");
167
168    // reset the flag and call retry
169    mustSendRetry = false;
170    sendRetryReq();
171}
172
173Addr
174BaseCache::regenerateBlkAddr(CacheBlk* blk)
175{
176    if (blk != tempBlock) {
177        return tags->regenerateBlkAddr(blk);
178    } else {
179        return tempBlock->getAddr();
180    }
181}
182
183void
184BaseCache::init()
185{
186    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
187        fatal("Cache ports on %s are not connected\n", name());
188    cpuSidePort.sendRangeChange();
189    forwardSnoops = cpuSidePort.isSnooping();
190}
191
192Port &
193BaseCache::getPort(const std::string &if_name, PortID idx)
194{
195    if (if_name == "mem_side") {
196        return memSidePort;
197    } else if (if_name == "cpu_side") {
198        return cpuSidePort;
199    }  else {
200        return ClockedObject::getPort(if_name, idx);
201    }
202}
203
204bool
205BaseCache::inRange(Addr addr) const
206{
207    for (const auto& r : addrRanges) {
208        if (r.contains(addr)) {
209            return true;
210       }
211    }
212    return false;
213}
214
215void
216BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
217{
218    if (pkt->needsResponse()) {
219        // These delays should have been consumed by now
220        assert(pkt->headerDelay == 0);
221        assert(pkt->payloadDelay == 0);
222
223        pkt->makeTimingResponse();
224
225        // In this case we are considering request_time that takes
226        // into account the delay of the xbar, if any, and just
227        // lat, neglecting responseLatency, modelling hit latency
228        // just as the value of lat overriden by access(), which calls
229        // the calculateAccessLatency() function.
230        cpuSidePort.schedTimingResp(pkt, request_time);
231    } else {
232        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
233                pkt->print());
234
235        // queue the packet for deletion, as the sending cache is
236        // still relying on it; if the block is found in access(),
237        // CleanEvict and Writeback messages will be deleted
238        // here as well
239        pendingDelete.reset(pkt);
240    }
241}
242
243void
244BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
245                               Tick forward_time, Tick request_time)
246{
247    if (writeAllocator &&
248        pkt && pkt->isWrite() && !pkt->req->isUncacheable()) {
249        writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
250                                   pkt->getBlockAddr(blkSize));
251    }
252
253    if (mshr) {
254        /// MSHR hit
255        /// @note writebacks will be checked in getNextMSHR()
256        /// for any conflicting requests to the same block
257
258        //@todo remove hw_pf here
259
260        // Coalesce unless it was a software prefetch (see above).
261        if (pkt) {
262            assert(!pkt->isWriteback());
263            // CleanEvicts corresponding to blocks which have
264            // outstanding requests in MSHRs are simply sunk here
265            if (pkt->cmd == MemCmd::CleanEvict) {
266                pendingDelete.reset(pkt);
267            } else if (pkt->cmd == MemCmd::WriteClean) {
268                // A WriteClean should never coalesce with any
269                // outstanding cache maintenance requests.
270
271                // We use forward_time here because there is an
272                // uncached memory write, forwarded to WriteBuffer.
273                allocateWriteBuffer(pkt, forward_time);
274            } else {
275                DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
276                        pkt->print());
277
278                assert(pkt->req->masterId() < system->maxMasters());
279                mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
280
281                // We use forward_time here because it is the same
282                // considering new targets. We have multiple
283                // requests for the same address here. It
284                // specifies the latency to allocate an internal
285                // buffer and to schedule an event to the queued
286                // port and also takes into account the additional
287                // delay of the xbar.
288                mshr->allocateTarget(pkt, forward_time, order++,
289                                     allocOnFill(pkt->cmd));
290                if (mshr->getNumTargets() == numTarget) {
291                    noTargetMSHR = mshr;
292                    setBlocked(Blocked_NoTargets);
293                    // need to be careful with this... if this mshr isn't
294                    // ready yet (i.e. time > curTick()), we don't want to
295                    // move it ahead of mshrs that are ready
296                    // mshrQueue.moveToFront(mshr);
297                }
298            }
299        }
300    } else {
301        // no MSHR
302        assert(pkt->req->masterId() < system->maxMasters());
303        mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
304
305        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
306            // We use forward_time here because there is an
307            // writeback or writeclean, forwarded to WriteBuffer.
308            allocateWriteBuffer(pkt, forward_time);
309        } else {
310            if (blk && blk->isValid()) {
311                // If we have a write miss to a valid block, we
312                // need to mark the block non-readable.  Otherwise
313                // if we allow reads while there's an outstanding
314                // write miss, the read could return stale data
315                // out of the cache block... a more aggressive
316                // system could detect the overlap (if any) and
317                // forward data out of the MSHRs, but we don't do
318                // that yet.  Note that we do need to leave the
319                // block valid so that it stays in the cache, in
320                // case we get an upgrade response (and hence no
321                // new data) when the write miss completes.
322                // As long as CPUs do proper store/load forwarding
323                // internally, and have a sufficiently weak memory
324                // model, this is probably unnecessary, but at some
325                // point it must have seemed like we needed it...
326                assert((pkt->needsWritable() && !blk->isWritable()) ||
327                       pkt->req->isCacheMaintenance());
328                blk->status &= ~BlkReadable;
329            }
330            // Here we are using forward_time, modelling the latency of
331            // a miss (outbound) just as forwardLatency, neglecting the
332            // lookupLatency component.
333            allocateMissBuffer(pkt, forward_time);
334        }
335    }
336}
337
338void
339BaseCache::recvTimingReq(PacketPtr pkt)
340{
341    // anything that is merely forwarded pays for the forward latency and
342    // the delay provided by the crossbar
343    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
344
345    Cycles lat;
346    CacheBlk *blk = nullptr;
347    bool satisfied = false;
348    {
349        PacketList writebacks;
350        // Note that lat is passed by reference here. The function
351        // access() will set the lat value.
352        satisfied = access(pkt, blk, lat, writebacks);
353
354        // After the evicted blocks are selected, they must be forwarded
355        // to the write buffer to ensure they logically precede anything
356        // happening below
357        doWritebacks(writebacks, clockEdge(lat + forwardLatency));
358    }
359
360    // Here we charge the headerDelay that takes into account the latencies
361    // of the bus, if the packet comes from it.
362    // The latency charged is just the value set by the access() function.
363    // In case of a hit we are neglecting response latency.
364    // In case of a miss we are neglecting forward latency.
365    Tick request_time = clockEdge(lat);
366    // Here we reset the timing of the packet.
367    pkt->headerDelay = pkt->payloadDelay = 0;
368
369    if (satisfied) {
370        // notify before anything else as later handleTimingReqHit might turn
371        // the packet in a response
372        ppHit->notify(pkt);
373
374        if (prefetcher && blk && blk->wasPrefetched()) {
375            blk->status &= ~BlkHWPrefetched;
376        }
377
378        handleTimingReqHit(pkt, blk, request_time);
379    } else {
380        handleTimingReqMiss(pkt, blk, forward_time, request_time);
381
382        ppMiss->notify(pkt);
383    }
384
385    if (prefetcher) {
386        // track time of availability of next prefetch, if any
387        Tick next_pf_time = prefetcher->nextPrefetchReadyTime();
388        if (next_pf_time != MaxTick) {
389            schedMemSideSendEvent(next_pf_time);
390        }
391    }
392}
393
394void
395BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
396{
397    Tick completion_time = clockEdge(responseLatency) +
398        pkt->headerDelay + pkt->payloadDelay;
399
400    // Reset the bus additional time as it is now accounted for
401    pkt->headerDelay = pkt->payloadDelay = 0;
402
403    cpuSidePort.schedTimingResp(pkt, completion_time);
404}
405
406void
407BaseCache::recvTimingResp(PacketPtr pkt)
408{
409    assert(pkt->isResponse());
410
411    // all header delay should be paid for by the crossbar, unless
412    // this is a prefetch response from above
413    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
414             "%s saw a non-zero packet delay\n", name());
415
416    const bool is_error = pkt->isError();
417
418    if (is_error) {
419        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
420                pkt->print());
421    }
422
423    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
424            pkt->print());
425
426    // if this is a write, we should be looking at an uncacheable
427    // write
428    if (pkt->isWrite()) {
429        assert(pkt->req->isUncacheable());
430        handleUncacheableWriteResp(pkt);
431        return;
432    }
433
434    // we have dealt with any (uncacheable) writes above, from here on
435    // we know we are dealing with an MSHR due to a miss or a prefetch
436    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
437    assert(mshr);
438
439    if (mshr == noTargetMSHR) {
440        // we always clear at least one target
441        clearBlocked(Blocked_NoTargets);
442        noTargetMSHR = nullptr;
443    }
444
445    // Initial target is used just for stats
446    QueueEntry::Target *initial_tgt = mshr->getTarget();
447    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
448    Tick miss_latency = curTick() - initial_tgt->recvTime;
449
450    if (pkt->req->isUncacheable()) {
451        assert(pkt->req->masterId() < system->maxMasters());
452        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
453            miss_latency;
454    } else {
455        assert(pkt->req->masterId() < system->maxMasters());
456        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
457            miss_latency;
458    }
459
460    PacketList writebacks;
461
462    bool is_fill = !mshr->isForward &&
463        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
464         mshr->wasWholeLineWrite);
465
466    // make sure that if the mshr was due to a whole line write then
467    // the response is an invalidation
468    assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());
469
470    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
471
472    if (is_fill && !is_error) {
473        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
474                pkt->getAddr());
475
476        const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
477            writeAllocator->allocate() : mshr->allocOnFill();
478        blk = handleFill(pkt, blk, writebacks, allocate);
479        assert(blk != nullptr);
480        ppFill->notify(pkt);
481    }
482
483    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
484        // The block was marked not readable while there was a pending
485        // cache maintenance operation, restore its flag.
486        blk->status |= BlkReadable;
487
488        // This was a cache clean operation (without invalidate)
489        // and we have a copy of the block already. Since there
490        // is no invalidation, we can promote targets that don't
491        // require a writable copy
492        mshr->promoteReadable();
493    }
494
495    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
496        // If at this point the referenced block is writable and the
497        // response is not a cache invalidate, we promote targets that
498        // were deferred as we couldn't guarrantee a writable copy
499        mshr->promoteWritable();
500    }
501
502    serviceMSHRTargets(mshr, pkt, blk);
503
504    if (mshr->promoteDeferredTargets()) {
505        // avoid later read getting stale data while write miss is
506        // outstanding.. see comment in timingAccess()
507        if (blk) {
508            blk->status &= ~BlkReadable;
509        }
510        mshrQueue.markPending(mshr);
511        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
512    } else {
513        // while we deallocate an mshr from the queue we still have to
514        // check the isFull condition before and after as we might
515        // have been using the reserved entries already
516        const bool was_full = mshrQueue.isFull();
517        mshrQueue.deallocate(mshr);
518        if (was_full && !mshrQueue.isFull()) {
519            clearBlocked(Blocked_NoMSHRs);
520        }
521
522        // Request the bus for a prefetch if this deallocation freed enough
523        // MSHRs for a prefetch to take place
524        if (prefetcher && mshrQueue.canPrefetch()) {
525            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
526                                         clockEdge());
527            if (next_pf_time != MaxTick)
528                schedMemSideSendEvent(next_pf_time);
529        }
530    }
531
532    // if we used temp block, check to see if its valid and then clear it out
533    if (blk == tempBlock && tempBlock->isValid()) {
534        evictBlock(blk, writebacks);
535    }
536
537    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
538    // copy writebacks to write buffer
539    doWritebacks(writebacks, forward_time);
540
541    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
542    delete pkt;
543}
544
545
546Tick
547BaseCache::recvAtomic(PacketPtr pkt)
548{
549    // should assert here that there are no outstanding MSHRs or
550    // writebacks... that would mean that someone used an atomic
551    // access in timing mode
552
553    // We use lookupLatency here because it is used to specify the latency
554    // to access.
555    Cycles lat = lookupLatency;
556
557    CacheBlk *blk = nullptr;
558    PacketList writebacks;
559    bool satisfied = access(pkt, blk, lat, writebacks);
560
561    if (pkt->isClean() && blk && blk->isDirty()) {
562        // A cache clean opearation is looking for a dirty
563        // block. If a dirty block is encountered a WriteClean
564        // will update any copies to the path to the memory
565        // until the point of reference.
566        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
567                __func__, pkt->print(), blk->print());
568        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
569        writebacks.push_back(wb_pkt);
570        pkt->setSatisfied();
571    }
572
573    // handle writebacks resulting from the access here to ensure they
574    // logically precede anything happening below
575    doWritebacksAtomic(writebacks);
576    assert(writebacks.empty());
577
578    if (!satisfied) {
579        lat += handleAtomicReqMiss(pkt, blk, writebacks);
580    }
581
582    // Note that we don't invoke the prefetcher at all in atomic mode.
583    // It's not clear how to do it properly, particularly for
584    // prefetchers that aggressively generate prefetch candidates and
585    // rely on bandwidth contention to throttle them; these will tend
586    // to pollute the cache in atomic mode since there is no bandwidth
587    // contention.  If we ever do want to enable prefetching in atomic
588    // mode, though, this is the place to do it... see timingAccess()
589    // for an example (though we'd want to issue the prefetch(es)
590    // immediately rather than calling requestMemSideBus() as we do
591    // there).
592
593    // do any writebacks resulting from the response handling
594    doWritebacksAtomic(writebacks);
595
596    // if we used temp block, check to see if its valid and if so
597    // clear it out, but only do so after the call to recvAtomic is
598    // finished so that any downstream observers (such as a snoop
599    // filter), first see the fill, and only then see the eviction
600    if (blk == tempBlock && tempBlock->isValid()) {
601        // the atomic CPU calls recvAtomic for fetch and load/store
602        // sequentuially, and we may already have a tempBlock
603        // writeback from the fetch that we have not yet sent
604        if (tempBlockWriteback) {
605            // if that is the case, write the prevoius one back, and
606            // do not schedule any new event
607            writebackTempBlockAtomic();
608        } else {
609            // the writeback/clean eviction happens after the call to
610            // recvAtomic has finished (but before any successive
611            // calls), so that the response handling from the fill is
612            // allowed to happen first
613            schedule(writebackTempBlockAtomicEvent, curTick());
614        }
615
616        tempBlockWriteback = evictBlock(blk);
617    }
618
619    if (pkt->needsResponse()) {
620        pkt->makeAtomicResponse();
621    }
622
623    return lat * clockPeriod();
624}
625
626void
627BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
628{
629    Addr blk_addr = pkt->getBlockAddr(blkSize);
630    bool is_secure = pkt->isSecure();
631    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
632    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
633
634    pkt->pushLabel(name());
635
636    CacheBlkPrintWrapper cbpw(blk);
637
638    // Note that just because an L2/L3 has valid data doesn't mean an
639    // L1 doesn't have a more up-to-date modified copy that still
640    // needs to be found.  As a result we always update the request if
641    // we have it, but only declare it satisfied if we are the owner.
642
643    // see if we have data at all (owned or otherwise)
644    bool have_data = blk && blk->isValid()
645        && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
646                                     blk->data);
647
648    // data we have is dirty if marked as such or if we have an
649    // in-service MSHR that is pending a modified line
650    bool have_dirty =
651        have_data && (blk->isDirty() ||
652                      (mshr && mshr->inService && mshr->isPendingModified()));
653
654    bool done = have_dirty ||
655        cpuSidePort.trySatisfyFunctional(pkt) ||
656        mshrQueue.trySatisfyFunctional(pkt) ||
657        writeBuffer.trySatisfyFunctional(pkt) ||
658        memSidePort.trySatisfyFunctional(pkt);
659
660    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__,  pkt->print(),
661            (blk && blk->isValid()) ? "valid " : "",
662            have_data ? "data " : "", done ? "done " : "");
663
664    // We're leaving the cache, so pop cache->name() label
665    pkt->popLabel();
666
667    if (done) {
668        pkt->makeResponse();
669    } else {
670        // if it came as a request from the CPU side then make sure it
671        // continues towards the memory side
672        if (from_cpu_side) {
673            memSidePort.sendFunctional(pkt);
674        } else if (cpuSidePort.isSnooping()) {
675            // if it came from the memory side, it must be a snoop request
676            // and we should only forward it if we are forwarding snoops
677            cpuSidePort.sendFunctionalSnoop(pkt);
678        }
679    }
680}
681
682
683void
684BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
685{
686    assert(pkt->isRequest());
687
688    uint64_t overwrite_val;
689    bool overwrite_mem;
690    uint64_t condition_val64;
691    uint32_t condition_val32;
692
693    int offset = pkt->getOffset(blkSize);
694    uint8_t *blk_data = blk->data + offset;
695
696    assert(sizeof(uint64_t) >= pkt->getSize());
697
698    overwrite_mem = true;
699    // keep a copy of our possible write value, and copy what is at the
700    // memory address into the packet
701    pkt->writeData((uint8_t *)&overwrite_val);
702    pkt->setData(blk_data);
703
704    if (pkt->req->isCondSwap()) {
705        if (pkt->getSize() == sizeof(uint64_t)) {
706            condition_val64 = pkt->req->getExtraData();
707            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
708                                         sizeof(uint64_t));
709        } else if (pkt->getSize() == sizeof(uint32_t)) {
710            condition_val32 = (uint32_t)pkt->req->getExtraData();
711            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
712                                         sizeof(uint32_t));
713        } else
714            panic("Invalid size for conditional read/write\n");
715    }
716
717    if (overwrite_mem) {
718        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
719        blk->status |= BlkDirty;
720    }
721}
722
723QueueEntry*
724BaseCache::getNextQueueEntry()
725{
726    // Check both MSHR queue and write buffer for potential requests,
727    // note that null does not mean there is no request, it could
728    // simply be that it is not ready
729    MSHR *miss_mshr  = mshrQueue.getNext();
730    WriteQueueEntry *wq_entry = writeBuffer.getNext();
731
732    // If we got a write buffer request ready, first priority is a
733    // full write buffer, otherwise we favour the miss requests
734    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
735        // need to search MSHR queue for conflicting earlier miss.
736        MSHR *conflict_mshr = mshrQueue.findPending(wq_entry);
737
738        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
739            // Service misses in order until conflict is cleared.
740            return conflict_mshr;
741
742            // @todo Note that we ignore the ready time of the conflict here
743        }
744
745        // No conflicts; issue write
746        return wq_entry;
747    } else if (miss_mshr) {
748        // need to check for conflicting earlier writeback
749        WriteQueueEntry *conflict_mshr = writeBuffer.findPending(miss_mshr);
750        if (conflict_mshr) {
751            // not sure why we don't check order here... it was in the
752            // original code but commented out.
753
754            // The only way this happens is if we are
755            // doing a write and we didn't have permissions
756            // then subsequently saw a writeback (owned got evicted)
757            // We need to make sure to perform the writeback first
758            // To preserve the dirty data, then we can issue the write
759
760            // should we return wq_entry here instead?  I.e. do we
761            // have to flush writes in order?  I don't think so... not
762            // for Alpha anyway.  Maybe for x86?
763            return conflict_mshr;
764
765            // @todo Note that we ignore the ready time of the conflict here
766        }
767
768        // No conflicts; issue read
769        return miss_mshr;
770    }
771
772    // fall through... no pending requests.  Try a prefetch.
773    assert(!miss_mshr && !wq_entry);
774    if (prefetcher && mshrQueue.canPrefetch()) {
775        // If we have a miss queue slot, we can try a prefetch
776        PacketPtr pkt = prefetcher->getPacket();
777        if (pkt) {
778            Addr pf_addr = pkt->getBlockAddr(blkSize);
779            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
780                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
781                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
782                // Update statistic on number of prefetches issued
783                // (hwpf_mshr_misses)
784                assert(pkt->req->masterId() < system->maxMasters());
785                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
786
787                // allocate an MSHR and return it, note
788                // that we send the packet straight away, so do not
789                // schedule the send
790                return allocateMissBuffer(pkt, curTick(), false);
791            } else {
792                // free the request and packet
793                delete pkt;
794            }
795        }
796    }
797
798    return nullptr;
799}
800
801bool
802BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
803                                 PacketList &writebacks)
804{
805    // tempBlock does not exist in the tags, so don't do anything for it.
806    if (blk == tempBlock) {
807        return true;
808    }
809
810    // Get superblock of the given block
811    CompressionBlk* compression_blk = static_cast<CompressionBlk*>(blk);
812    const SuperBlk* superblock = static_cast<const SuperBlk*>(
813        compression_blk->getSectorBlock());
814
815    // The compressor is called to compress the updated data, so that its
816    // metadata can be updated.
817    std::size_t compression_size = 0;
818    Cycles compression_lat = Cycles(0);
819    Cycles decompression_lat = Cycles(0);
820    compressor->compress(data, compression_lat, decompression_lat,
821                         compression_size);
822
823    // If block's compression factor increased, it may not be co-allocatable
824    // anymore. If so, some blocks might need to be evicted to make room for
825    // the bigger block
826
827    // Get previous compressed size
828    const std::size_t M5_VAR_USED prev_size = compression_blk->getSizeBits();
829
830    // Check if new data is co-allocatable
831    const bool is_co_allocatable = superblock->isCompressed(compression_blk) &&
832        superblock->canCoAllocate(compression_size);
833
834    // If block was compressed, possibly co-allocated with other blocks, and
835    // cannot be co-allocated anymore, one or more blocks must be evicted to
836    // make room for the expanded block. As of now we decide to evict the co-
837    // allocated blocks to make room for the expansion, but other approaches
838    // that take the replacement data of the superblock into account may
839    // generate better results
840    std::vector<CacheBlk*> evict_blks;
841    const bool was_compressed = compression_blk->isCompressed();
842    if (was_compressed && !is_co_allocatable) {
843        // Get all co-allocated blocks
844        for (const auto& sub_blk : superblock->blks) {
845            if (sub_blk->isValid() && (compression_blk != sub_blk)) {
846                // Check for transient state allocations. If any of the
847                // entries listed for eviction has a transient state, the
848                // allocation fails
849                const Addr repl_addr = regenerateBlkAddr(sub_blk);
850                const MSHR *repl_mshr =
851                    mshrQueue.findMatch(repl_addr, sub_blk->isSecure());
852                if (repl_mshr) {
853                    DPRINTF(CacheRepl, "Aborting data expansion of %s due " \
854                            "to replacement of block in transient state: %s\n",
855                            compression_blk->print(), sub_blk->print());
856                    // Too hard to replace block with transient state, so it
857                    // cannot be evicted. Mark the update as failed and expect
858                    // the caller to evict this block. Since this is called
859                    // only when writebacks arrive, and packets do not contain
860                    // compressed data, there is no need to decompress
861                    compression_blk->setSizeBits(blkSize * 8);
862                    compression_blk->setDecompressionLatency(Cycles(0));
863                    compression_blk->setUncompressed();
864                    return false;
865                }
866
867                evict_blks.push_back(sub_blk);
868            }
869        }
870
871        // Update the number of data expansions
872        dataExpansions++;
873
874        DPRINTF(CacheComp, "Data expansion: expanding [%s] from %d to %d bits"
875                "\n", blk->print(), prev_size, compression_size);
876    }
877
878    // We always store compressed blocks when possible
879    if (is_co_allocatable) {
880        compression_blk->setCompressed();
881    } else {
882        compression_blk->setUncompressed();
883    }
884    compression_blk->setSizeBits(compression_size);
885    compression_blk->setDecompressionLatency(decompression_lat);
886
887    // Evict valid blocks
888    for (const auto& evict_blk : evict_blks) {
889        if (evict_blk->isValid()) {
890            if (evict_blk->wasPrefetched()) {
891                unusedPrefetches++;
892            }
893            evictBlock(evict_blk, writebacks);
894        }
895    }
896
897    return true;
898}
899
900void
901BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
902{
903    assert(pkt->isRequest());
904
905    assert(blk && blk->isValid());
906    // Occasionally this is not true... if we are a lower-level cache
907    // satisfying a string of Read and ReadEx requests from
908    // upper-level caches, a Read will mark the block as shared but we
909    // can satisfy a following ReadEx anyway since we can rely on the
910    // Read requester(s) to have buffered the ReadEx snoop and to
911    // invalidate their blocks after receiving them.
912    // assert(!pkt->needsWritable() || blk->isWritable());
913    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
914
915    // Check RMW operations first since both isRead() and
916    // isWrite() will be true for them
917    if (pkt->cmd == MemCmd::SwapReq) {
918        if (pkt->isAtomicOp()) {
919            // extract data from cache and save it into the data field in
920            // the packet as a return value from this atomic op
921            int offset = tags->extractBlkOffset(pkt->getAddr());
922            uint8_t *blk_data = blk->data + offset;
923            pkt->setData(blk_data);
924
925            // execute AMO operation
926            (*(pkt->getAtomicOp()))(blk_data);
927
928            // set block status to dirty
929            blk->status |= BlkDirty;
930        } else {
931            cmpAndSwap(blk, pkt);
932        }
933    } else if (pkt->isWrite()) {
934        // we have the block in a writable state and can go ahead,
935        // note that the line may be also be considered writable in
936        // downstream caches along the path to memory, but always
937        // Exclusive, and never Modified
938        assert(blk->isWritable());
939        // Write or WriteLine at the first cache with block in writable state
940        if (blk->checkWrite(pkt)) {
941            pkt->writeDataToBlock(blk->data, blkSize);
942        }
943        // Always mark the line as dirty (and thus transition to the
944        // Modified state) even if we are a failed StoreCond so we
945        // supply data to any snoops that have appended themselves to
946        // this cache before knowing the store will fail.
947        blk->status |= BlkDirty;
948        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
949    } else if (pkt->isRead()) {
950        if (pkt->isLLSC()) {
951            blk->trackLoadLocked(pkt);
952        }
953
954        // all read responses have a data payload
955        assert(pkt->hasRespData());
956        pkt->setDataFromBlock(blk->data, blkSize);
957    } else if (pkt->isUpgrade()) {
958        // sanity check
959        assert(!pkt->hasSharers());
960
961        if (blk->isDirty()) {
962            // we were in the Owned state, and a cache above us that
963            // has the line in Shared state needs to be made aware
964            // that the data it already has is in fact dirty
965            pkt->setCacheResponding();
966            blk->status &= ~BlkDirty;
967        }
968    } else if (pkt->isClean()) {
969        blk->status &= ~BlkDirty;
970    } else {
971        assert(pkt->isInvalidate());
972        invalidateBlock(blk);
973        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
974                pkt->print());
975    }
976}
977
978/////////////////////////////////////////////////////
979//
980// Access path: requests coming in from the CPU side
981//
982/////////////////////////////////////////////////////
983Cycles
984BaseCache::calculateTagOnlyLatency(const uint32_t delay,
985                                   const Cycles lookup_lat) const
986{
987    // A tag-only access has to wait for the packet to arrive in order to
988    // perform the tag lookup.
989    return ticksToCycles(delay) + lookup_lat;
990}
991
992Cycles
993BaseCache::calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
994                                  const Cycles lookup_lat) const
995{
996    Cycles lat(0);
997
998    if (blk != nullptr) {
999        // As soon as the access arrives, for sequential accesses first access
1000        // tags, then the data entry. In the case of parallel accesses the
1001        // latency is dictated by the slowest of tag and data latencies.
1002        if (sequentialAccess) {
1003            lat = ticksToCycles(delay) + lookup_lat + dataLatency;
1004        } else {
1005            lat = ticksToCycles(delay) + std::max(lookup_lat, dataLatency);
1006        }
1007
1008        // Check if the block to be accessed is available. If not, apply the
1009        // access latency on top of when the block is ready to be accessed.
1010        const Tick tick = curTick() + delay;
1011        const Tick when_ready = blk->getWhenReady();
1012        if (when_ready > tick &&
1013            ticksToCycles(when_ready - tick) > lat) {
1014            lat += ticksToCycles(when_ready - tick);
1015        }
1016    } else {
1017        // In case of a miss, we neglect the data access in a parallel
1018        // configuration (i.e., the data access will be stopped as soon as
1019        // we find out it is a miss), and use the tag-only latency.
1020        lat = calculateTagOnlyLatency(delay, lookup_lat);
1021    }
1022
1023    return lat;
1024}
1025
1026bool
1027BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
1028                  PacketList &writebacks)
1029{
1030    // sanity check
1031    assert(pkt->isRequest());
1032
1033    chatty_assert(!(isReadOnly && pkt->isWrite()),
1034                  "Should never see a write in a read-only cache %s\n",
1035                  name());
1036
1037    // Access block in the tags
1038    Cycles tag_latency(0);
1039    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), tag_latency);
1040
1041    DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
1042            blk ? "hit " + blk->print() : "miss");
1043
1044    if (pkt->req->isCacheMaintenance()) {
1045        // A cache maintenance operation is always forwarded to the
1046        // memory below even if the block is found in dirty state.
1047
1048        // We defer any changes to the state of the block until we
1049        // create and mark as in service the mshr for the downstream
1050        // packet.
1051
1052        // Calculate access latency on top of when the packet arrives. This
1053        // takes into account the bus delay.
1054        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1055
1056        return false;
1057    }
1058
1059    if (pkt->isEviction()) {
1060        // We check for presence of block in above caches before issuing
1061        // Writeback or CleanEvict to write buffer. Therefore the only
1062        // possible cases can be of a CleanEvict packet coming from above
1063        // encountering a Writeback generated in this cache peer cache and
1064        // waiting in the write buffer. Cases of upper level peer caches
1065        // generating CleanEvict and Writeback or simply CleanEvict and
1066        // CleanEvict almost simultaneously will be caught by snoops sent out
1067        // by crossbar.
1068        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
1069                                                          pkt->isSecure());
1070        if (wb_entry) {
1071            assert(wb_entry->getNumTargets() == 1);
1072            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
1073            assert(wbPkt->isWriteback());
1074
1075            if (pkt->isCleanEviction()) {
1076                // The CleanEvict and WritebackClean snoops into other
1077                // peer caches of the same level while traversing the
1078                // crossbar. If a copy of the block is found, the
1079                // packet is deleted in the crossbar. Hence, none of
1080                // the other upper level caches connected to this
1081                // cache have the block, so we can clear the
1082                // BLOCK_CACHED flag in the Writeback if set and
1083                // discard the CleanEvict by returning true.
1084                wbPkt->clearBlockCached();
1085
1086                // A clean evict does not need to access the data array
1087                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1088
1089                return true;
1090            } else {
1091                assert(pkt->cmd == MemCmd::WritebackDirty);
1092                // Dirty writeback from above trumps our clean
1093                // writeback... discard here
1094                // Note: markInService will remove entry from writeback buffer.
1095                markInService(wb_entry);
1096                delete wbPkt;
1097            }
1098        }
1099    }
1100
1101    // Writeback handling is special case.  We can write the block into
1102    // the cache without having a writeable copy (or any copy at all).
1103    if (pkt->isWriteback()) {
1104        assert(blkSize == pkt->getSize());
1105
1106        // we could get a clean writeback while we are having
1107        // outstanding accesses to a block, do the simple thing for
1108        // now and drop the clean writeback so that we do not upset
1109        // any ordering/decisions about ownership already taken
1110        if (pkt->cmd == MemCmd::WritebackClean &&
1111            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
1112            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
1113                    "dropping\n", pkt->getAddr());
1114
1115            // A writeback searches for the block, then writes the data.
1116            // As the writeback is being dropped, the data is not touched,
1117            // and we just had to wait for the time to find a match in the
1118            // MSHR. As of now assume a mshr queue search takes as long as
1119            // a tag lookup for simplicity.
1120            lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1121
1122            return true;
1123        }
1124
1125        if (!blk) {
1126            // need to do a replacement
1127            blk = allocateBlock(pkt, writebacks);
1128            if (!blk) {
1129                // no replaceable block available: give up, fwd to next level.
1130                incMissCount(pkt);
1131
1132                // A writeback searches for the block, then writes the data.
1133                // As the block could not be found, it was a tag-only access.
1134                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1135
1136                return false;
1137            }
1138
1139            blk->status |= BlkReadable;
1140        } else if (compressor) {
1141            // This is an overwrite to an existing block, therefore we need
1142            // to check for data expansion (i.e., block was compressed with
1143            // a smaller size, and now it doesn't fit the entry anymore).
1144            // If that is the case we might need to evict blocks.
1145            if (!updateCompressionData(blk, pkt->getConstPtr<uint64_t>(),
1146                writebacks)) {
1147                // This is a failed data expansion (write), which happened
1148                // after finding the replacement entries and accessing the
1149                // block's data. There were no replaceable entries available
1150                // to make room for the expanded block, and since it does not
1151                // fit anymore and it has been properly updated to contain
1152                // the new data, forward it to the next level
1153                lat = calculateAccessLatency(blk, pkt->headerDelay,
1154                                             tag_latency);
1155                invalidateBlock(blk);
1156                return false;
1157            }
1158        }
1159
1160        // only mark the block dirty if we got a writeback command,
1161        // and leave it as is for a clean writeback
1162        if (pkt->cmd == MemCmd::WritebackDirty) {
1163            // TODO: the coherent cache can assert(!blk->isDirty());
1164            blk->status |= BlkDirty;
1165        }
1166        // if the packet does not have sharers, it is passing
1167        // writable, and we got the writeback in Modified or Exclusive
1168        // state, if not we are in the Owned or Shared state
1169        if (!pkt->hasSharers()) {
1170            blk->status |= BlkWritable;
1171        }
1172        // nothing else to do; writeback doesn't expect response
1173        assert(!pkt->needsResponse());
1174        pkt->writeDataToBlock(blk->data, blkSize);
1175        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
1176        incHitCount(pkt);
1177
1178        // A writeback searches for the block, then writes the data
1179        lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
1180
1181        // When the packet metadata arrives, the tag lookup will be done while
1182        // the payload is arriving. Then the block will be ready to access as
1183        // soon as the fill is done
1184        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
1185            std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));
1186
1187        return true;
1188    } else if (pkt->cmd == MemCmd::CleanEvict) {
1189        // A CleanEvict does not need to access the data array
1190        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1191
1192        if (blk) {
1193            // Found the block in the tags, need to stop CleanEvict from
1194            // propagating further down the hierarchy. Returning true will
1195            // treat the CleanEvict like a satisfied write request and delete
1196            // it.
1197            return true;
1198        }
1199        // We didn't find the block here, propagate the CleanEvict further
1200        // down the memory hierarchy. Returning false will treat the CleanEvict
1201        // like a Writeback which could not find a replaceable block so has to
1202        // go to next level.
1203        return false;
1204    } else if (pkt->cmd == MemCmd::WriteClean) {
1205        // WriteClean handling is a special case. We can allocate a
1206        // block directly if it doesn't exist and we can update the
1207        // block immediately. The WriteClean transfers the ownership
1208        // of the block as well.
1209        assert(blkSize == pkt->getSize());
1210
1211        if (!blk) {
1212            if (pkt->writeThrough()) {
1213                // A writeback searches for the block, then writes the data.
1214                // As the block could not be found, it was a tag-only access.
1215                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1216
1217                // if this is a write through packet, we don't try to
1218                // allocate if the block is not present
1219                return false;
1220            } else {
1221                // a writeback that misses needs to allocate a new block
1222                blk = allocateBlock(pkt, writebacks);
1223                if (!blk) {
1224                    // no replaceable block available: give up, fwd to
1225                    // next level.
1226                    incMissCount(pkt);
1227
1228                    // A writeback searches for the block, then writes the
1229                    // data. As the block could not be found, it was a tag-only
1230                    // access.
1231                    lat = calculateTagOnlyLatency(pkt->headerDelay,
1232                                                  tag_latency);
1233
1234                    return false;
1235                }
1236
1237                blk->status |= BlkReadable;
1238            }
1239        } else if (compressor) {
1240            // This is an overwrite to an existing block, therefore we need
1241            // to check for data expansion (i.e., block was compressed with
1242            // a smaller size, and now it doesn't fit the entry anymore).
1243            // If that is the case we might need to evict blocks.
1244            if (!updateCompressionData(blk, pkt->getConstPtr<uint64_t>(),
1245                writebacks)) {
1246                // This is a failed data expansion (write), which happened
1247                // after finding the replacement entries and accessing the
1248                // block's data. There were no replaceable entries available
1249                // to make room for the expanded block, and since it does not
1250                // fit anymore and it has been properly updated to contain
1251                // the new data, forward it to the next level
1252                lat = calculateAccessLatency(blk, pkt->headerDelay,
1253                                             tag_latency);
1254                invalidateBlock(blk);
1255                return false;
1256            }
1257        }
1258
1259        // at this point either this is a writeback or a write-through
1260        // write clean operation and the block is already in this
1261        // cache, we need to update the data and the block flags
1262        assert(blk);
1263        // TODO: the coherent cache can assert(!blk->isDirty());
1264        if (!pkt->writeThrough()) {
1265            blk->status |= BlkDirty;
1266        }
1267        // nothing else to do; writeback doesn't expect response
1268        assert(!pkt->needsResponse());
1269        pkt->writeDataToBlock(blk->data, blkSize);
1270        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
1271
1272        incHitCount(pkt);
1273
1274        // A writeback searches for the block, then writes the data
1275        lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
1276
1277        // When the packet metadata arrives, the tag lookup will be done while
1278        // the payload is arriving. Then the block will be ready to access as
1279        // soon as the fill is done
1280        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
1281            std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));
1282
1283        // If this a write-through packet it will be sent to cache below
1284        return !pkt->writeThrough();
1285    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
1286                       blk->isReadable())) {
1287        // OK to satisfy access
1288        incHitCount(pkt);
1289
1290        // Calculate access latency based on the need to access the data array
1291        if (pkt->isRead() || pkt->isWrite()) {
1292            lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
1293
1294            // When a block is compressed, it must first be decompressed
1295            // before being read. This adds to the access latency.
1296            if (compressor && pkt->isRead()) {
1297                lat += compressor->getDecompressionLatency(blk);
1298            }
1299        } else {
1300            lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1301        }
1302
1303        satisfyRequest(pkt, blk);
1304        maintainClusivity(pkt->fromCache(), blk);
1305
1306        return true;
1307    }
1308
1309    // Can't satisfy access normally... either no block (blk == nullptr)
1310    // or have block but need writable
1311
1312    incMissCount(pkt);
1313
1314    lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
1315
1316    if (!blk && pkt->isLLSC() && pkt->isWrite()) {
1317        // complete miss on store conditional... just give up now
1318        pkt->req->setExtraData(0);
1319        return true;
1320    }
1321
1322    return false;
1323}
1324
1325void
1326BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
1327{
1328    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
1329        clusivity == Enums::mostly_excl) {
1330        // if we have responded to a cache, and our block is still
1331        // valid, but not dirty, and this cache is mostly exclusive
1332        // with respect to the cache above, drop the block
1333        invalidateBlock(blk);
1334    }
1335}
1336
1337CacheBlk*
1338BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
1339                      bool allocate)
1340{
1341    assert(pkt->isResponse());
1342    Addr addr = pkt->getAddr();
1343    bool is_secure = pkt->isSecure();
1344#if TRACING_ON
1345    CacheBlk::State old_state = blk ? blk->status : 0;
1346#endif
1347
1348    // When handling a fill, we should have no writes to this line.
1349    assert(addr == pkt->getBlockAddr(blkSize));
1350    assert(!writeBuffer.findMatch(addr, is_secure));
1351
1352    if (!blk) {
1353        // better have read new data...
1354        assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);
1355
1356        // need to do a replacement if allocating, otherwise we stick
1357        // with the temporary storage
1358        blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;
1359
1360        if (!blk) {
1361            // No replaceable block or a mostly exclusive
1362            // cache... just use temporary storage to complete the
1363            // current request and then get rid of it
1364            blk = tempBlock;
1365            tempBlock->insert(addr, is_secure);
1366            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
1367                    is_secure ? "s" : "ns");
1368        }
1369    } else {
1370        // existing block... probably an upgrade
1371        // don't clear block status... if block is already dirty we
1372        // don't want to lose that
1373    }
1374
1375    // Block is guaranteed to be valid at this point
1376    assert(blk->isValid());
1377    assert(blk->isSecure() == is_secure);
1378    assert(regenerateBlkAddr(blk) == addr);
1379
1380    blk->status |= BlkReadable;
1381
1382    // sanity check for whole-line writes, which should always be
1383    // marked as writable as part of the fill, and then later marked
1384    // dirty as part of satisfyRequest
1385    if (pkt->cmd == MemCmd::InvalidateResp) {
1386        assert(!pkt->hasSharers());
1387    }
1388
1389    // here we deal with setting the appropriate state of the line,
1390    // and we start by looking at the hasSharers flag, and ignore the
1391    // cacheResponding flag (normally signalling dirty data) if the
1392    // packet has sharers, thus the line is never allocated as Owned
1393    // (dirty but not writable), and always ends up being either
1394    // Shared, Exclusive or Modified, see Packet::setCacheResponding
1395    // for more details
1396    if (!pkt->hasSharers()) {
1397        // we could get a writable line from memory (rather than a
1398        // cache) even in a read-only cache, note that we set this bit
1399        // even for a read-only cache, possibly revisit this decision
1400        blk->status |= BlkWritable;
1401
1402        // check if we got this via cache-to-cache transfer (i.e., from a
1403        // cache that had the block in Modified or Owned state)
1404        if (pkt->cacheResponding()) {
1405            // we got the block in Modified state, and invalidated the
1406            // owners copy
1407            blk->status |= BlkDirty;
1408
1409            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
1410                          "in read-only cache %s\n", name());
1411
1412        } else if (pkt->cmd.isSWPrefetch() && pkt->needsWritable()) {
1413            // All other copies of the block were invalidated and we
1414            // have an exclusive copy.
1415
1416            // The coherence protocol assumes that if we fetched an
1417            // exclusive copy of the block, we have the intention to
1418            // modify it. Therefore the MSHR for the PrefetchExReq has
1419            // been the point of ordering and this cache has commited
1420            // to respond to snoops for the block.
1421            //
1422            // In most cases this is true anyway - a PrefetchExReq
1423            // will be followed by a WriteReq. However, if that
1424            // doesn't happen, the block is not marked as dirty and
1425            // the cache doesn't respond to snoops that has committed
1426            // to do so.
1427            //
1428            // To avoid deadlocks in cases where there is a snoop
1429            // between the PrefetchExReq and the expected WriteReq, we
1430            // proactively mark the block as Dirty.
1431
1432            blk->status |= BlkDirty;
1433
1434            panic_if(!isReadOnly, "Prefetch exclusive requests from read-only "
1435                     "cache %s\n", name());
1436        }
1437    }
1438
1439    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
1440            addr, is_secure ? "s" : "ns", old_state, blk->print());
1441
1442    // if we got new data, copy it in (checking for a read response
1443    // and a response that has data is the same in the end)
1444    if (pkt->isRead()) {
1445        // sanity checks
1446        assert(pkt->hasData());
1447        assert(pkt->getSize() == blkSize);
1448
1449        pkt->writeDataToBlock(blk->data, blkSize);
1450    }
1451    // The block will be ready when the payload arrives and the fill is done
1452    blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
1453                      pkt->payloadDelay);
1454
1455    return blk;
1456}
1457
1458CacheBlk*
1459BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
1460{
1461    // Get address
1462    const Addr addr = pkt->getAddr();
1463
1464    // Get secure bit
1465    const bool is_secure = pkt->isSecure();
1466
1467    // Block size and compression related access latency. Only relevant if
1468    // using a compressor, otherwise there is no extra delay, and the block
1469    // is fully sized
1470    std::size_t blk_size_bits = blkSize*8;
1471    Cycles compression_lat = Cycles(0);
1472    Cycles decompression_lat = Cycles(0);
1473
1474    // If a compressor is being used, it is called to compress data before
1475    // insertion. Although in Gem5 the data is stored uncompressed, even if a
1476    // compressor is used, the compression/decompression methods are called to
1477    // calculate the amount of extra cycles needed to read or write compressed
1478    // blocks.
1479    if (compressor) {
1480        compressor->compress(pkt->getConstPtr<uint64_t>(), compression_lat,
1481                             decompression_lat, blk_size_bits);
1482    }
1483
1484    // Find replacement victim
1485    std::vector<CacheBlk*> evict_blks;
1486    CacheBlk *victim = tags->findVictim(addr, is_secure, blk_size_bits,
1487                                        evict_blks);
1488
1489    // It is valid to return nullptr if there is no victim
1490    if (!victim)
1491        return nullptr;
1492
1493    // Print victim block's information
1494    DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print());
1495
1496    // Check for transient state allocations. If any of the entries listed
1497    // for eviction has a transient state, the allocation fails
1498    bool replacement = false;
1499    for (const auto& blk : evict_blks) {
1500        if (blk->isValid()) {
1501            replacement = true;
1502
1503            Addr repl_addr = regenerateBlkAddr(blk);
1504            MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
1505            if (repl_mshr) {
1506                // must be an outstanding upgrade or clean request
1507                // on a block we're about to replace...
1508                assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
1509                       repl_mshr->isCleaning());
1510
1511                // too hard to replace block with transient state
1512                // allocation failed, block not inserted
1513                return nullptr;
1514            }
1515        }
1516    }
1517
1518    // The victim will be replaced by a new entry, so increase the replacement
1519    // counter if a valid block is being replaced
1520    if (replacement) {
1521        // Evict valid blocks associated to this victim block
1522        for (const auto& blk : evict_blks) {
1523            if (blk->isValid()) {
1524                DPRINTF(CacheRepl, "Evicting %s (%#llx) to make room for " \
1525                        "%#llx (%s)\n", blk->print(), regenerateBlkAddr(blk),
1526                        addr, is_secure);
1527
1528                if (blk->wasPrefetched()) {
1529                    unusedPrefetches++;
1530                }
1531
1532                evictBlock(blk, writebacks);
1533            }
1534        }
1535
1536        replacements++;
1537    }
1538
1539    // If using a compressor, set compression data. This must be done before
1540    // block insertion, as compressed tags use this information.
1541    if (compressor) {
1542        compressor->setSizeBits(victim, blk_size_bits);
1543        compressor->setDecompressionLatency(victim, decompression_lat);
1544    }
1545
1546    // Insert new block at victimized entry
1547    tags->insertBlock(pkt, victim);
1548
1549    return victim;
1550}
1551
1552void
1553BaseCache::invalidateBlock(CacheBlk *blk)
1554{
1555    // If handling a block present in the Tags, let it do its invalidation
1556    // process, which will update stats and invalidate the block itself
1557    if (blk != tempBlock) {
1558        tags->invalidate(blk);
1559    } else {
1560        tempBlock->invalidate();
1561    }
1562}
1563
1564void
1565BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
1566{
1567    PacketPtr pkt = evictBlock(blk);
1568    if (pkt) {
1569        writebacks.push_back(pkt);
1570    }
1571}
1572
1573PacketPtr
1574BaseCache::writebackBlk(CacheBlk *blk)
1575{
1576    chatty_assert(!isReadOnly || writebackClean,
1577                  "Writeback from read-only cache");
1578    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));
1579
1580    writebacks[Request::wbMasterId]++;
1581
1582    RequestPtr req = std::make_shared<Request>(
1583        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
1584
1585    if (blk->isSecure())
1586        req->setFlags(Request::SECURE);
1587
1588    req->taskId(blk->task_id);
1589
1590    PacketPtr pkt =
1591        new Packet(req, blk->isDirty() ?
1592                   MemCmd::WritebackDirty : MemCmd::WritebackClean);
1593
1594    DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
1595            pkt->print(), blk->isWritable(), blk->isDirty());
1596
1597    if (blk->isWritable()) {
1598        // not asserting shared means we pass the block in modified
1599        // state, mark our own block non-writeable
1600        blk->status &= ~BlkWritable;
1601    } else {
1602        // we are in the Owned state, tell the receiver
1603        pkt->setHasSharers();
1604    }
1605
1606    // make sure the block is not marked dirty
1607    blk->status &= ~BlkDirty;
1608
1609    pkt->allocate();
1610    pkt->setDataFromBlock(blk->data, blkSize);
1611
1612    // When a block is compressed, it must first be decompressed before being
1613    // sent for writeback.
1614    if (compressor) {
1615        pkt->payloadDelay = compressor->getDecompressionLatency(blk);
1616    }
1617
1618    return pkt;
1619}
1620
1621PacketPtr
1622BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
1623{
1624    RequestPtr req = std::make_shared<Request>(
1625        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
1626
1627    if (blk->isSecure()) {
1628        req->setFlags(Request::SECURE);
1629    }
1630    req->taskId(blk->task_id);
1631
1632    PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);
1633
1634    if (dest) {
1635        req->setFlags(dest);
1636        pkt->setWriteThrough();
1637    }
1638
1639    DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
1640            blk->isWritable(), blk->isDirty());
1641
1642    if (blk->isWritable()) {
1643        // not asserting shared means we pass the block in modified
1644        // state, mark our own block non-writeable
1645        blk->status &= ~BlkWritable;
1646    } else {
1647        // we are in the Owned state, tell the receiver
1648        pkt->setHasSharers();
1649    }
1650
1651    // make sure the block is not marked dirty
1652    blk->status &= ~BlkDirty;
1653
1654    pkt->allocate();
1655    pkt->setDataFromBlock(blk->data, blkSize);
1656
1657    // When a block is compressed, it must first be decompressed before being
1658    // sent for writeback.
1659    if (compressor) {
1660        pkt->payloadDelay = compressor->getDecompressionLatency(blk);
1661    }
1662
1663    return pkt;
1664}
1665
1666
1667void
1668BaseCache::memWriteback()
1669{
1670    tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
1671}
1672
1673void
1674BaseCache::memInvalidate()
1675{
1676    tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
1677}
1678
1679bool
1680BaseCache::isDirty() const
1681{
1682    return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
1683}
1684
1685bool
1686BaseCache::coalesce() const
1687{
1688    return writeAllocator && writeAllocator->coalesce();
1689}
1690
1691void
1692BaseCache::writebackVisitor(CacheBlk &blk)
1693{
1694    if (blk.isDirty()) {
1695        assert(blk.isValid());
1696
1697        RequestPtr request = std::make_shared<Request>(
1698            regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);
1699
1700        request->taskId(blk.task_id);
1701        if (blk.isSecure()) {
1702            request->setFlags(Request::SECURE);
1703        }
1704
1705        Packet packet(request, MemCmd::WriteReq);
1706        packet.dataStatic(blk.data);
1707
1708        memSidePort.sendFunctional(&packet);
1709
1710        blk.status &= ~BlkDirty;
1711    }
1712}
1713
1714void
1715BaseCache::invalidateVisitor(CacheBlk &blk)
1716{
1717    if (blk.isDirty())
1718        warn_once("Invalidating dirty cache lines. " \
1719                  "Expect things to break.\n");
1720
1721    if (blk.isValid()) {
1722        assert(!blk.isDirty());
1723        invalidateBlock(&blk);
1724    }
1725}
1726
1727Tick
1728BaseCache::nextQueueReadyTime() const
1729{
1730    Tick nextReady = std::min(mshrQueue.nextReadyTime(),
1731                              writeBuffer.nextReadyTime());
1732
1733    // Don't signal prefetch ready time if no MSHRs available
1734    // Will signal once enoguh MSHRs are deallocated
1735    if (prefetcher && mshrQueue.canPrefetch()) {
1736        nextReady = std::min(nextReady,
1737                             prefetcher->nextPrefetchReadyTime());
1738    }
1739
1740    return nextReady;
1741}
1742
1743
1744bool
1745BaseCache::sendMSHRQueuePacket(MSHR* mshr)
1746{
1747    assert(mshr);
1748
1749    // use request from 1st target
1750    PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1751
1752    DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());
1753
1754    // if the cache is in write coalescing mode or (additionally) in
1755    // no allocation mode, and we have a write packet with an MSHR
1756    // that is not a whole-line write (due to incompatible flags etc),
1757    // then reset the write mode
1758    if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) {
1759        if (!mshr->isWholeLineWrite()) {
1760            // if we are currently write coalescing, hold on the
1761            // MSHR as many cycles extra as we need to completely
1762            // write a cache line
1763            if (writeAllocator->delay(mshr->blkAddr)) {
1764                Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
1765                DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
1766                        "for write coalescing\n", tgt_pkt->print(), delay);
1767                mshrQueue.delay(mshr, delay);
1768                return false;
1769            } else {
1770                writeAllocator->reset();
1771            }
1772        } else {
1773            writeAllocator->resetDelay(mshr->blkAddr);
1774        }
1775    }
1776
1777    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
1778
1779    // either a prefetch that is not present upstream, or a normal
1780    // MSHR request, proceed to get the packet to send downstream
1781    PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(),
1782                                     mshr->isWholeLineWrite());
1783
1784    mshr->isForward = (pkt == nullptr);
1785
1786    if (mshr->isForward) {
1787        // not a cache block request, but a response is expected
1788        // make copy of current packet to forward, keep current
1789        // copy for response handling
1790        pkt = new Packet(tgt_pkt, false, true);
1791        assert(!pkt->isWrite());
1792    }
1793
1794    // play it safe and append (rather than set) the sender state,
1795    // as forwarded packets may already have existing state
1796    pkt->pushSenderState(mshr);
1797
1798    if (pkt->isClean() && blk && blk->isDirty()) {
1799        // A cache clean opearation is looking for a dirty block. Mark
1800        // the packet so that the destination xbar can determine that
1801        // there will be a follow-up write packet as well.
1802        pkt->setSatisfied();
1803    }
1804
1805    if (!memSidePort.sendTimingReq(pkt)) {
1806        // we are awaiting a retry, but we
1807        // delete the packet and will be creating a new packet
1808        // when we get the opportunity
1809        delete pkt;
1810
1811        // note that we have now masked any requestBus and
1812        // schedSendEvent (we will wait for a retry before
1813        // doing anything), and this is so even if we do not
1814        // care about this packet and might override it before
1815        // it gets retried
1816        return true;
1817    } else {
1818        // As part of the call to sendTimingReq the packet is
1819        // forwarded to all neighbouring caches (and any caches
1820        // above them) as a snoop. Thus at this point we know if
1821        // any of the neighbouring caches are responding, and if
1822        // so, we know it is dirty, and we can determine if it is
1823        // being passed as Modified, making our MSHR the ordering
1824        // point
1825        bool pending_modified_resp = !pkt->hasSharers() &&
1826            pkt->cacheResponding();
1827        markInService(mshr, pending_modified_resp);
1828
1829        if (pkt->isClean() && blk && blk->isDirty()) {
1830            // A cache clean opearation is looking for a dirty
1831            // block. If a dirty block is encountered a WriteClean
1832            // will update any copies to the path to the memory
1833            // until the point of reference.
1834            DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
1835                    __func__, pkt->print(), blk->print());
1836            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
1837                                             pkt->id);
1838            PacketList writebacks;
1839            writebacks.push_back(wb_pkt);
1840            doWritebacks(writebacks, 0);
1841        }
1842
1843        return false;
1844    }
1845}
1846
1847bool
1848BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
1849{
1850    assert(wq_entry);
1851
1852    // always a single target for write queue entries
1853    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;
1854
1855    DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());
1856
1857    // forward as is, both for evictions and uncacheable writes
1858    if (!memSidePort.sendTimingReq(tgt_pkt)) {
1859        // note that we have now masked any requestBus and
1860        // schedSendEvent (we will wait for a retry before
1861        // doing anything), and this is so even if we do not
1862        // care about this packet and might override it before
1863        // it gets retried
1864        return true;
1865    } else {
1866        markInService(wq_entry);
1867        return false;
1868    }
1869}
1870
1871void
1872BaseCache::serialize(CheckpointOut &cp) const
1873{
1874    bool dirty(isDirty());
1875
1876    if (dirty) {
1877        warn("*** The cache still contains dirty data. ***\n");
1878        warn("    Make sure to drain the system using the correct flags.\n");
1879        warn("    This checkpoint will not restore correctly " \
1880             "and dirty data in the cache will be lost!\n");
1881    }
1882
1883    // Since we don't checkpoint the data in the cache, any dirty data
1884    // will be lost when restoring from a checkpoint of a system that
1885    // wasn't drained properly. Flag the checkpoint as invalid if the
1886    // cache contains dirty data.
1887    bool bad_checkpoint(dirty);
1888    SERIALIZE_SCALAR(bad_checkpoint);
1889}
1890
1891void
1892BaseCache::unserialize(CheckpointIn &cp)
1893{
1894    bool bad_checkpoint;
1895    UNSERIALIZE_SCALAR(bad_checkpoint);
1896    if (bad_checkpoint) {
1897        fatal("Restoring from checkpoints with dirty caches is not "
1898              "supported in the classic memory system. Please remove any "
1899              "caches or drain them properly before taking checkpoints.\n");
1900    }
1901}
1902
1903void
1904BaseCache::regStats()
1905{
1906    ClockedObject::regStats();
1907
1908    using namespace Stats;
1909
1910    // Hit statistics
1911    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1912        MemCmd cmd(access_idx);
1913        const string &cstr = cmd.toString();
1914
1915        hits[access_idx]
1916            .init(system->maxMasters())
1917            .name(name() + "." + cstr + "_hits")
1918            .desc("number of " + cstr + " hits")
1919            .flags(total | nozero | nonan)
1920            ;
1921        for (int i = 0; i < system->maxMasters(); i++) {
1922            hits[access_idx].subname(i, system->getMasterName(i));
1923        }
1924    }
1925
1926// These macros make it easier to sum the right subset of commands and
1927// to change the subset of commands that are considered "demand" vs
1928// "non-demand"
1929#define SUM_DEMAND(s) \
1930    (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \
1931     s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq])
1932
1933// should writebacks be included here?  prior code was inconsistent...
1934#define SUM_NON_DEMAND(s) \
1935    (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq])
1936
1937    demandHits
1938        .name(name() + ".demand_hits")
1939        .desc("number of demand (read+write) hits")
1940        .flags(total | nozero | nonan)
1941        ;
1942    demandHits = SUM_DEMAND(hits);
1943    for (int i = 0; i < system->maxMasters(); i++) {
1944        demandHits.subname(i, system->getMasterName(i));
1945    }
1946
1947    overallHits
1948        .name(name() + ".overall_hits")
1949        .desc("number of overall hits")
1950        .flags(total | nozero | nonan)
1951        ;
1952    overallHits = demandHits + SUM_NON_DEMAND(hits);
1953    for (int i = 0; i < system->maxMasters(); i++) {
1954        overallHits.subname(i, system->getMasterName(i));
1955    }
1956
1957    // Miss statistics
1958    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1959        MemCmd cmd(access_idx);
1960        const string &cstr = cmd.toString();
1961
1962        misses[access_idx]
1963            .init(system->maxMasters())
1964            .name(name() + "." + cstr + "_misses")
1965            .desc("number of " + cstr + " misses")
1966            .flags(total | nozero | nonan)
1967            ;
1968        for (int i = 0; i < system->maxMasters(); i++) {
1969            misses[access_idx].subname(i, system->getMasterName(i));
1970        }
1971    }
1972
1973    demandMisses
1974        .name(name() + ".demand_misses")
1975        .desc("number of demand (read+write) misses")
1976        .flags(total | nozero | nonan)
1977        ;
1978    demandMisses = SUM_DEMAND(misses);
1979    for (int i = 0; i < system->maxMasters(); i++) {
1980        demandMisses.subname(i, system->getMasterName(i));
1981    }
1982
1983    overallMisses
1984        .name(name() + ".overall_misses")
1985        .desc("number of overall misses")
1986        .flags(total | nozero | nonan)
1987        ;
1988    overallMisses = demandMisses + SUM_NON_DEMAND(misses);
1989    for (int i = 0; i < system->maxMasters(); i++) {
1990        overallMisses.subname(i, system->getMasterName(i));
1991    }
1992
1993    // Miss latency statistics
1994    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1995        MemCmd cmd(access_idx);
1996        const string &cstr = cmd.toString();
1997
1998        missLatency[access_idx]
1999            .init(system->maxMasters())
2000            .name(name() + "." + cstr + "_miss_latency")
2001            .desc("number of " + cstr + " miss cycles")
2002            .flags(total | nozero | nonan)
2003            ;
2004        for (int i = 0; i < system->maxMasters(); i++) {
2005            missLatency[access_idx].subname(i, system->getMasterName(i));
2006        }
2007    }
2008
2009    demandMissLatency
2010        .name(name() + ".demand_miss_latency")
2011        .desc("number of demand (read+write) miss cycles")
2012        .flags(total | nozero | nonan)
2013        ;
2014    demandMissLatency = SUM_DEMAND(missLatency);
2015    for (int i = 0; i < system->maxMasters(); i++) {
2016        demandMissLatency.subname(i, system->getMasterName(i));
2017    }
2018
2019    overallMissLatency
2020        .name(name() + ".overall_miss_latency")
2021        .desc("number of overall miss cycles")
2022        .flags(total | nozero | nonan)
2023        ;
2024    overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
2025    for (int i = 0; i < system->maxMasters(); i++) {
2026        overallMissLatency.subname(i, system->getMasterName(i));
2027    }
2028
2029    // access formulas
2030    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2031        MemCmd cmd(access_idx);
2032        const string &cstr = cmd.toString();
2033
2034        accesses[access_idx]
2035            .name(name() + "." + cstr + "_accesses")
2036            .desc("number of " + cstr + " accesses(hits+misses)")
2037            .flags(total | nozero | nonan)
2038            ;
2039        accesses[access_idx] = hits[access_idx] + misses[access_idx];
2040
2041        for (int i = 0; i < system->maxMasters(); i++) {
2042            accesses[access_idx].subname(i, system->getMasterName(i));
2043        }
2044    }
2045
2046    demandAccesses
2047        .name(name() + ".demand_accesses")
2048        .desc("number of demand (read+write) accesses")
2049        .flags(total | nozero | nonan)
2050        ;
2051    demandAccesses = demandHits + demandMisses;
2052    for (int i = 0; i < system->maxMasters(); i++) {
2053        demandAccesses.subname(i, system->getMasterName(i));
2054    }
2055
2056    overallAccesses
2057        .name(name() + ".overall_accesses")
2058        .desc("number of overall (read+write) accesses")
2059        .flags(total | nozero | nonan)
2060        ;
2061    overallAccesses = overallHits + overallMisses;
2062    for (int i = 0; i < system->maxMasters(); i++) {
2063        overallAccesses.subname(i, system->getMasterName(i));
2064    }
2065
2066    // miss rate formulas
2067    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2068        MemCmd cmd(access_idx);
2069        const string &cstr = cmd.toString();
2070
2071        missRate[access_idx]
2072            .name(name() + "." + cstr + "_miss_rate")
2073            .desc("miss rate for " + cstr + " accesses")
2074            .flags(total | nozero | nonan)
2075            ;
2076        missRate[access_idx] = misses[access_idx] / accesses[access_idx];
2077
2078        for (int i = 0; i < system->maxMasters(); i++) {
2079            missRate[access_idx].subname(i, system->getMasterName(i));
2080        }
2081    }
2082
2083    demandMissRate
2084        .name(name() + ".demand_miss_rate")
2085        .desc("miss rate for demand accesses")
2086        .flags(total | nozero | nonan)
2087        ;
2088    demandMissRate = demandMisses / demandAccesses;
2089    for (int i = 0; i < system->maxMasters(); i++) {
2090        demandMissRate.subname(i, system->getMasterName(i));
2091    }
2092
2093    overallMissRate
2094        .name(name() + ".overall_miss_rate")
2095        .desc("miss rate for overall accesses")
2096        .flags(total | nozero | nonan)
2097        ;
2098    overallMissRate = overallMisses / overallAccesses;
2099    for (int i = 0; i < system->maxMasters(); i++) {
2100        overallMissRate.subname(i, system->getMasterName(i));
2101    }
2102
2103    // miss latency formulas
2104    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2105        MemCmd cmd(access_idx);
2106        const string &cstr = cmd.toString();
2107
2108        avgMissLatency[access_idx]
2109            .name(name() + "." + cstr + "_avg_miss_latency")
2110            .desc("average " + cstr + " miss latency")
2111            .flags(total | nozero | nonan)
2112            ;
2113        avgMissLatency[access_idx] =
2114            missLatency[access_idx] / misses[access_idx];
2115
2116        for (int i = 0; i < system->maxMasters(); i++) {
2117            avgMissLatency[access_idx].subname(i, system->getMasterName(i));
2118        }
2119    }
2120
2121    demandAvgMissLatency
2122        .name(name() + ".demand_avg_miss_latency")
2123        .desc("average overall miss latency")
2124        .flags(total | nozero | nonan)
2125        ;
2126    demandAvgMissLatency = demandMissLatency / demandMisses;
2127    for (int i = 0; i < system->maxMasters(); i++) {
2128        demandAvgMissLatency.subname(i, system->getMasterName(i));
2129    }
2130
2131    overallAvgMissLatency
2132        .name(name() + ".overall_avg_miss_latency")
2133        .desc("average overall miss latency")
2134        .flags(total | nozero | nonan)
2135        ;
2136    overallAvgMissLatency = overallMissLatency / overallMisses;
2137    for (int i = 0; i < system->maxMasters(); i++) {
2138        overallAvgMissLatency.subname(i, system->getMasterName(i));
2139    }
2140
2141    blocked_cycles.init(NUM_BLOCKED_CAUSES);
2142    blocked_cycles
2143        .name(name() + ".blocked_cycles")
2144        .desc("number of cycles access was blocked")
2145        .subname(Blocked_NoMSHRs, "no_mshrs")
2146        .subname(Blocked_NoTargets, "no_targets")
2147        ;
2148
2149
2150    blocked_causes.init(NUM_BLOCKED_CAUSES);
2151    blocked_causes
2152        .name(name() + ".blocked")
2153        .desc("number of cycles access was blocked")
2154        .subname(Blocked_NoMSHRs, "no_mshrs")
2155        .subname(Blocked_NoTargets, "no_targets")
2156        ;
2157
2158    avg_blocked
2159        .name(name() + ".avg_blocked_cycles")
2160        .desc("average number of cycles each access was blocked")
2161        .subname(Blocked_NoMSHRs, "no_mshrs")
2162        .subname(Blocked_NoTargets, "no_targets")
2163        ;
2164
2165    avg_blocked = blocked_cycles / blocked_causes;
2166
2167    unusedPrefetches
2168        .name(name() + ".unused_prefetches")
2169        .desc("number of HardPF blocks evicted w/o reference")
2170        .flags(nozero)
2171        ;
2172
2173    writebacks
2174        .init(system->maxMasters())
2175        .name(name() + ".writebacks")
2176        .desc("number of writebacks")
2177        .flags(total | nozero | nonan)
2178        ;
2179    for (int i = 0; i < system->maxMasters(); i++) {
2180        writebacks.subname(i, system->getMasterName(i));
2181    }
2182
2183    // MSHR statistics
2184    // MSHR hit statistics
2185    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2186        MemCmd cmd(access_idx);
2187        const string &cstr = cmd.toString();
2188
2189        mshr_hits[access_idx]
2190            .init(system->maxMasters())
2191            .name(name() + "." + cstr + "_mshr_hits")
2192            .desc("number of " + cstr + " MSHR hits")
2193            .flags(total | nozero | nonan)
2194            ;
2195        for (int i = 0; i < system->maxMasters(); i++) {
2196            mshr_hits[access_idx].subname(i, system->getMasterName(i));
2197        }
2198    }
2199
2200    demandMshrHits
2201        .name(name() + ".demand_mshr_hits")
2202        .desc("number of demand (read+write) MSHR hits")
2203        .flags(total | nozero | nonan)
2204        ;
2205    demandMshrHits = SUM_DEMAND(mshr_hits);
2206    for (int i = 0; i < system->maxMasters(); i++) {
2207        demandMshrHits.subname(i, system->getMasterName(i));
2208    }
2209
2210    overallMshrHits
2211        .name(name() + ".overall_mshr_hits")
2212        .desc("number of overall MSHR hits")
2213        .flags(total | nozero | nonan)
2214        ;
2215    overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
2216    for (int i = 0; i < system->maxMasters(); i++) {
2217        overallMshrHits.subname(i, system->getMasterName(i));
2218    }
2219
2220    // MSHR miss statistics
2221    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2222        MemCmd cmd(access_idx);
2223        const string &cstr = cmd.toString();
2224
2225        mshr_misses[access_idx]
2226            .init(system->maxMasters())
2227            .name(name() + "." + cstr + "_mshr_misses")
2228            .desc("number of " + cstr + " MSHR misses")
2229            .flags(total | nozero | nonan)
2230            ;
2231        for (int i = 0; i < system->maxMasters(); i++) {
2232            mshr_misses[access_idx].subname(i, system->getMasterName(i));
2233        }
2234    }
2235
2236    demandMshrMisses
2237        .name(name() + ".demand_mshr_misses")
2238        .desc("number of demand (read+write) MSHR misses")
2239        .flags(total | nozero | nonan)
2240        ;
2241    demandMshrMisses = SUM_DEMAND(mshr_misses);
2242    for (int i = 0; i < system->maxMasters(); i++) {
2243        demandMshrMisses.subname(i, system->getMasterName(i));
2244    }
2245
2246    overallMshrMisses
2247        .name(name() + ".overall_mshr_misses")
2248        .desc("number of overall MSHR misses")
2249        .flags(total | nozero | nonan)
2250        ;
2251    overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
2252    for (int i = 0; i < system->maxMasters(); i++) {
2253        overallMshrMisses.subname(i, system->getMasterName(i));
2254    }
2255
2256    // MSHR miss latency statistics
2257    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2258        MemCmd cmd(access_idx);
2259        const string &cstr = cmd.toString();
2260
2261        mshr_miss_latency[access_idx]
2262            .init(system->maxMasters())
2263            .name(name() + "." + cstr + "_mshr_miss_latency")
2264            .desc("number of " + cstr + " MSHR miss cycles")
2265            .flags(total | nozero | nonan)
2266            ;
2267        for (int i = 0; i < system->maxMasters(); i++) {
2268            mshr_miss_latency[access_idx].subname(i, system->getMasterName(i));
2269        }
2270    }
2271
2272    demandMshrMissLatency
2273        .name(name() + ".demand_mshr_miss_latency")
2274        .desc("number of demand (read+write) MSHR miss cycles")
2275        .flags(total | nozero | nonan)
2276        ;
2277    demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
2278    for (int i = 0; i < system->maxMasters(); i++) {
2279        demandMshrMissLatency.subname(i, system->getMasterName(i));
2280    }
2281
2282    overallMshrMissLatency
2283        .name(name() + ".overall_mshr_miss_latency")
2284        .desc("number of overall MSHR miss cycles")
2285        .flags(total | nozero | nonan)
2286        ;
2287    overallMshrMissLatency =
2288        demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
2289    for (int i = 0; i < system->maxMasters(); i++) {
2290        overallMshrMissLatency.subname(i, system->getMasterName(i));
2291    }
2292
2293    // MSHR uncacheable statistics
2294    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2295        MemCmd cmd(access_idx);
2296        const string &cstr = cmd.toString();
2297
2298        mshr_uncacheable[access_idx]
2299            .init(system->maxMasters())
2300            .name(name() + "." + cstr + "_mshr_uncacheable")
2301            .desc("number of " + cstr + " MSHR uncacheable")
2302            .flags(total | nozero | nonan)
2303            ;
2304        for (int i = 0; i < system->maxMasters(); i++) {
2305            mshr_uncacheable[access_idx].subname(i, system->getMasterName(i));
2306        }
2307    }
2308
2309    overallMshrUncacheable
2310        .name(name() + ".overall_mshr_uncacheable_misses")
2311        .desc("number of overall MSHR uncacheable misses")
2312        .flags(total | nozero | nonan)
2313        ;
2314    overallMshrUncacheable =
2315        SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
2316    for (int i = 0; i < system->maxMasters(); i++) {
2317        overallMshrUncacheable.subname(i, system->getMasterName(i));
2318    }
2319
2320    // MSHR miss latency statistics
2321    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2322        MemCmd cmd(access_idx);
2323        const string &cstr = cmd.toString();
2324
2325        mshr_uncacheable_lat[access_idx]
2326            .init(system->maxMasters())
2327            .name(name() + "." + cstr + "_mshr_uncacheable_latency")
2328            .desc("number of " + cstr + " MSHR uncacheable cycles")
2329            .flags(total | nozero | nonan)
2330            ;
2331        for (int i = 0; i < system->maxMasters(); i++) {
2332            mshr_uncacheable_lat[access_idx].subname(
2333                i, system->getMasterName(i));
2334        }
2335    }
2336
2337    overallMshrUncacheableLatency
2338        .name(name() + ".overall_mshr_uncacheable_latency")
2339        .desc("number of overall MSHR uncacheable cycles")
2340        .flags(total | nozero | nonan)
2341        ;
2342    overallMshrUncacheableLatency =
2343        SUM_DEMAND(mshr_uncacheable_lat) +
2344        SUM_NON_DEMAND(mshr_uncacheable_lat);
2345    for (int i = 0; i < system->maxMasters(); i++) {
2346        overallMshrUncacheableLatency.subname(i, system->getMasterName(i));
2347    }
2348
2349    // MSHR miss rate formulas
2350    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2351        MemCmd cmd(access_idx);
2352        const string &cstr = cmd.toString();
2353
2354        mshrMissRate[access_idx]
2355            .name(name() + "." + cstr + "_mshr_miss_rate")
2356            .desc("mshr miss rate for " + cstr + " accesses")
2357            .flags(total | nozero | nonan)
2358            ;
2359        mshrMissRate[access_idx] =
2360            mshr_misses[access_idx] / accesses[access_idx];
2361
2362        for (int i = 0; i < system->maxMasters(); i++) {
2363            mshrMissRate[access_idx].subname(i, system->getMasterName(i));
2364        }
2365    }
2366
2367    demandMshrMissRate
2368        .name(name() + ".demand_mshr_miss_rate")
2369        .desc("mshr miss rate for demand accesses")
2370        .flags(total | nozero | nonan)
2371        ;
2372    demandMshrMissRate = demandMshrMisses / demandAccesses;
2373    for (int i = 0; i < system->maxMasters(); i++) {
2374        demandMshrMissRate.subname(i, system->getMasterName(i));
2375    }
2376
2377    overallMshrMissRate
2378        .name(name() + ".overall_mshr_miss_rate")
2379        .desc("mshr miss rate for overall accesses")
2380        .flags(total | nozero | nonan)
2381        ;
2382    overallMshrMissRate = overallMshrMisses / overallAccesses;
2383    for (int i = 0; i < system->maxMasters(); i++) {
2384        overallMshrMissRate.subname(i, system->getMasterName(i));
2385    }
2386
2387    // mshrMiss latency formulas
2388    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2389        MemCmd cmd(access_idx);
2390        const string &cstr = cmd.toString();
2391
2392        avgMshrMissLatency[access_idx]
2393            .name(name() + "." + cstr + "_avg_mshr_miss_latency")
2394            .desc("average " + cstr + " mshr miss latency")
2395            .flags(total | nozero | nonan)
2396            ;
2397        avgMshrMissLatency[access_idx] =
2398            mshr_miss_latency[access_idx] / mshr_misses[access_idx];
2399
2400        for (int i = 0; i < system->maxMasters(); i++) {
2401            avgMshrMissLatency[access_idx].subname(
2402                i, system->getMasterName(i));
2403        }
2404    }
2405
2406    demandAvgMshrMissLatency
2407        .name(name() + ".demand_avg_mshr_miss_latency")
2408        .desc("average overall mshr miss latency")
2409        .flags(total | nozero | nonan)
2410        ;
2411    demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
2412    for (int i = 0; i < system->maxMasters(); i++) {
2413        demandAvgMshrMissLatency.subname(i, system->getMasterName(i));
2414    }
2415
2416    overallAvgMshrMissLatency
2417        .name(name() + ".overall_avg_mshr_miss_latency")
2418        .desc("average overall mshr miss latency")
2419        .flags(total | nozero | nonan)
2420        ;
2421    overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
2422    for (int i = 0; i < system->maxMasters(); i++) {
2423        overallAvgMshrMissLatency.subname(i, system->getMasterName(i));
2424    }
2425
2426    // mshrUncacheable latency formulas
2427    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2428        MemCmd cmd(access_idx);
2429        const string &cstr = cmd.toString();
2430
2431        avgMshrUncacheableLatency[access_idx]
2432            .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency")
2433            .desc("average " + cstr + " mshr uncacheable latency")
2434            .flags(total | nozero | nonan)
2435            ;
2436        avgMshrUncacheableLatency[access_idx] =
2437            mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
2438
2439        for (int i = 0; i < system->maxMasters(); i++) {
2440            avgMshrUncacheableLatency[access_idx].subname(
2441                i, system->getMasterName(i));
2442        }
2443    }
2444
2445    overallAvgMshrUncacheableLatency
2446        .name(name() + ".overall_avg_mshr_uncacheable_latency")
2447        .desc("average overall mshr uncacheable latency")
2448        .flags(total | nozero | nonan)
2449        ;
2450    overallAvgMshrUncacheableLatency =
2451        overallMshrUncacheableLatency / overallMshrUncacheable;
2452    for (int i = 0; i < system->maxMasters(); i++) {
2453        overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i));
2454    }
2455
2456    replacements
2457        .name(name() + ".replacements")
2458        .desc("number of replacements")
2459        ;
2460
2461    dataExpansions
2462        .name(name() + ".data_expansions")
2463        .desc("number of data expansions")
2464        .flags(nozero | nonan)
2465        ;
2466}
2467
2468void
2469BaseCache::regProbePoints()
2470{
2471    ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit");
2472    ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss");
2473    ppFill = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Fill");
2474}
2475
2476///////////////
2477//
2478// CpuSidePort
2479//
2480///////////////
2481bool
2482BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
2483{
2484    // Snoops shouldn't happen when bypassing caches
2485    assert(!cache->system->bypassCaches());
2486
2487    assert(pkt->isResponse());
2488
2489    // Express snoop responses from master to slave, e.g., from L1 to L2
2490    cache->recvTimingSnoopResp(pkt);
2491    return true;
2492}
2493
2494
2495bool
2496BaseCache::CpuSidePort::tryTiming(PacketPtr pkt)
2497{
2498    if (cache->system->bypassCaches() || pkt->isExpressSnoop()) {
2499        // always let express snoop packets through even if blocked
2500        return true;
2501    } else if (blocked || mustSendRetry) {
2502        // either already committed to send a retry, or blocked
2503        mustSendRetry = true;
2504        return false;
2505    }
2506    mustSendRetry = false;
2507    return true;
2508}
2509
2510bool
2511BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
2512{
2513    assert(pkt->isRequest());
2514
2515    if (cache->system->bypassCaches()) {
2516        // Just forward the packet if caches are disabled.
2517        // @todo This should really enqueue the packet rather
2518        bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
2519        assert(success);
2520        return true;
2521    } else if (tryTiming(pkt)) {
2522        cache->recvTimingReq(pkt);
2523        return true;
2524    }
2525    return false;
2526}
2527
2528Tick
2529BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
2530{
2531    if (cache->system->bypassCaches()) {
2532        // Forward the request if the system is in cache bypass mode.
2533        return cache->memSidePort.sendAtomic(pkt);
2534    } else {
2535        return cache->recvAtomic(pkt);
2536    }
2537}
2538
2539void
2540BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
2541{
2542    if (cache->system->bypassCaches()) {
2543        // The cache should be flushed if we are in cache bypass mode,
2544        // so we don't need to check if we need to update anything.
2545        cache->memSidePort.sendFunctional(pkt);
2546        return;
2547    }
2548
2549    // functional request
2550    cache->functionalAccess(pkt, true);
2551}
2552
2553AddrRangeList
2554BaseCache::CpuSidePort::getAddrRanges() const
2555{
2556    return cache->getAddrRanges();
2557}
2558
2559
2560BaseCache::
2561CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
2562                         const std::string &_label)
2563    : CacheSlavePort(_name, _cache, _label), cache(_cache)
2564{
2565}
2566
2567///////////////
2568//
2569// MemSidePort
2570//
2571///////////////
2572bool
2573BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
2574{
2575    cache->recvTimingResp(pkt);
2576    return true;
2577}
2578
2579// Express snooping requests to memside port
2580void
2581BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2582{
2583    // Snoops shouldn't happen when bypassing caches
2584    assert(!cache->system->bypassCaches());
2585
2586    // handle snooping requests
2587    cache->recvTimingSnoopReq(pkt);
2588}
2589
2590Tick
2591BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
2592{
2593    // Snoops shouldn't happen when bypassing caches
2594    assert(!cache->system->bypassCaches());
2595
2596    return cache->recvAtomicSnoop(pkt);
2597}
2598
2599void
2600BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
2601{
2602    // Snoops shouldn't happen when bypassing caches
2603    assert(!cache->system->bypassCaches());
2604
2605    // functional snoop (note that in contrast to atomic we don't have
2606    // a specific functionalSnoop method, as they have the same
2607    // behaviour regardless)
2608    cache->functionalAccess(pkt, false);
2609}
2610
2611void
2612BaseCache::CacheReqPacketQueue::sendDeferredPacket()
2613{
2614    // sanity check
2615    assert(!waitingOnRetry);
2616
2617    // there should never be any deferred request packets in the
2618    // queue, instead we resly on the cache to provide the packets
2619    // from the MSHR queue or write queue
2620    assert(deferredPacketReadyTime() == MaxTick);
2621
2622    // check for request packets (requests & writebacks)
2623    QueueEntry* entry = cache.getNextQueueEntry();
2624
2625    if (!entry) {
2626        // can happen if e.g. we attempt a writeback and fail, but
2627        // before the retry, the writeback is eliminated because
2628        // we snoop another cache's ReadEx.
2629    } else {
2630        // let our snoop responses go first if there are responses to
2631        // the same addresses
2632        if (checkConflictingSnoop(entry->getTarget()->pkt)) {
2633            return;
2634        }
2635        waitingOnRetry = entry->sendPacket(cache);
2636    }
2637
2638    // if we succeeded and are not waiting for a retry, schedule the
2639    // next send considering when the next queue is ready, note that
2640    // snoop responses have their own packet queue and thus schedule
2641    // their own events
2642    if (!waitingOnRetry) {
2643        schedSendEvent(cache.nextQueueReadyTime());
2644    }
2645}
2646
2647BaseCache::MemSidePort::MemSidePort(const std::string &_name,
2648                                    BaseCache *_cache,
2649                                    const std::string &_label)
2650    : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
2651      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
2652      _snoopRespQueue(*_cache, *this, true, _label), cache(_cache)
2653{
2654}
2655
2656void
2657WriteAllocator::updateMode(Addr write_addr, unsigned write_size,
2658                           Addr blk_addr)
2659{
2660    // check if we are continuing where the last write ended
2661    if (nextAddr == write_addr) {
2662        delayCtr[blk_addr] = delayThreshold;
2663        // stop if we have already saturated
2664        if (mode != WriteMode::NO_ALLOCATE) {
2665            byteCount += write_size;
2666            // switch to streaming mode if we have passed the lower
2667            // threshold
2668            if (mode == WriteMode::ALLOCATE &&
2669                byteCount > coalesceLimit) {
2670                mode = WriteMode::COALESCE;
2671                DPRINTF(Cache, "Switched to write coalescing\n");
2672            } else if (mode == WriteMode::COALESCE &&
2673                       byteCount > noAllocateLimit) {
2674                // and continue and switch to non-allocating mode if we
2675                // pass the upper threshold
2676                mode = WriteMode::NO_ALLOCATE;
2677                DPRINTF(Cache, "Switched to write-no-allocate\n");
2678            }
2679        }
2680    } else {
2681        // we did not see a write matching the previous one, start
2682        // over again
2683        byteCount = write_size;
2684        mode = WriteMode::ALLOCATE;
2685        resetDelay(blk_addr);
2686    }
2687    nextAddr = write_addr + write_size;
2688}
2689
2690WriteAllocator*
2691WriteAllocatorParams::create()
2692{
2693    return new WriteAllocator(this);
2694}
2695