base.cc revision 14118:3d2ee7721eb0
1/*
2 * Copyright (c) 2012-2013, 2018-2019 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder.  You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2003-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Erik Hallnor
41 *          Nikos Nikoleris
42 */
43
44/**
45 * @file
46 * Definition of BaseCache functions.
47 */
48
49#include "mem/cache/base.hh"
50
51#include "base/compiler.hh"
52#include "base/logging.hh"
53#include "debug/Cache.hh"
54#include "debug/CacheComp.hh"
55#include "debug/CachePort.hh"
56#include "debug/CacheRepl.hh"
57#include "debug/CacheVerbose.hh"
58#include "mem/cache/compressors/base.hh"
59#include "mem/cache/mshr.hh"
60#include "mem/cache/prefetch/base.hh"
61#include "mem/cache/queue_entry.hh"
62#include "mem/cache/tags/super_blk.hh"
63#include "params/BaseCache.hh"
64#include "params/WriteAllocator.hh"
65#include "sim/core.hh"
66
67class BaseMasterPort;
68class BaseSlavePort;
69
70using namespace std;
71
72BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
73                                          BaseCache *_cache,
74                                          const std::string &_label)
75    : QueuedSlavePort(_name, _cache, queue),
76      queue(*_cache, *this, true, _label),
77      blocked(false), mustSendRetry(false),
78      sendRetryEvent([this]{ processSendRetry(); }, _name)
79{
80}
81
82BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
83    : ClockedObject(p),
84      cpuSidePort (p->name + ".cpu_side", this, "CpuSidePort"),
85      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
86      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
87      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
88      tags(p->tags),
89      compressor(p->compressor),
90      prefetcher(p->prefetcher),
91      writeAllocator(p->write_allocator),
92      writebackClean(p->writeback_clean),
93      tempBlockWriteback(nullptr),
94      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
95                                    name(), false,
96                                    EventBase::Delayed_Writeback_Pri),
97      blkSize(blk_size),
98      lookupLatency(p->tag_latency),
99      dataLatency(p->data_latency),
100      forwardLatency(p->tag_latency),
101      fillLatency(p->data_latency),
102      responseLatency(p->response_latency),
103      sequentialAccess(p->sequential_access),
104      numTarget(p->tgts_per_mshr),
105      forwardSnoops(true),
106      clusivity(p->clusivity),
107      isReadOnly(p->is_read_only),
108      blocked(0),
109      order(0),
110      noTargetMSHR(nullptr),
111      missCount(p->max_miss_count),
112      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
113      system(p->system)
114{
115    // the MSHR queue has no reserve entries as we check the MSHR
116    // queue on every single allocation, whereas the write queue has
117    // as many reserve entries as we have MSHRs, since every MSHR may
118    // eventually require a writeback, and we do not check the write
119    // buffer before committing to an MSHR
120
121    // forward snoops is overridden in init() once we can query
122    // whether the connected master is actually snooping or not
123
124    tempBlock = new TempCacheBlk(blkSize);
125
126    tags->tagsInit();
127    if (prefetcher)
128        prefetcher->setCache(this);
129}
130
131BaseCache::~BaseCache()
132{
133    delete tempBlock;
134}
135
136void
137BaseCache::CacheSlavePort::setBlocked()
138{
139    assert(!blocked);
140    DPRINTF(CachePort, "Port is blocking new requests\n");
141    blocked = true;
142    // if we already scheduled a retry in this cycle, but it has not yet
143    // happened, cancel it
144    if (sendRetryEvent.scheduled()) {
145        owner.deschedule(sendRetryEvent);
146        DPRINTF(CachePort, "Port descheduled retry\n");
147        mustSendRetry = true;
148    }
149}
150
151void
152BaseCache::CacheSlavePort::clearBlocked()
153{
154    assert(blocked);
155    DPRINTF(CachePort, "Port is accepting new requests\n");
156    blocked = false;
157    if (mustSendRetry) {
158        // @TODO: need to find a better time (next cycle?)
159        owner.schedule(sendRetryEvent, curTick() + 1);
160    }
161}
162
163void
164BaseCache::CacheSlavePort::processSendRetry()
165{
166    DPRINTF(CachePort, "Port is sending retry\n");
167
168    // reset the flag and call retry
169    mustSendRetry = false;
170    sendRetryReq();
171}
172
173Addr
174BaseCache::regenerateBlkAddr(CacheBlk* blk)
175{
176    if (blk != tempBlock) {
177        return tags->regenerateBlkAddr(blk);
178    } else {
179        return tempBlock->getAddr();
180    }
181}
182
183void
184BaseCache::init()
185{
186    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
187        fatal("Cache ports on %s are not connected\n", name());
188    cpuSidePort.sendRangeChange();
189    forwardSnoops = cpuSidePort.isSnooping();
190}
191
192Port &
193BaseCache::getPort(const std::string &if_name, PortID idx)
194{
195    if (if_name == "mem_side") {
196        return memSidePort;
197    } else if (if_name == "cpu_side") {
198        return cpuSidePort;
199    }  else {
200        return ClockedObject::getPort(if_name, idx);
201    }
202}
203
204bool
205BaseCache::inRange(Addr addr) const
206{
207    for (const auto& r : addrRanges) {
208        if (r.contains(addr)) {
209            return true;
210       }
211    }
212    return false;
213}
214
215void
216BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
217{
218    if (pkt->needsResponse()) {
219        // These delays should have been consumed by now
220        assert(pkt->headerDelay == 0);
221        assert(pkt->payloadDelay == 0);
222
223        pkt->makeTimingResponse();
224
225        // In this case we are considering request_time that takes
226        // into account the delay of the xbar, if any, and just
227        // lat, neglecting responseLatency, modelling hit latency
228        // just as the value of lat overriden by access(), which calls
229        // the calculateAccessLatency() function.
230        cpuSidePort.schedTimingResp(pkt, request_time);
231    } else {
232        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
233                pkt->print());
234
235        // queue the packet for deletion, as the sending cache is
236        // still relying on it; if the block is found in access(),
237        // CleanEvict and Writeback messages will be deleted
238        // here as well
239        pendingDelete.reset(pkt);
240    }
241}
242
243void
244BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
245                               Tick forward_time, Tick request_time)
246{
247    if (writeAllocator &&
248        pkt && pkt->isWrite() && !pkt->req->isUncacheable()) {
249        writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
250                                   pkt->getBlockAddr(blkSize));
251    }
252
253    if (mshr) {
254        /// MSHR hit
255        /// @note writebacks will be checked in getNextMSHR()
256        /// for any conflicting requests to the same block
257
258        //@todo remove hw_pf here
259
260        // Coalesce unless it was a software prefetch (see above).
261        if (pkt) {
262            assert(!pkt->isWriteback());
263            // CleanEvicts corresponding to blocks which have
264            // outstanding requests in MSHRs are simply sunk here
265            if (pkt->cmd == MemCmd::CleanEvict) {
266                pendingDelete.reset(pkt);
267            } else if (pkt->cmd == MemCmd::WriteClean) {
268                // A WriteClean should never coalesce with any
269                // outstanding cache maintenance requests.
270
271                // We use forward_time here because there is an
272                // uncached memory write, forwarded to WriteBuffer.
273                allocateWriteBuffer(pkt, forward_time);
274            } else {
275                DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
276                        pkt->print());
277
278                assert(pkt->req->masterId() < system->maxMasters());
279                mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
280
281                // We use forward_time here because it is the same
282                // considering new targets. We have multiple
283                // requests for the same address here. It
284                // specifies the latency to allocate an internal
285                // buffer and to schedule an event to the queued
286                // port and also takes into account the additional
287                // delay of the xbar.
288                mshr->allocateTarget(pkt, forward_time, order++,
289                                     allocOnFill(pkt->cmd));
290                if (mshr->getNumTargets() == numTarget) {
291                    noTargetMSHR = mshr;
292                    setBlocked(Blocked_NoTargets);
293                    // need to be careful with this... if this mshr isn't
294                    // ready yet (i.e. time > curTick()), we don't want to
295                    // move it ahead of mshrs that are ready
296                    // mshrQueue.moveToFront(mshr);
297                }
298            }
299        }
300    } else {
301        // no MSHR
302        assert(pkt->req->masterId() < system->maxMasters());
303        mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
304
305        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
306            // We use forward_time here because there is an
307            // writeback or writeclean, forwarded to WriteBuffer.
308            allocateWriteBuffer(pkt, forward_time);
309        } else {
310            if (blk && blk->isValid()) {
311                // If we have a write miss to a valid block, we
312                // need to mark the block non-readable.  Otherwise
313                // if we allow reads while there's an outstanding
314                // write miss, the read could return stale data
315                // out of the cache block... a more aggressive
316                // system could detect the overlap (if any) and
317                // forward data out of the MSHRs, but we don't do
318                // that yet.  Note that we do need to leave the
319                // block valid so that it stays in the cache, in
320                // case we get an upgrade response (and hence no
321                // new data) when the write miss completes.
322                // As long as CPUs do proper store/load forwarding
323                // internally, and have a sufficiently weak memory
324                // model, this is probably unnecessary, but at some
325                // point it must have seemed like we needed it...
326                assert((pkt->needsWritable() && !blk->isWritable()) ||
327                       pkt->req->isCacheMaintenance());
328                blk->status &= ~BlkReadable;
329            }
330            // Here we are using forward_time, modelling the latency of
331            // a miss (outbound) just as forwardLatency, neglecting the
332            // lookupLatency component.
333            allocateMissBuffer(pkt, forward_time);
334        }
335    }
336}
337
338void
339BaseCache::recvTimingReq(PacketPtr pkt)
340{
341    // anything that is merely forwarded pays for the forward latency and
342    // the delay provided by the crossbar
343    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
344
345    Cycles lat;
346    CacheBlk *blk = nullptr;
347    bool satisfied = false;
348    {
349        PacketList writebacks;
350        // Note that lat is passed by reference here. The function
351        // access() will set the lat value.
352        satisfied = access(pkt, blk, lat, writebacks);
353
354        // After the evicted blocks are selected, they must be forwarded
355        // to the write buffer to ensure they logically precede anything
356        // happening below
357        doWritebacks(writebacks, clockEdge(lat + forwardLatency));
358    }
359
360    // Here we charge the headerDelay that takes into account the latencies
361    // of the bus, if the packet comes from it.
362    // The latency charged is just the value set by the access() function.
363    // In case of a hit we are neglecting response latency.
364    // In case of a miss we are neglecting forward latency.
365    Tick request_time = clockEdge(lat);
366    // Here we reset the timing of the packet.
367    pkt->headerDelay = pkt->payloadDelay = 0;
368
369    if (satisfied) {
370        // notify before anything else as later handleTimingReqHit might turn
371        // the packet in a response
372        ppHit->notify(pkt);
373
374        if (prefetcher && blk && blk->wasPrefetched()) {
375            blk->status &= ~BlkHWPrefetched;
376        }
377
378        handleTimingReqHit(pkt, blk, request_time);
379    } else {
380        handleTimingReqMiss(pkt, blk, forward_time, request_time);
381
382        ppMiss->notify(pkt);
383    }
384
385    if (prefetcher) {
386        // track time of availability of next prefetch, if any
387        Tick next_pf_time = prefetcher->nextPrefetchReadyTime();
388        if (next_pf_time != MaxTick) {
389            schedMemSideSendEvent(next_pf_time);
390        }
391    }
392}
393
394void
395BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
396{
397    Tick completion_time = clockEdge(responseLatency) +
398        pkt->headerDelay + pkt->payloadDelay;
399
400    // Reset the bus additional time as it is now accounted for
401    pkt->headerDelay = pkt->payloadDelay = 0;
402
403    cpuSidePort.schedTimingResp(pkt, completion_time);
404}
405
406void
407BaseCache::recvTimingResp(PacketPtr pkt)
408{
409    assert(pkt->isResponse());
410
411    // all header delay should be paid for by the crossbar, unless
412    // this is a prefetch response from above
413    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
414             "%s saw a non-zero packet delay\n", name());
415
416    const bool is_error = pkt->isError();
417
418    if (is_error) {
419        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
420                pkt->print());
421    }
422
423    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
424            pkt->print());
425
426    // if this is a write, we should be looking at an uncacheable
427    // write
428    if (pkt->isWrite()) {
429        assert(pkt->req->isUncacheable());
430        handleUncacheableWriteResp(pkt);
431        return;
432    }
433
434    // we have dealt with any (uncacheable) writes above, from here on
435    // we know we are dealing with an MSHR due to a miss or a prefetch
436    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
437    assert(mshr);
438
439    if (mshr == noTargetMSHR) {
440        // we always clear at least one target
441        clearBlocked(Blocked_NoTargets);
442        noTargetMSHR = nullptr;
443    }
444
445    // Initial target is used just for stats
446    QueueEntry::Target *initial_tgt = mshr->getTarget();
447    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
448    Tick miss_latency = curTick() - initial_tgt->recvTime;
449
450    if (pkt->req->isUncacheable()) {
451        assert(pkt->req->masterId() < system->maxMasters());
452        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
453            miss_latency;
454    } else {
455        assert(pkt->req->masterId() < system->maxMasters());
456        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
457            miss_latency;
458    }
459
460    PacketList writebacks;
461
462    bool is_fill = !mshr->isForward &&
463        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
464         mshr->wasWholeLineWrite);
465
466    // make sure that if the mshr was due to a whole line write then
467    // the response is an invalidation
468    assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());
469
470    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
471
472    if (is_fill && !is_error) {
473        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
474                pkt->getAddr());
475
476        const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
477            writeAllocator->allocate() : mshr->allocOnFill();
478        blk = handleFill(pkt, blk, writebacks, allocate);
479        assert(blk != nullptr);
480        ppFill->notify(pkt);
481    }
482
483    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
484        // The block was marked not readable while there was a pending
485        // cache maintenance operation, restore its flag.
486        blk->status |= BlkReadable;
487
488        // This was a cache clean operation (without invalidate)
489        // and we have a copy of the block already. Since there
490        // is no invalidation, we can promote targets that don't
491        // require a writable copy
492        mshr->promoteReadable();
493    }
494
495    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
496        // If at this point the referenced block is writable and the
497        // response is not a cache invalidate, we promote targets that
498        // were deferred as we couldn't guarrantee a writable copy
499        mshr->promoteWritable();
500    }
501
502    serviceMSHRTargets(mshr, pkt, blk);
503
504    if (mshr->promoteDeferredTargets()) {
505        // avoid later read getting stale data while write miss is
506        // outstanding.. see comment in timingAccess()
507        if (blk) {
508            blk->status &= ~BlkReadable;
509        }
510        mshrQueue.markPending(mshr);
511        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
512    } else {
513        // while we deallocate an mshr from the queue we still have to
514        // check the isFull condition before and after as we might
515        // have been using the reserved entries already
516        const bool was_full = mshrQueue.isFull();
517        mshrQueue.deallocate(mshr);
518        if (was_full && !mshrQueue.isFull()) {
519            clearBlocked(Blocked_NoMSHRs);
520        }
521
522        // Request the bus for a prefetch if this deallocation freed enough
523        // MSHRs for a prefetch to take place
524        if (prefetcher && mshrQueue.canPrefetch()) {
525            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
526                                         clockEdge());
527            if (next_pf_time != MaxTick)
528                schedMemSideSendEvent(next_pf_time);
529        }
530    }
531
532    // if we used temp block, check to see if its valid and then clear it out
533    if (blk == tempBlock && tempBlock->isValid()) {
534        evictBlock(blk, writebacks);
535    }
536
537    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
538    // copy writebacks to write buffer
539    doWritebacks(writebacks, forward_time);
540
541    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
542    delete pkt;
543}
544
545
546Tick
547BaseCache::recvAtomic(PacketPtr pkt)
548{
549    // should assert here that there are no outstanding MSHRs or
550    // writebacks... that would mean that someone used an atomic
551    // access in timing mode
552
553    // We use lookupLatency here because it is used to specify the latency
554    // to access.
555    Cycles lat = lookupLatency;
556
557    CacheBlk *blk = nullptr;
558    PacketList writebacks;
559    bool satisfied = access(pkt, blk, lat, writebacks);
560
561    if (pkt->isClean() && blk && blk->isDirty()) {
562        // A cache clean opearation is looking for a dirty
563        // block. If a dirty block is encountered a WriteClean
564        // will update any copies to the path to the memory
565        // until the point of reference.
566        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
567                __func__, pkt->print(), blk->print());
568        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
569        writebacks.push_back(wb_pkt);
570        pkt->setSatisfied();
571    }
572
573    // handle writebacks resulting from the access here to ensure they
574    // logically precede anything happening below
575    doWritebacksAtomic(writebacks);
576    assert(writebacks.empty());
577
578    if (!satisfied) {
579        lat += handleAtomicReqMiss(pkt, blk, writebacks);
580    }
581
582    // Note that we don't invoke the prefetcher at all in atomic mode.
583    // It's not clear how to do it properly, particularly for
584    // prefetchers that aggressively generate prefetch candidates and
585    // rely on bandwidth contention to throttle them; these will tend
586    // to pollute the cache in atomic mode since there is no bandwidth
587    // contention.  If we ever do want to enable prefetching in atomic
588    // mode, though, this is the place to do it... see timingAccess()
589    // for an example (though we'd want to issue the prefetch(es)
590    // immediately rather than calling requestMemSideBus() as we do
591    // there).
592
593    // do any writebacks resulting from the response handling
594    doWritebacksAtomic(writebacks);
595
596    // if we used temp block, check to see if its valid and if so
597    // clear it out, but only do so after the call to recvAtomic is
598    // finished so that any downstream observers (such as a snoop
599    // filter), first see the fill, and only then see the eviction
600    if (blk == tempBlock && tempBlock->isValid()) {
601        // the atomic CPU calls recvAtomic for fetch and load/store
602        // sequentuially, and we may already have a tempBlock
603        // writeback from the fetch that we have not yet sent
604        if (tempBlockWriteback) {
605            // if that is the case, write the prevoius one back, and
606            // do not schedule any new event
607            writebackTempBlockAtomic();
608        } else {
609            // the writeback/clean eviction happens after the call to
610            // recvAtomic has finished (but before any successive
611            // calls), so that the response handling from the fill is
612            // allowed to happen first
613            schedule(writebackTempBlockAtomicEvent, curTick());
614        }
615
616        tempBlockWriteback = evictBlock(blk);
617    }
618
619    if (pkt->needsResponse()) {
620        pkt->makeAtomicResponse();
621    }
622
623    return lat * clockPeriod();
624}
625
626void
627BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
628{
629    Addr blk_addr = pkt->getBlockAddr(blkSize);
630    bool is_secure = pkt->isSecure();
631    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
632    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
633
634    pkt->pushLabel(name());
635
636    CacheBlkPrintWrapper cbpw(blk);
637
638    // Note that just because an L2/L3 has valid data doesn't mean an
639    // L1 doesn't have a more up-to-date modified copy that still
640    // needs to be found.  As a result we always update the request if
641    // we have it, but only declare it satisfied if we are the owner.
642
643    // see if we have data at all (owned or otherwise)
644    bool have_data = blk && blk->isValid()
645        && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
646                                     blk->data);
647
648    // data we have is dirty if marked as such or if we have an
649    // in-service MSHR that is pending a modified line
650    bool have_dirty =
651        have_data && (blk->isDirty() ||
652                      (mshr && mshr->inService && mshr->isPendingModified()));
653
654    bool done = have_dirty ||
655        cpuSidePort.trySatisfyFunctional(pkt) ||
656        mshrQueue.trySatisfyFunctional(pkt) ||
657        writeBuffer.trySatisfyFunctional(pkt) ||
658        memSidePort.trySatisfyFunctional(pkt);
659
660    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__,  pkt->print(),
661            (blk && blk->isValid()) ? "valid " : "",
662            have_data ? "data " : "", done ? "done " : "");
663
664    // We're leaving the cache, so pop cache->name() label
665    pkt->popLabel();
666
667    if (done) {
668        pkt->makeResponse();
669    } else {
670        // if it came as a request from the CPU side then make sure it
671        // continues towards the memory side
672        if (from_cpu_side) {
673            memSidePort.sendFunctional(pkt);
674        } else if (cpuSidePort.isSnooping()) {
675            // if it came from the memory side, it must be a snoop request
676            // and we should only forward it if we are forwarding snoops
677            cpuSidePort.sendFunctionalSnoop(pkt);
678        }
679    }
680}
681
682
683void
684BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
685{
686    assert(pkt->isRequest());
687
688    uint64_t overwrite_val;
689    bool overwrite_mem;
690    uint64_t condition_val64;
691    uint32_t condition_val32;
692
693    int offset = pkt->getOffset(blkSize);
694    uint8_t *blk_data = blk->data + offset;
695
696    assert(sizeof(uint64_t) >= pkt->getSize());
697
698    overwrite_mem = true;
699    // keep a copy of our possible write value, and copy what is at the
700    // memory address into the packet
701    pkt->writeData((uint8_t *)&overwrite_val);
702    pkt->setData(blk_data);
703
704    if (pkt->req->isCondSwap()) {
705        if (pkt->getSize() == sizeof(uint64_t)) {
706            condition_val64 = pkt->req->getExtraData();
707            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
708                                         sizeof(uint64_t));
709        } else if (pkt->getSize() == sizeof(uint32_t)) {
710            condition_val32 = (uint32_t)pkt->req->getExtraData();
711            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
712                                         sizeof(uint32_t));
713        } else
714            panic("Invalid size for conditional read/write\n");
715    }
716
717    if (overwrite_mem) {
718        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
719        blk->status |= BlkDirty;
720    }
721}
722
723QueueEntry*
724BaseCache::getNextQueueEntry()
725{
726    // Check both MSHR queue and write buffer for potential requests,
727    // note that null does not mean there is no request, it could
728    // simply be that it is not ready
729    MSHR *miss_mshr  = mshrQueue.getNext();
730    WriteQueueEntry *wq_entry = writeBuffer.getNext();
731
732    // If we got a write buffer request ready, first priority is a
733    // full write buffer, otherwise we favour the miss requests
734    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
735        // need to search MSHR queue for conflicting earlier miss.
736        MSHR *conflict_mshr = mshrQueue.findPending(wq_entry);
737
738        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
739            // Service misses in order until conflict is cleared.
740            return conflict_mshr;
741
742            // @todo Note that we ignore the ready time of the conflict here
743        }
744
745        // No conflicts; issue write
746        return wq_entry;
747    } else if (miss_mshr) {
748        // need to check for conflicting earlier writeback
749        WriteQueueEntry *conflict_mshr = writeBuffer.findPending(miss_mshr);
750        if (conflict_mshr) {
751            // not sure why we don't check order here... it was in the
752            // original code but commented out.
753
754            // The only way this happens is if we are
755            // doing a write and we didn't have permissions
756            // then subsequently saw a writeback (owned got evicted)
757            // We need to make sure to perform the writeback first
758            // To preserve the dirty data, then we can issue the write
759
760            // should we return wq_entry here instead?  I.e. do we
761            // have to flush writes in order?  I don't think so... not
762            // for Alpha anyway.  Maybe for x86?
763            return conflict_mshr;
764
765            // @todo Note that we ignore the ready time of the conflict here
766        }
767
768        // No conflicts; issue read
769        return miss_mshr;
770    }
771
772    // fall through... no pending requests.  Try a prefetch.
773    assert(!miss_mshr && !wq_entry);
774    if (prefetcher && mshrQueue.canPrefetch()) {
775        // If we have a miss queue slot, we can try a prefetch
776        PacketPtr pkt = prefetcher->getPacket();
777        if (pkt) {
778            Addr pf_addr = pkt->getBlockAddr(blkSize);
779            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
780                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
781                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
782                // Update statistic on number of prefetches issued
783                // (hwpf_mshr_misses)
784                assert(pkt->req->masterId() < system->maxMasters());
785                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
786
787                // allocate an MSHR and return it, note
788                // that we send the packet straight away, so do not
789                // schedule the send
790                return allocateMissBuffer(pkt, curTick(), false);
791            } else {
792                // free the request and packet
793                delete pkt;
794            }
795        }
796    }
797
798    return nullptr;
799}
800
801bool
802BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
803                                 PacketList &writebacks)
804{
805    // tempBlock does not exist in the tags, so don't do anything for it.
806    if (blk == tempBlock) {
807        return true;
808    }
809
810    // Get superblock of the given block
811    CompressionBlk* compression_blk = static_cast<CompressionBlk*>(blk);
812    const SuperBlk* superblock = static_cast<const SuperBlk*>(
813        compression_blk->getSectorBlock());
814
815    // The compressor is called to compress the updated data, so that its
816    // metadata can be updated.
817    std::size_t compression_size = 0;
818    Cycles compression_lat = Cycles(0);
819    Cycles decompression_lat = Cycles(0);
820    compressor->compress(data, compression_lat, decompression_lat,
821                         compression_size);
822
823    // If block's compression factor increased, it may not be co-allocatable
824    // anymore. If so, some blocks might need to be evicted to make room for
825    // the bigger block
826
827    // Get previous compressed size
828    const std::size_t M5_VAR_USED prev_size = compression_blk->getSizeBits();
829
830    // Check if new data is co-allocatable
831    const bool is_co_allocatable = superblock->isCompressed(compression_blk) &&
832        superblock->canCoAllocate(compression_size);
833
834    // If block was compressed, possibly co-allocated with other blocks, and
835    // cannot be co-allocated anymore, one or more blocks must be evicted to
836    // make room for the expanded block. As of now we decide to evict the co-
837    // allocated blocks to make room for the expansion, but other approaches
838    // that take the replacement data of the superblock into account may
839    // generate better results
840    std::vector<CacheBlk*> evict_blks;
841    const bool was_compressed = compression_blk->isCompressed();
842    if (was_compressed && !is_co_allocatable) {
843        // Get all co-allocated blocks
844        for (const auto& sub_blk : superblock->blks) {
845            if (sub_blk->isValid() && (compression_blk != sub_blk)) {
846                // Check for transient state allocations. If any of the
847                // entries listed for eviction has a transient state, the
848                // allocation fails
849                const Addr repl_addr = regenerateBlkAddr(sub_blk);
850                const MSHR *repl_mshr =
851                    mshrQueue.findMatch(repl_addr, sub_blk->isSecure());
852                if (repl_mshr) {
853                    DPRINTF(CacheRepl, "Aborting data expansion of %s due " \
854                            "to replacement of block in transient state: %s\n",
855                            compression_blk->print(), sub_blk->print());
856                    // Too hard to replace block with transient state, so it
857                    // cannot be evicted. Mark the update as failed and expect
858                    // the caller to evict this block. Since this is called
859                    // only when writebacks arrive, and packets do not contain
860                    // compressed data, there is no need to decompress
861                    compression_blk->setSizeBits(blkSize * 8);
862                    compression_blk->setDecompressionLatency(Cycles(0));
863                    compression_blk->setUncompressed();
864                    return false;
865                }
866
867                evict_blks.push_back(sub_blk);
868            }
869        }
870
871        // Update the number of data expansions
872        dataExpansions++;
873
874        DPRINTF(CacheComp, "Data expansion: expanding [%s] from %d to %d bits"
875                "\n", blk->print(), prev_size, compression_size);
876    }
877
878    // We always store compressed blocks when possible
879    if (is_co_allocatable) {
880        compression_blk->setCompressed();
881    } else {
882        compression_blk->setUncompressed();
883    }
884    compression_blk->setSizeBits(compression_size);
885    compression_blk->setDecompressionLatency(decompression_lat);
886
887    // Evict valid blocks
888    for (const auto& evict_blk : evict_blks) {
889        if (evict_blk->isValid()) {
890            if (evict_blk->wasPrefetched()) {
891                unusedPrefetches++;
892            }
893            evictBlock(evict_blk, writebacks);
894        }
895    }
896
897    return true;
898}
899
900void
901BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
902{
903    assert(pkt->isRequest());
904
905    assert(blk && blk->isValid());
906    // Occasionally this is not true... if we are a lower-level cache
907    // satisfying a string of Read and ReadEx requests from
908    // upper-level caches, a Read will mark the block as shared but we
909    // can satisfy a following ReadEx anyway since we can rely on the
910    // Read requester(s) to have buffered the ReadEx snoop and to
911    // invalidate their blocks after receiving them.
912    // assert(!pkt->needsWritable() || blk->isWritable());
913    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
914
915    // Check RMW operations first since both isRead() and
916    // isWrite() will be true for them
917    if (pkt->cmd == MemCmd::SwapReq) {
918        if (pkt->isAtomicOp()) {
919            // extract data from cache and save it into the data field in
920            // the packet as a return value from this atomic op
921            int offset = tags->extractBlkOffset(pkt->getAddr());
922            uint8_t *blk_data = blk->data + offset;
923            pkt->setData(blk_data);
924
925            // execute AMO operation
926            (*(pkt->getAtomicOp()))(blk_data);
927
928            // set block status to dirty
929            blk->status |= BlkDirty;
930        } else {
931            cmpAndSwap(blk, pkt);
932        }
933    } else if (pkt->isWrite()) {
934        // we have the block in a writable state and can go ahead,
935        // note that the line may be also be considered writable in
936        // downstream caches along the path to memory, but always
937        // Exclusive, and never Modified
938        assert(blk->isWritable());
939        // Write or WriteLine at the first cache with block in writable state
940        if (blk->checkWrite(pkt)) {
941            pkt->writeDataToBlock(blk->data, blkSize);
942        }
943        // Always mark the line as dirty (and thus transition to the
944        // Modified state) even if we are a failed StoreCond so we
945        // supply data to any snoops that have appended themselves to
946        // this cache before knowing the store will fail.
947        blk->status |= BlkDirty;
948        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
949    } else if (pkt->isRead()) {
950        if (pkt->isLLSC()) {
951            blk->trackLoadLocked(pkt);
952        }
953
954        // all read responses have a data payload
955        assert(pkt->hasRespData());
956        pkt->setDataFromBlock(blk->data, blkSize);
957    } else if (pkt->isUpgrade()) {
958        // sanity check
959        assert(!pkt->hasSharers());
960
961        if (blk->isDirty()) {
962            // we were in the Owned state, and a cache above us that
963            // has the line in Shared state needs to be made aware
964            // that the data it already has is in fact dirty
965            pkt->setCacheResponding();
966            blk->status &= ~BlkDirty;
967        }
968    } else if (pkt->isClean()) {
969        blk->status &= ~BlkDirty;
970    } else {
971        assert(pkt->isInvalidate());
972        invalidateBlock(blk);
973        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
974                pkt->print());
975    }
976}
977
978/////////////////////////////////////////////////////
979//
980// Access path: requests coming in from the CPU side
981//
982/////////////////////////////////////////////////////
983Cycles
984BaseCache::calculateTagOnlyLatency(const uint32_t delay,
985                                   const Cycles lookup_lat) const
986{
987    // A tag-only access has to wait for the packet to arrive in order to
988    // perform the tag lookup.
989    return ticksToCycles(delay) + lookup_lat;
990}
991
992Cycles
993BaseCache::calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
994                                  const Cycles lookup_lat) const
995{
996    Cycles lat(0);
997
998    if (blk != nullptr) {
999        // As soon as the access arrives, for sequential accesses first access
1000        // tags, then the data entry. In the case of parallel accesses the
1001        // latency is dictated by the slowest of tag and data latencies.
1002        if (sequentialAccess) {
1003            lat = ticksToCycles(delay) + lookup_lat + dataLatency;
1004        } else {
1005            lat = ticksToCycles(delay) + std::max(lookup_lat, dataLatency);
1006        }
1007
1008        // Check if the block to be accessed is available. If not, apply the
1009        // access latency on top of when the block is ready to be accessed.
1010        const Tick tick = curTick() + delay;
1011        const Tick when_ready = blk->getWhenReady();
1012        if (when_ready > tick &&
1013            ticksToCycles(when_ready - tick) > lat) {
1014            lat += ticksToCycles(when_ready - tick);
1015        }
1016    } else {
1017        // In case of a miss, we neglect the data access in a parallel
1018        // configuration (i.e., the data access will be stopped as soon as
1019        // we find out it is a miss), and use the tag-only latency.
1020        lat = calculateTagOnlyLatency(delay, lookup_lat);
1021    }
1022
1023    return lat;
1024}
1025
1026bool
1027BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
1028                  PacketList &writebacks)
1029{
1030    // sanity check
1031    assert(pkt->isRequest());
1032
1033    chatty_assert(!(isReadOnly && pkt->isWrite()),
1034                  "Should never see a write in a read-only cache %s\n",
1035                  name());
1036
1037    // Access block in the tags
1038    Cycles tag_latency(0);
1039    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), tag_latency);
1040
1041    DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
1042            blk ? "hit " + blk->print() : "miss");
1043
1044    if (pkt->req->isCacheMaintenance()) {
1045        // A cache maintenance operation is always forwarded to the
1046        // memory below even if the block is found in dirty state.
1047
1048        // We defer any changes to the state of the block until we
1049        // create and mark as in service the mshr for the downstream
1050        // packet.
1051
1052        // Calculate access latency on top of when the packet arrives. This
1053        // takes into account the bus delay.
1054        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1055
1056        return false;
1057    }
1058
1059    if (pkt->isEviction()) {
1060        // We check for presence of block in above caches before issuing
1061        // Writeback or CleanEvict to write buffer. Therefore the only
1062        // possible cases can be of a CleanEvict packet coming from above
1063        // encountering a Writeback generated in this cache peer cache and
1064        // waiting in the write buffer. Cases of upper level peer caches
1065        // generating CleanEvict and Writeback or simply CleanEvict and
1066        // CleanEvict almost simultaneously will be caught by snoops sent out
1067        // by crossbar.
1068        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
1069                                                          pkt->isSecure());
1070        if (wb_entry) {
1071            assert(wb_entry->getNumTargets() == 1);
1072            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
1073            assert(wbPkt->isWriteback());
1074
1075            if (pkt->isCleanEviction()) {
1076                // The CleanEvict and WritebackClean snoops into other
1077                // peer caches of the same level while traversing the
1078                // crossbar. If a copy of the block is found, the
1079                // packet is deleted in the crossbar. Hence, none of
1080                // the other upper level caches connected to this
1081                // cache have the block, so we can clear the
1082                // BLOCK_CACHED flag in the Writeback if set and
1083                // discard the CleanEvict by returning true.
1084                wbPkt->clearBlockCached();
1085
1086                // A clean evict does not need to access the data array
1087                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1088
1089                return true;
1090            } else {
1091                assert(pkt->cmd == MemCmd::WritebackDirty);
1092                // Dirty writeback from above trumps our clean
1093                // writeback... discard here
1094                // Note: markInService will remove entry from writeback buffer.
1095                markInService(wb_entry);
1096                delete wbPkt;
1097            }
1098        }
1099    }
1100
1101    // Writeback handling is special case.  We can write the block into
1102    // the cache without having a writeable copy (or any copy at all).
1103    if (pkt->isWriteback()) {
1104        assert(blkSize == pkt->getSize());
1105
1106        // we could get a clean writeback while we are having
1107        // outstanding accesses to a block, do the simple thing for
1108        // now and drop the clean writeback so that we do not upset
1109        // any ordering/decisions about ownership already taken
1110        if (pkt->cmd == MemCmd::WritebackClean &&
1111            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
1112            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
1113                    "dropping\n", pkt->getAddr());
1114
1115            // A writeback searches for the block, then writes the data.
1116            // As the writeback is being dropped, the data is not touched,
1117            // and we just had to wait for the time to find a match in the
1118            // MSHR. As of now assume a mshr queue search takes as long as
1119            // a tag lookup for simplicity.
1120            lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1121
1122            return true;
1123        }
1124
1125        if (!blk) {
1126            // need to do a replacement
1127            blk = allocateBlock(pkt, writebacks);
1128            if (!blk) {
1129                // no replaceable block available: give up, fwd to next level.
1130                incMissCount(pkt);
1131
1132                // A writeback searches for the block, then writes the data.
1133                // As the block could not be found, it was a tag-only access.
1134                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1135
1136                return false;
1137            }
1138
1139            blk->status |= BlkReadable;
1140        } else if (compressor) {
1141            // This is an overwrite to an existing block, therefore we need
1142            // to check for data expansion (i.e., block was compressed with
1143            // a smaller size, and now it doesn't fit the entry anymore).
1144            // If that is the case we might need to evict blocks.
1145            if (!updateCompressionData(blk, pkt->getConstPtr<uint64_t>(),
1146                writebacks)) {
1147                // This is a failed data expansion (write), which happened
1148                // after finding the replacement entries and accessing the
1149                // block's data. There were no replaceable entries available
1150                // to make room for the expanded block, and since it does not
1151                // fit anymore and it has been properly updated to contain
1152                // the new data, forward it to the next level
1153                lat = calculateAccessLatency(blk, pkt->headerDelay,
1154                                             tag_latency);
1155                invalidateBlock(blk);
1156                return false;
1157            }
1158        }
1159
1160        // only mark the block dirty if we got a writeback command,
1161        // and leave it as is for a clean writeback
1162        if (pkt->cmd == MemCmd::WritebackDirty) {
1163            // TODO: the coherent cache can assert(!blk->isDirty());
1164            blk->status |= BlkDirty;
1165        }
1166        // if the packet does not have sharers, it is passing
1167        // writable, and we got the writeback in Modified or Exclusive
1168        // state, if not we are in the Owned or Shared state
1169        if (!pkt->hasSharers()) {
1170            blk->status |= BlkWritable;
1171        }
1172        // nothing else to do; writeback doesn't expect response
1173        assert(!pkt->needsResponse());
1174        pkt->writeDataToBlock(blk->data, blkSize);
1175        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
1176        incHitCount(pkt);
1177
1178        // A writeback searches for the block, then writes the data
1179        lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
1180
1181        // When the packet metadata arrives, the tag lookup will be done while
1182        // the payload is arriving. Then the block will be ready to access as
1183        // soon as the fill is done
1184        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
1185            std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));
1186
1187        return true;
1188    } else if (pkt->cmd == MemCmd::CleanEvict) {
1189        // A CleanEvict does not need to access the data array
1190        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1191
1192        if (blk) {
1193            // Found the block in the tags, need to stop CleanEvict from
1194            // propagating further down the hierarchy. Returning true will
1195            // treat the CleanEvict like a satisfied write request and delete
1196            // it.
1197            return true;
1198        }
1199        // We didn't find the block here, propagate the CleanEvict further
1200        // down the memory hierarchy. Returning false will treat the CleanEvict
1201        // like a Writeback which could not find a replaceable block so has to
1202        // go to next level.
1203        return false;
1204    } else if (pkt->cmd == MemCmd::WriteClean) {
1205        // WriteClean handling is a special case. We can allocate a
1206        // block directly if it doesn't exist and we can update the
1207        // block immediately. The WriteClean transfers the ownership
1208        // of the block as well.
1209        assert(blkSize == pkt->getSize());
1210
1211        if (!blk) {
1212            if (pkt->writeThrough()) {
1213                // A writeback searches for the block, then writes the data.
1214                // As the block could not be found, it was a tag-only access.
1215                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1216
1217                // if this is a write through packet, we don't try to
1218                // allocate if the block is not present
1219                return false;
1220            } else {
1221                // a writeback that misses needs to allocate a new block
1222                blk = allocateBlock(pkt, writebacks);
1223                if (!blk) {
1224                    // no replaceable block available: give up, fwd to
1225                    // next level.
1226                    incMissCount(pkt);
1227
1228                    // A writeback searches for the block, then writes the
1229                    // data. As the block could not be found, it was a tag-only
1230                    // access.
1231                    lat = calculateTagOnlyLatency(pkt->headerDelay,
1232                                                  tag_latency);
1233
1234                    return false;
1235                }
1236
1237                blk->status |= BlkReadable;
1238            }
1239        } else if (compressor) {
1240            // This is an overwrite to an existing block, therefore we need
1241            // to check for data expansion (i.e., block was compressed with
1242            // a smaller size, and now it doesn't fit the entry anymore).
1243            // If that is the case we might need to evict blocks.
1244            if (!updateCompressionData(blk, pkt->getConstPtr<uint64_t>(),
1245                writebacks)) {
1246                // This is a failed data expansion (write), which happened
1247                // after finding the replacement entries and accessing the
1248                // block's data. There were no replaceable entries available
1249                // to make room for the expanded block, and since it does not
1250                // fit anymore and it has been properly updated to contain
1251                // the new data, forward it to the next level
1252                lat = calculateAccessLatency(blk, pkt->headerDelay,
1253                                             tag_latency);
1254                invalidateBlock(blk);
1255                return false;
1256            }
1257        }
1258
1259        // at this point either this is a writeback or a write-through
1260        // write clean operation and the block is already in this
1261        // cache, we need to update the data and the block flags
1262        assert(blk);
1263        // TODO: the coherent cache can assert(!blk->isDirty());
1264        if (!pkt->writeThrough()) {
1265            blk->status |= BlkDirty;
1266        }
1267        // nothing else to do; writeback doesn't expect response
1268        assert(!pkt->needsResponse());
1269        pkt->writeDataToBlock(blk->data, blkSize);
1270        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
1271
1272        incHitCount(pkt);
1273
1274        // A writeback searches for the block, then writes the data
1275        lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
1276
1277        // When the packet metadata arrives, the tag lookup will be done while
1278        // the payload is arriving. Then the block will be ready to access as
1279        // soon as the fill is done
1280        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
1281            std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));
1282
1283        // If this a write-through packet it will be sent to cache below
1284        return !pkt->writeThrough();
1285    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
1286                       blk->isReadable())) {
1287        // OK to satisfy access
1288        incHitCount(pkt);
1289
1290        // Calculate access latency based on the need to access the data array
1291        if (pkt->isRead() || pkt->isWrite()) {
1292            lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
1293
1294            // When a block is compressed, it must first be decompressed
1295            // before being read. This adds to the access latency.
1296            if (compressor && pkt->isRead()) {
1297                lat += compressor->getDecompressionLatency(blk);
1298            }
1299        } else {
1300            lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
1301        }
1302
1303        satisfyRequest(pkt, blk);
1304        maintainClusivity(pkt->fromCache(), blk);
1305
1306        return true;
1307    }
1308
1309    // Can't satisfy access normally... either no block (blk == nullptr)
1310    // or have block but need writable
1311
1312    incMissCount(pkt);
1313
1314    lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
1315
1316    if (!blk && pkt->isLLSC() && pkt->isWrite()) {
1317        // complete miss on store conditional... just give up now
1318        pkt->req->setExtraData(0);
1319        return true;
1320    }
1321
1322    return false;
1323}
1324
1325void
1326BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
1327{
1328    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
1329        clusivity == Enums::mostly_excl) {
1330        // if we have responded to a cache, and our block is still
1331        // valid, but not dirty, and this cache is mostly exclusive
1332        // with respect to the cache above, drop the block
1333        invalidateBlock(blk);
1334    }
1335}
1336
1337CacheBlk*
1338BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
1339                      bool allocate)
1340{
1341    assert(pkt->isResponse());
1342    Addr addr = pkt->getAddr();
1343    bool is_secure = pkt->isSecure();
1344#if TRACING_ON
1345    CacheBlk::State old_state = blk ? blk->status : 0;
1346#endif
1347
1348    // When handling a fill, we should have no writes to this line.
1349    assert(addr == pkt->getBlockAddr(blkSize));
1350    assert(!writeBuffer.findMatch(addr, is_secure));
1351
1352    if (!blk) {
1353        // better have read new data...
1354        assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);
1355
1356        // need to do a replacement if allocating, otherwise we stick
1357        // with the temporary storage
1358        blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;
1359
1360        if (!blk) {
1361            // No replaceable block or a mostly exclusive
1362            // cache... just use temporary storage to complete the
1363            // current request and then get rid of it
1364            blk = tempBlock;
1365            tempBlock->insert(addr, is_secure);
1366            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
1367                    is_secure ? "s" : "ns");
1368        }
1369    } else {
1370        // existing block... probably an upgrade
1371        // don't clear block status... if block is already dirty we
1372        // don't want to lose that
1373    }
1374
1375    // Block is guaranteed to be valid at this point
1376    assert(blk->isValid());
1377    assert(blk->isSecure() == is_secure);
1378    assert(regenerateBlkAddr(blk) == addr);
1379
1380    blk->status |= BlkReadable;
1381
1382    // sanity check for whole-line writes, which should always be
1383    // marked as writable as part of the fill, and then later marked
1384    // dirty as part of satisfyRequest
1385    if (pkt->cmd == MemCmd::InvalidateResp) {
1386        assert(!pkt->hasSharers());
1387    }
1388
1389    // here we deal with setting the appropriate state of the line,
1390    // and we start by looking at the hasSharers flag, and ignore the
1391    // cacheResponding flag (normally signalling dirty data) if the
1392    // packet has sharers, thus the line is never allocated as Owned
1393    // (dirty but not writable), and always ends up being either
1394    // Shared, Exclusive or Modified, see Packet::setCacheResponding
1395    // for more details
1396    if (!pkt->hasSharers()) {
1397        // we could get a writable line from memory (rather than a
1398        // cache) even in a read-only cache; note that we set this bit
1399        // even for a read-only cache, a decision that may need revisiting
1400        blk->status |= BlkWritable;
1401
1402        // check if we got this via cache-to-cache transfer (i.e., from a
1403        // cache that had the block in Modified or Owned state)
1404        if (pkt->cacheResponding()) {
1405            // we got the block in Modified state, and invalidated the
1406            // owners copy
1407            blk->status |= BlkDirty;
1408
1409            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
1410                          "in read-only cache %s\n", name());
1411
1412        }
1413    }
1414
1415    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
1416            addr, is_secure ? "s" : "ns", old_state, blk->print());
1417
1418    // if we got new data, copy it in (checking for a read response
1419    // and a response that has data is the same in the end)
1420    if (pkt->isRead()) {
1421        // sanity checks
1422        assert(pkt->hasData());
1423        assert(pkt->getSize() == blkSize);
1424
1425        pkt->writeDataToBlock(blk->data, blkSize);
1426    }
1427    // The block will be ready when the payload arrives and the fill is done
1428    blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
1429                      pkt->payloadDelay);
1430
1431    return blk;
1432}
1433
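// Allocate a tag entry for an incoming fill: compute compression metadata
// if a compressor is in use, ask the tags for a victim, give up (nullptr)
// if any block to be evicted has an outstanding MSHR, write back or evict
// the remaining valid blocks, and insert the new block in the freed entry.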
1434CacheBlk*
1435BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
1436{
1437    // Get address
1438    const Addr addr = pkt->getAddr();
1439
1440    // Get secure bit
1441    const bool is_secure = pkt->isSecure();
1442
1443    // Block size and compression related access latency. Only relevant if
1444    // using a compressor, otherwise there is no extra delay, and the block
1445    // is fully sized
1446    std::size_t blk_size_bits = blkSize*8;
1447    Cycles compression_lat = Cycles(0);
1448    Cycles decompression_lat = Cycles(0);
1449
1450    // If a compressor is being used, it is called to compress the data
1451    // before insertion. Although gem5 stores the data uncompressed even
1452    // when a compressor is used, the compression/decompression methods are
1453    // still called to calculate the extra cycles needed to read or write
1454    // compressed blocks.
1455    if (compressor) {
1456        compressor->compress(pkt->getConstPtr<uint64_t>(), compression_lat,
1457                             decompression_lat, blk_size_bits);
1458    }
1459
1460    // Find replacement victim
1461    std::vector<CacheBlk*> evict_blks;
1462    CacheBlk *victim = tags->findVictim(addr, is_secure, blk_size_bits,
1463                                        evict_blks);
1464
1465    // It is valid to return nullptr if there is no victim
1466    if (!victim)
1467        return nullptr;
1468
1469    // Print victim block's information
1470    DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print());
1471
1472    // Check for transient state allocations. If any of the entries listed
1473    // for eviction has a transient state, the allocation fails
1474    bool replacement = false;
1475    for (const auto& blk : evict_blks) {
1476        if (blk->isValid()) {
1477            replacement = true;
1478
1479            Addr repl_addr = regenerateBlkAddr(blk);
1480            MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
1481            if (repl_mshr) {
1482                // must be an outstanding upgrade or clean request
1483                // on a block we're about to replace...
1484                assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
1485                       repl_mshr->isCleaning());
1486
1487                // too hard to replace block with transient state
1488                // allocation failed, block not inserted
1489                return nullptr;
1490            }
1491        }
1492    }
1493
1494    // The victim will be replaced by a new entry, so increase the replacement
1495    // counter if a valid block is being replaced
1496    if (replacement) {
1497        // Evict valid blocks associated with this victim block
1498        for (const auto& blk : evict_blks) {
1499            if (blk->isValid()) {
1500                DPRINTF(CacheRepl, "Evicting %s (%#llx) to make room for " \
1501                        "%#llx (%s)\n", blk->print(), regenerateBlkAddr(blk),
1502                        addr, is_secure);
1503
1504                if (blk->wasPrefetched()) {
1505                    unusedPrefetches++;
1506                }
1507
1508                evictBlock(blk, writebacks);
1509            }
1510        }
1511
1512        replacements++;
1513    }
1514
1515    // If using a compressor, set compression data. This must be done before
1516    // block insertion, as compressed tags use this information.
1517    if (compressor) {
1518        compressor->setSizeBits(victim, blk_size_bits);
1519        compressor->setDecompressionLatency(victim, decompression_lat);
1520    }
1521
1522    // Insert new block at victimized entry
1523    tags->insertBlock(pkt, victim);
1524
1525    return victim;
1526}
1527
1528void
1529BaseCache::invalidateBlock(CacheBlk *blk)
1530{
1531    // If handling a block present in the Tags, let it do its invalidation
1532    // process, which will update stats and invalidate the block itself
1533    if (blk != tempBlock) {
1534        tags->invalidate(blk);
1535    } else {
1536        tempBlock->invalidate();
1537    }
1538}
1539
1540void
1541BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
1542{
1543    PacketPtr pkt = evictBlock(blk);
1544    if (pkt) {
1545        writebacks.push_back(pkt);
1546    }
1547}
1548
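// Create a writeback for an evicted block: WritebackDirty if the block is
// dirty, WritebackClean otherwise. The local copy is downgraded (writable
// cleared, or hasSharers set when only in Owned state) and its dirty bit
// cleared, as responsibility for the data moves downstream.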
1549PacketPtr
1550BaseCache::writebackBlk(CacheBlk *blk)
1551{
1552    chatty_assert(!isReadOnly || writebackClean,
1553                  "Writeback from read-only cache");
1554    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));
1555
1556    writebacks[Request::wbMasterId]++;
1557
1558    RequestPtr req = std::make_shared<Request>(
1559        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
1560
1561    if (blk->isSecure())
1562        req->setFlags(Request::SECURE);
1563
1564    req->taskId(blk->task_id);
1565
1566    PacketPtr pkt =
1567        new Packet(req, blk->isDirty() ?
1568                   MemCmd::WritebackDirty : MemCmd::WritebackClean);
1569
1570    DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
1571            pkt->print(), blk->isWritable(), blk->isDirty());
1572
1573    if (blk->isWritable()) {
1574        // not asserting shared means we pass the block in modified
1575        // state, mark our own block non-writeable
1576        blk->status &= ~BlkWritable;
1577    } else {
1578        // we are in the Owned state, tell the receiver
1579        pkt->setHasSharers();
1580    }
1581
1582    // make sure the block is not marked dirty
1583    blk->status &= ~BlkDirty;
1584
1585    pkt->allocate();
1586    pkt->setDataFromBlock(blk->data, blkSize);
1587
1588    // When a block is compressed, it must first be decompressed before being
1589    // sent for writeback.
1590    if (compressor) {
1591        pkt->payloadDelay = compressor->getDecompressionLatency(blk);
1592    }
1593
1594    return pkt;
1595}
1596
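// Create a WriteClean that pushes the block's data towards memory while the
// local copy stays valid. If a destination is specified, the request is
// flagged accordingly and the packet marked write-through.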
1597PacketPtr
1598BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
1599{
1600    RequestPtr req = std::make_shared<Request>(
1601        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
1602
1603    if (blk->isSecure()) {
1604        req->setFlags(Request::SECURE);
1605    }
1606    req->taskId(blk->task_id);
1607
1608    PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);
1609
1610    if (dest) {
1611        req->setFlags(dest);
1612        pkt->setWriteThrough();
1613    }
1614
1615    DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
1616            blk->isWritable(), blk->isDirty());
1617
1618    if (blk->isWritable()) {
1619        // not asserting shared means we pass the block in modified
1620        // state, mark our own block non-writeable
1621        blk->status &= ~BlkWritable;
1622    } else {
1623        // we are in the Owned state, tell the receiver
1624        pkt->setHasSharers();
1625    }
1626
1627    // make sure the block is not marked dirty
1628    blk->status &= ~BlkDirty;
1629
1630    pkt->allocate();
1631    pkt->setDataFromBlock(blk->data, blkSize);
1632
1633    // When a block is compressed, it must first be decompressed before being
1634    // sent for writeback.
1635    if (compressor) {
1636        pkt->payloadDelay = compressor->getDecompressionLatency(blk);
1637    }
1638
1639    return pkt;
1640}
1641
1642
1643void
1644BaseCache::memWriteback()
1645{
1646    tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
1647}
1648
1649void
1650BaseCache::memInvalidate()
1651{
1652    tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
1653}
1654
1655bool
1656BaseCache::isDirty() const
1657{
1658    return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
1659}
1660
1661bool
1662BaseCache::coalesce() const
1663{
1664    return writeAllocator && writeAllocator->coalesce();
1665}
1666
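// Functional writeback of a single block, used by memWriteback(): if the
// block is dirty, push its data downstream with a functional WriteReq and
// clear the dirty bit.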
1667void
1668BaseCache::writebackVisitor(CacheBlk &blk)
1669{
1670    if (blk.isDirty()) {
1671        assert(blk.isValid());
1672
1673        RequestPtr request = std::make_shared<Request>(
1674            regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);
1675
1676        request->taskId(blk.task_id);
1677        if (blk.isSecure()) {
1678            request->setFlags(Request::SECURE);
1679        }
1680
1681        Packet packet(request, MemCmd::WriteReq);
1682        packet.dataStatic(blk.data);
1683
1684        memSidePort.sendFunctional(&packet);
1685
1686        blk.status &= ~BlkDirty;
1687    }
1688}
1689
1690void
1691BaseCache::invalidateVisitor(CacheBlk &blk)
1692{
1693    if (blk.isDirty())
1694        warn_once("Invalidating dirty cache lines. " \
1695                  "Expect things to break.\n");
1696
1697    if (blk.isValid()) {
1698        assert(!blk.isDirty());
1699        invalidateBlock(&blk);
1700    }
1701}
1702
1703Tick
1704BaseCache::nextQueueReadyTime() const
1705{
1706    Tick nextReady = std::min(mshrQueue.nextReadyTime(),
1707                              writeBuffer.nextReadyTime());
1708
1709    // Don't signal prefetch ready time if no MSHRs available
1710    // Will signal once enough MSHRs are deallocated
1711    if (prefetcher && mshrQueue.canPrefetch()) {
1712        nextReady = std::min(nextReady,
1713                             prefetcher->nextPrefetchReadyTime());
1714    }
1715
1716    return nextReady;
1717}
1718
1719
1720bool
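// Service the next MSHR: apply the write allocator's coalescing policy,
// build the downstream miss packet (or forward a copy of the original
// request when no cache-block packet is needed), and send it. Returns true
// if the port is now waiting for a retry, false once the MSHR is in service.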
1721BaseCache::sendMSHRQueuePacket(MSHR* mshr)
1722{
1723    assert(mshr);
1724
1725    // use request from 1st target
1726    PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1727
1728    DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());
1729
1730    // if the cache is in write coalescing mode or (additionally) in
1731    // no allocation mode, and we have a write packet with an MSHR
1732    // that is not a whole-line write (due to incompatible flags etc),
1733    // then reset the write mode
1734    if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) {
1735        if (!mshr->isWholeLineWrite()) {
1736            // if we are currently write coalescing, hold on to the
1737            // MSHR for as many extra cycles as we need to completely
1738            // write a cache line
1739            if (writeAllocator->delay(mshr->blkAddr)) {
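                // rough estimate: one clock period per write of this
                // size in a full cache line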
1740                Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
1741                DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
1742                        "for write coalescing\n", tgt_pkt->print(), delay);
1743                mshrQueue.delay(mshr, delay);
1744                return false;
1745            } else {
1746                writeAllocator->reset();
1747            }
1748        } else {
1749            writeAllocator->resetDelay(mshr->blkAddr);
1750        }
1751    }
1752
1753    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
1754
1755    // either a prefetch that is not present upstream, or a normal
1756    // MSHR request, proceed to get the packet to send downstream
1757    PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(),
1758                                     mshr->isWholeLineWrite());
1759
1760    mshr->isForward = (pkt == nullptr);
1761
1762    if (mshr->isForward) {
1763        // not a cache block request, but a response is expected
1764        // make copy of current packet to forward, keep current
1765        // copy for response handling
1766        pkt = new Packet(tgt_pkt, false, true);
1767        assert(!pkt->isWrite());
1768    }
1769
1770    // play it safe and append (rather than set) the sender state,
1771    // as forwarded packets may already have existing state
1772    pkt->pushSenderState(mshr);
1773
1774    if (pkt->isClean() && blk && blk->isDirty()) {
1775        // A cache clean operation is looking for a dirty block. Mark
1776        // the packet so that the destination xbar can determine that
1777        // there will be a follow-up write packet as well.
1778        pkt->setSatisfied();
1779    }
1780
1781    if (!memSidePort.sendTimingReq(pkt)) {
1782        // we are awaiting a retry; delete this packet now and
1783        // create a new one when we get the opportunity to
1784        // resend the request
1785        delete pkt;
1786
1787        // note that we have now masked any requestBus and
1788        // schedSendEvent (we will wait for a retry before
1789        // doing anything), and this is so even if we do not
1790        // care about this packet and might override it before
1791        // it gets retried
1792        return true;
1793    } else {
1794        // As part of the call to sendTimingReq the packet is
1795        // forwarded to all neighbouring caches (and any caches
1796        // above them) as a snoop. Thus at this point we know if
1797        // any of the neighbouring caches are responding, and if
1798        // so, we know it is dirty, and we can determine if it is
1799        // being passed as Modified, making our MSHR the ordering
1800        // point
1801        bool pending_modified_resp = !pkt->hasSharers() &&
1802            pkt->cacheResponding();
1803        markInService(mshr, pending_modified_resp);
1804
1805        if (pkt->isClean() && blk && blk->isDirty()) {
1806            // A cache clean operation is looking for a dirty
1807            // block. If a dirty block is encountered, a WriteClean
1808            // will update any copies along the path to memory
1809            // down to the point of reference.
1810            DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
1811                    __func__, pkt->print(), blk->print());
1812            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
1813                                             pkt->id);
1814            PacketList writebacks;
1815            writebacks.push_back(wb_pkt);
1816            doWritebacks(writebacks, 0);
1817        }
1818
1819        return false;
1820    }
1821}
1822
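// Service the next write-buffer entry (writeback or uncacheable write) by
// forwarding its packet as is. Returns true if the port must wait for a
// retry, false once the entry is marked in service.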
1823bool
1824BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
1825{
1826    assert(wq_entry);
1827
1828    // always a single target for write queue entries
1829    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;
1830
1831    DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());
1832
1833    // forward as is, both for evictions and uncacheable writes
1834    if (!memSidePort.sendTimingReq(tgt_pkt)) {
1835        // note that we have now masked any requestBus and
1836        // schedSendEvent (we will wait for a retry before
1837        // doing anything), and this is so even if we do not
1838        // care about this packet and might override it before
1839        // it gets retried
1840        return true;
1841    } else {
1842        markInService(wq_entry);
1843        return false;
1844    }
1845}
1846
1847void
1848BaseCache::serialize(CheckpointOut &cp) const
1849{
1850    bool dirty(isDirty());
1851
1852    if (dirty) {
1853        warn("*** The cache still contains dirty data. ***\n");
1854        warn("    Make sure to drain the system using the correct flags.\n");
1855        warn("    This checkpoint will not restore correctly " \
1856             "and dirty data in the cache will be lost!\n");
1857    }
1858
1859    // Since we don't checkpoint the data in the cache, any dirty data
1860    // will be lost when restoring from a checkpoint of a system that
1861    // wasn't drained properly. Flag the checkpoint as invalid if the
1862    // cache contains dirty data.
1863    bool bad_checkpoint(dirty);
1864    SERIALIZE_SCALAR(bad_checkpoint);
1865}
1866
1867void
1868BaseCache::unserialize(CheckpointIn &cp)
1869{
1870    bool bad_checkpoint;
1871    UNSERIALIZE_SCALAR(bad_checkpoint);
1872    if (bad_checkpoint) {
1873        fatal("Restoring from checkpoints with dirty caches is not "
1874              "supported in the classic memory system. Please remove any "
1875              "caches or drain them properly before taking checkpoints.\n");
1876    }
1877}
1878
1879void
1880BaseCache::regStats()
1881{
1882    ClockedObject::regStats();
1883
1884    using namespace Stats;
1885
1886    // Hit statistics
1887    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1888        MemCmd cmd(access_idx);
1889        const string &cstr = cmd.toString();
1890
1891        hits[access_idx]
1892            .init(system->maxMasters())
1893            .name(name() + "." + cstr + "_hits")
1894            .desc("number of " + cstr + " hits")
1895            .flags(total | nozero | nonan)
1896            ;
1897        for (int i = 0; i < system->maxMasters(); i++) {
1898            hits[access_idx].subname(i, system->getMasterName(i));
1899        }
1900    }
1901
1902// These macros make it easier to sum the right subset of commands and
1903// to change the subset of commands that are considered "demand" vs
1904// "non-demand"
1905#define SUM_DEMAND(s) \
1906    (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \
1907     s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq])
1908
1909// should writebacks be included here?  prior code was inconsistent...
1910#define SUM_NON_DEMAND(s) \
1911    (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq])
1912
1913    demandHits
1914        .name(name() + ".demand_hits")
1915        .desc("number of demand (read+write) hits")
1916        .flags(total | nozero | nonan)
1917        ;
1918    demandHits = SUM_DEMAND(hits);
1919    for (int i = 0; i < system->maxMasters(); i++) {
1920        demandHits.subname(i, system->getMasterName(i));
1921    }
1922
1923    overallHits
1924        .name(name() + ".overall_hits")
1925        .desc("number of overall hits")
1926        .flags(total | nozero | nonan)
1927        ;
1928    overallHits = demandHits + SUM_NON_DEMAND(hits);
1929    for (int i = 0; i < system->maxMasters(); i++) {
1930        overallHits.subname(i, system->getMasterName(i));
1931    }
1932
1933    // Miss statistics
1934    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1935        MemCmd cmd(access_idx);
1936        const string &cstr = cmd.toString();
1937
1938        misses[access_idx]
1939            .init(system->maxMasters())
1940            .name(name() + "." + cstr + "_misses")
1941            .desc("number of " + cstr + " misses")
1942            .flags(total | nozero | nonan)
1943            ;
1944        for (int i = 0; i < system->maxMasters(); i++) {
1945            misses[access_idx].subname(i, system->getMasterName(i));
1946        }
1947    }
1948
1949    demandMisses
1950        .name(name() + ".demand_misses")
1951        .desc("number of demand (read+write) misses")
1952        .flags(total | nozero | nonan)
1953        ;
1954    demandMisses = SUM_DEMAND(misses);
1955    for (int i = 0; i < system->maxMasters(); i++) {
1956        demandMisses.subname(i, system->getMasterName(i));
1957    }
1958
1959    overallMisses
1960        .name(name() + ".overall_misses")
1961        .desc("number of overall misses")
1962        .flags(total | nozero | nonan)
1963        ;
1964    overallMisses = demandMisses + SUM_NON_DEMAND(misses);
1965    for (int i = 0; i < system->maxMasters(); i++) {
1966        overallMisses.subname(i, system->getMasterName(i));
1967    }
1968
1969    // Miss latency statistics
1970    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1971        MemCmd cmd(access_idx);
1972        const string &cstr = cmd.toString();
1973
1974        missLatency[access_idx]
1975            .init(system->maxMasters())
1976            .name(name() + "." + cstr + "_miss_latency")
1977            .desc("number of " + cstr + " miss cycles")
1978            .flags(total | nozero | nonan)
1979            ;
1980        for (int i = 0; i < system->maxMasters(); i++) {
1981            missLatency[access_idx].subname(i, system->getMasterName(i));
1982        }
1983    }
1984
1985    demandMissLatency
1986        .name(name() + ".demand_miss_latency")
1987        .desc("number of demand (read+write) miss cycles")
1988        .flags(total | nozero | nonan)
1989        ;
1990    demandMissLatency = SUM_DEMAND(missLatency);
1991    for (int i = 0; i < system->maxMasters(); i++) {
1992        demandMissLatency.subname(i, system->getMasterName(i));
1993    }
1994
1995    overallMissLatency
1996        .name(name() + ".overall_miss_latency")
1997        .desc("number of overall miss cycles")
1998        .flags(total | nozero | nonan)
1999        ;
2000    overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
2001    for (int i = 0; i < system->maxMasters(); i++) {
2002        overallMissLatency.subname(i, system->getMasterName(i));
2003    }
2004
2005    // access formulas
2006    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2007        MemCmd cmd(access_idx);
2008        const string &cstr = cmd.toString();
2009
2010        accesses[access_idx]
2011            .name(name() + "." + cstr + "_accesses")
2012            .desc("number of " + cstr + " accesses (hits+misses)")
2013            .flags(total | nozero | nonan)
2014            ;
2015        accesses[access_idx] = hits[access_idx] + misses[access_idx];
2016
2017        for (int i = 0; i < system->maxMasters(); i++) {
2018            accesses[access_idx].subname(i, system->getMasterName(i));
2019        }
2020    }
2021
2022    demandAccesses
2023        .name(name() + ".demand_accesses")
2024        .desc("number of demand (read+write) accesses")
2025        .flags(total | nozero | nonan)
2026        ;
2027    demandAccesses = demandHits + demandMisses;
2028    for (int i = 0; i < system->maxMasters(); i++) {
2029        demandAccesses.subname(i, system->getMasterName(i));
2030    }
2031
2032    overallAccesses
2033        .name(name() + ".overall_accesses")
2034        .desc("number of overall (read+write) accesses")
2035        .flags(total | nozero | nonan)
2036        ;
2037    overallAccesses = overallHits + overallMisses;
2038    for (int i = 0; i < system->maxMasters(); i++) {
2039        overallAccesses.subname(i, system->getMasterName(i));
2040    }
2041
2042    // miss rate formulas
2043    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2044        MemCmd cmd(access_idx);
2045        const string &cstr = cmd.toString();
2046
2047        missRate[access_idx]
2048            .name(name() + "." + cstr + "_miss_rate")
2049            .desc("miss rate for " + cstr + " accesses")
2050            .flags(total | nozero | nonan)
2051            ;
2052        missRate[access_idx] = misses[access_idx] / accesses[access_idx];
2053
2054        for (int i = 0; i < system->maxMasters(); i++) {
2055            missRate[access_idx].subname(i, system->getMasterName(i));
2056        }
2057    }
2058
2059    demandMissRate
2060        .name(name() + ".demand_miss_rate")
2061        .desc("miss rate for demand accesses")
2062        .flags(total | nozero | nonan)
2063        ;
2064    demandMissRate = demandMisses / demandAccesses;
2065    for (int i = 0; i < system->maxMasters(); i++) {
2066        demandMissRate.subname(i, system->getMasterName(i));
2067    }
2068
2069    overallMissRate
2070        .name(name() + ".overall_miss_rate")
2071        .desc("miss rate for overall accesses")
2072        .flags(total | nozero | nonan)
2073        ;
2074    overallMissRate = overallMisses / overallAccesses;
2075    for (int i = 0; i < system->maxMasters(); i++) {
2076        overallMissRate.subname(i, system->getMasterName(i));
2077    }
2078
2079    // miss latency formulas
2080    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2081        MemCmd cmd(access_idx);
2082        const string &cstr = cmd.toString();
2083
2084        avgMissLatency[access_idx]
2085            .name(name() + "." + cstr + "_avg_miss_latency")
2086            .desc("average " + cstr + " miss latency")
2087            .flags(total | nozero | nonan)
2088            ;
2089        avgMissLatency[access_idx] =
2090            missLatency[access_idx] / misses[access_idx];
2091
2092        for (int i = 0; i < system->maxMasters(); i++) {
2093            avgMissLatency[access_idx].subname(i, system->getMasterName(i));
2094        }
2095    }
2096
2097    demandAvgMissLatency
2098        .name(name() + ".demand_avg_miss_latency")
2099        .desc("average demand miss latency")
2100        .flags(total | nozero | nonan)
2101        ;
2102    demandAvgMissLatency = demandMissLatency / demandMisses;
2103    for (int i = 0; i < system->maxMasters(); i++) {
2104        demandAvgMissLatency.subname(i, system->getMasterName(i));
2105    }
2106
2107    overallAvgMissLatency
2108        .name(name() + ".overall_avg_miss_latency")
2109        .desc("average overall miss latency")
2110        .flags(total | nozero | nonan)
2111        ;
2112    overallAvgMissLatency = overallMissLatency / overallMisses;
2113    for (int i = 0; i < system->maxMasters(); i++) {
2114        overallAvgMissLatency.subname(i, system->getMasterName(i));
2115    }
2116
2117    blocked_cycles.init(NUM_BLOCKED_CAUSES);
2118    blocked_cycles
2119        .name(name() + ".blocked_cycles")
2120        .desc("number of cycles access was blocked")
2121        .subname(Blocked_NoMSHRs, "no_mshrs")
2122        .subname(Blocked_NoTargets, "no_targets")
2123        ;
2124
2125
2126    blocked_causes.init(NUM_BLOCKED_CAUSES);
2127    blocked_causes
2128        .name(name() + ".blocked")
2129        .desc("number of times access was blocked")
2130        .subname(Blocked_NoMSHRs, "no_mshrs")
2131        .subname(Blocked_NoTargets, "no_targets")
2132        ;
2133
2134    avg_blocked
2135        .name(name() + ".avg_blocked_cycles")
2136        .desc("average number of cycles each access was blocked")
2137        .subname(Blocked_NoMSHRs, "no_mshrs")
2138        .subname(Blocked_NoTargets, "no_targets")
2139        ;
2140
2141    avg_blocked = blocked_cycles / blocked_causes;
2142
2143    unusedPrefetches
2144        .name(name() + ".unused_prefetches")
2145        .desc("number of HardPF blocks evicted w/o reference")
2146        .flags(nozero)
2147        ;
2148
2149    writebacks
2150        .init(system->maxMasters())
2151        .name(name() + ".writebacks")
2152        .desc("number of writebacks")
2153        .flags(total | nozero | nonan)
2154        ;
2155    for (int i = 0; i < system->maxMasters(); i++) {
2156        writebacks.subname(i, system->getMasterName(i));
2157    }
2158
2159    // MSHR statistics
2160    // MSHR hit statistics
2161    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2162        MemCmd cmd(access_idx);
2163        const string &cstr = cmd.toString();
2164
2165        mshr_hits[access_idx]
2166            .init(system->maxMasters())
2167            .name(name() + "." + cstr + "_mshr_hits")
2168            .desc("number of " + cstr + " MSHR hits")
2169            .flags(total | nozero | nonan)
2170            ;
2171        for (int i = 0; i < system->maxMasters(); i++) {
2172            mshr_hits[access_idx].subname(i, system->getMasterName(i));
2173        }
2174    }
2175
2176    demandMshrHits
2177        .name(name() + ".demand_mshr_hits")
2178        .desc("number of demand (read+write) MSHR hits")
2179        .flags(total | nozero | nonan)
2180        ;
2181    demandMshrHits = SUM_DEMAND(mshr_hits);
2182    for (int i = 0; i < system->maxMasters(); i++) {
2183        demandMshrHits.subname(i, system->getMasterName(i));
2184    }
2185
2186    overallMshrHits
2187        .name(name() + ".overall_mshr_hits")
2188        .desc("number of overall MSHR hits")
2189        .flags(total | nozero | nonan)
2190        ;
2191    overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
2192    for (int i = 0; i < system->maxMasters(); i++) {
2193        overallMshrHits.subname(i, system->getMasterName(i));
2194    }
2195
2196    // MSHR miss statistics
2197    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2198        MemCmd cmd(access_idx);
2199        const string &cstr = cmd.toString();
2200
2201        mshr_misses[access_idx]
2202            .init(system->maxMasters())
2203            .name(name() + "." + cstr + "_mshr_misses")
2204            .desc("number of " + cstr + " MSHR misses")
2205            .flags(total | nozero | nonan)
2206            ;
2207        for (int i = 0; i < system->maxMasters(); i++) {
2208            mshr_misses[access_idx].subname(i, system->getMasterName(i));
2209        }
2210    }
2211
2212    demandMshrMisses
2213        .name(name() + ".demand_mshr_misses")
2214        .desc("number of demand (read+write) MSHR misses")
2215        .flags(total | nozero | nonan)
2216        ;
2217    demandMshrMisses = SUM_DEMAND(mshr_misses);
2218    for (int i = 0; i < system->maxMasters(); i++) {
2219        demandMshrMisses.subname(i, system->getMasterName(i));
2220    }
2221
2222    overallMshrMisses
2223        .name(name() + ".overall_mshr_misses")
2224        .desc("number of overall MSHR misses")
2225        .flags(total | nozero | nonan)
2226        ;
2227    overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
2228    for (int i = 0; i < system->maxMasters(); i++) {
2229        overallMshrMisses.subname(i, system->getMasterName(i));
2230    }
2231
2232    // MSHR miss latency statistics
2233    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2234        MemCmd cmd(access_idx);
2235        const string &cstr = cmd.toString();
2236
2237        mshr_miss_latency[access_idx]
2238            .init(system->maxMasters())
2239            .name(name() + "." + cstr + "_mshr_miss_latency")
2240            .desc("number of " + cstr + " MSHR miss cycles")
2241            .flags(total | nozero | nonan)
2242            ;
2243        for (int i = 0; i < system->maxMasters(); i++) {
2244            mshr_miss_latency[access_idx].subname(i, system->getMasterName(i));
2245        }
2246    }
2247
2248    demandMshrMissLatency
2249        .name(name() + ".demand_mshr_miss_latency")
2250        .desc("number of demand (read+write) MSHR miss cycles")
2251        .flags(total | nozero | nonan)
2252        ;
2253    demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
2254    for (int i = 0; i < system->maxMasters(); i++) {
2255        demandMshrMissLatency.subname(i, system->getMasterName(i));
2256    }
2257
2258    overallMshrMissLatency
2259        .name(name() + ".overall_mshr_miss_latency")
2260        .desc("number of overall MSHR miss cycles")
2261        .flags(total | nozero | nonan)
2262        ;
2263    overallMshrMissLatency =
2264        demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
2265    for (int i = 0; i < system->maxMasters(); i++) {
2266        overallMshrMissLatency.subname(i, system->getMasterName(i));
2267    }
2268
2269    // MSHR uncacheable statistics
2270    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2271        MemCmd cmd(access_idx);
2272        const string &cstr = cmd.toString();
2273
2274        mshr_uncacheable[access_idx]
2275            .init(system->maxMasters())
2276            .name(name() + "." + cstr + "_mshr_uncacheable")
2277            .desc("number of " + cstr + " MSHR uncacheable")
2278            .flags(total | nozero | nonan)
2279            ;
2280        for (int i = 0; i < system->maxMasters(); i++) {
2281            mshr_uncacheable[access_idx].subname(i, system->getMasterName(i));
2282        }
2283    }
2284
2285    overallMshrUncacheable
2286        .name(name() + ".overall_mshr_uncacheable_misses")
2287        .desc("number of overall MSHR uncacheable misses")
2288        .flags(total | nozero | nonan)
2289        ;
2290    overallMshrUncacheable =
2291        SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
2292    for (int i = 0; i < system->maxMasters(); i++) {
2293        overallMshrUncacheable.subname(i, system->getMasterName(i));
2294    }
2295
2296    // MSHR uncacheable latency statistics
2297    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2298        MemCmd cmd(access_idx);
2299        const string &cstr = cmd.toString();
2300
2301        mshr_uncacheable_lat[access_idx]
2302            .init(system->maxMasters())
2303            .name(name() + "." + cstr + "_mshr_uncacheable_latency")
2304            .desc("number of " + cstr + " MSHR uncacheable cycles")
2305            .flags(total | nozero | nonan)
2306            ;
2307        for (int i = 0; i < system->maxMasters(); i++) {
2308            mshr_uncacheable_lat[access_idx].subname(
2309                i, system->getMasterName(i));
2310        }
2311    }
2312
2313    overallMshrUncacheableLatency
2314        .name(name() + ".overall_mshr_uncacheable_latency")
2315        .desc("number of overall MSHR uncacheable cycles")
2316        .flags(total | nozero | nonan)
2317        ;
2318    overallMshrUncacheableLatency =
2319        SUM_DEMAND(mshr_uncacheable_lat) +
2320        SUM_NON_DEMAND(mshr_uncacheable_lat);
2321    for (int i = 0; i < system->maxMasters(); i++) {
2322        overallMshrUncacheableLatency.subname(i, system->getMasterName(i));
2323    }
2324
2325    // MSHR miss rate formulas
2326    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2327        MemCmd cmd(access_idx);
2328        const string &cstr = cmd.toString();
2329
2330        mshrMissRate[access_idx]
2331            .name(name() + "." + cstr + "_mshr_miss_rate")
2332            .desc("mshr miss rate for " + cstr + " accesses")
2333            .flags(total | nozero | nonan)
2334            ;
2335        mshrMissRate[access_idx] =
2336            mshr_misses[access_idx] / accesses[access_idx];
2337
2338        for (int i = 0; i < system->maxMasters(); i++) {
2339            mshrMissRate[access_idx].subname(i, system->getMasterName(i));
2340        }
2341    }
2342
2343    demandMshrMissRate
2344        .name(name() + ".demand_mshr_miss_rate")
2345        .desc("mshr miss rate for demand accesses")
2346        .flags(total | nozero | nonan)
2347        ;
2348    demandMshrMissRate = demandMshrMisses / demandAccesses;
2349    for (int i = 0; i < system->maxMasters(); i++) {
2350        demandMshrMissRate.subname(i, system->getMasterName(i));
2351    }
2352
2353    overallMshrMissRate
2354        .name(name() + ".overall_mshr_miss_rate")
2355        .desc("mshr miss rate for overall accesses")
2356        .flags(total | nozero | nonan)
2357        ;
2358    overallMshrMissRate = overallMshrMisses / overallAccesses;
2359    for (int i = 0; i < system->maxMasters(); i++) {
2360        overallMshrMissRate.subname(i, system->getMasterName(i));
2361    }
2362
2363    // mshrMiss latency formulas
2364    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2365        MemCmd cmd(access_idx);
2366        const string &cstr = cmd.toString();
2367
2368        avgMshrMissLatency[access_idx]
2369            .name(name() + "." + cstr + "_avg_mshr_miss_latency")
2370            .desc("average " + cstr + " mshr miss latency")
2371            .flags(total | nozero | nonan)
2372            ;
2373        avgMshrMissLatency[access_idx] =
2374            mshr_miss_latency[access_idx] / mshr_misses[access_idx];
2375
2376        for (int i = 0; i < system->maxMasters(); i++) {
2377            avgMshrMissLatency[access_idx].subname(
2378                i, system->getMasterName(i));
2379        }
2380    }
2381
2382    demandAvgMshrMissLatency
2383        .name(name() + ".demand_avg_mshr_miss_latency")
2384        .desc("average demand mshr miss latency")
2385        .flags(total | nozero | nonan)
2386        ;
2387    demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
2388    for (int i = 0; i < system->maxMasters(); i++) {
2389        demandAvgMshrMissLatency.subname(i, system->getMasterName(i));
2390    }
2391
2392    overallAvgMshrMissLatency
2393        .name(name() + ".overall_avg_mshr_miss_latency")
2394        .desc("average overall mshr miss latency")
2395        .flags(total | nozero | nonan)
2396        ;
2397    overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
2398    for (int i = 0; i < system->maxMasters(); i++) {
2399        overallAvgMshrMissLatency.subname(i, system->getMasterName(i));
2400    }
2401
2402    // mshrUncacheable latency formulas
2403    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2404        MemCmd cmd(access_idx);
2405        const string &cstr = cmd.toString();
2406
2407        avgMshrUncacheableLatency[access_idx]
2408            .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency")
2409            .desc("average " + cstr + " mshr uncacheable latency")
2410            .flags(total | nozero | nonan)
2411            ;
2412        avgMshrUncacheableLatency[access_idx] =
2413            mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
2414
2415        for (int i = 0; i < system->maxMasters(); i++) {
2416            avgMshrUncacheableLatency[access_idx].subname(
2417                i, system->getMasterName(i));
2418        }
2419    }
2420
2421    overallAvgMshrUncacheableLatency
2422        .name(name() + ".overall_avg_mshr_uncacheable_latency")
2423        .desc("average overall mshr uncacheable latency")
2424        .flags(total | nozero | nonan)
2425        ;
2426    overallAvgMshrUncacheableLatency =
2427        overallMshrUncacheableLatency / overallMshrUncacheable;
2428    for (int i = 0; i < system->maxMasters(); i++) {
2429        overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i));
2430    }
2431
2432    replacements
2433        .name(name() + ".replacements")
2434        .desc("number of replacements")
2435        ;
2436
2437    dataExpansions
2438        .name(name() + ".data_expansions")
2439        .desc("number of data expansions")
2440        .flags(nozero | nonan)
2441        ;
2442}
2443
2444void
2445BaseCache::regProbePoints()
2446{
2447    ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit");
2448    ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss");
2449    ppFill = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Fill");
2450}
2451
2452///////////////
2453//
2454// CpuSidePort
2455//
2456///////////////
2457bool
2458BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
2459{
2460    // Snoops shouldn't happen when bypassing caches
2461    assert(!cache->system->bypassCaches());
2462
2463    assert(pkt->isResponse());
2464
2465    // Express snoop responses from master to slave, e.g., from L1 to L2
2466    cache->recvTimingSnoopResp(pkt);
2467    return true;
2468}
2469
2470
2471bool
2472BaseCache::CpuSidePort::tryTiming(PacketPtr pkt)
2473{
2474    if (cache->system->bypassCaches() || pkt->isExpressSnoop()) {
2475        // always let express snoop packets through even if blocked
2476        return true;
2477    } else if (blocked || mustSendRetry) {
2478        // either already committed to send a retry, or blocked
2479        mustSendRetry = true;
2480        return false;
2481    }
2482    mustSendRetry = false;
2483    return true;
2484}
2485
2486bool
2487BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
2488{
2489    assert(pkt->isRequest());
2490
2491    if (cache->system->bypassCaches()) {
2492        // Just forward the packet if caches are disabled.
2493        // @todo This should really enqueue the packet rather than forward it
2494        bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
2495        assert(success);
2496        return true;
2497    } else if (tryTiming(pkt)) {
2498        cache->recvTimingReq(pkt);
2499        return true;
2500    }
2501    return false;
2502}
2503
2504Tick
2505BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
2506{
2507    if (cache->system->bypassCaches()) {
2508        // Forward the request if the system is in cache bypass mode.
2509        return cache->memSidePort.sendAtomic(pkt);
2510    } else {
2511        return cache->recvAtomic(pkt);
2512    }
2513}
2514
2515void
2516BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
2517{
2518    if (cache->system->bypassCaches()) {
2519        // The cache should be flushed if we are in cache bypass mode,
2520        // so we don't need to check if we need to update anything.
2521        cache->memSidePort.sendFunctional(pkt);
2522        return;
2523    }
2524
2525    // functional request
2526    cache->functionalAccess(pkt, true);
2527}
2528
2529AddrRangeList
2530BaseCache::CpuSidePort::getAddrRanges() const
2531{
2532    return cache->getAddrRanges();
2533}
2534
2535
2536BaseCache::
2537CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
2538                         const std::string &_label)
2539    : CacheSlavePort(_name, _cache, _label), cache(_cache)
2540{
2541}
2542
2543///////////////
2544//
2545// MemSidePort
2546//
2547///////////////
2548bool
2549BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
2550{
2551    cache->recvTimingResp(pkt);
2552    return true;
2553}
2554
2555// Express snooping requests to memside port
2556void
2557BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2558{
2559    // Snoops shouldn't happen when bypassing caches
2560    assert(!cache->system->bypassCaches());
2561
2562    // handle snooping requests
2563    cache->recvTimingSnoopReq(pkt);
2564}
2565
2566Tick
2567BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
2568{
2569    // Snoops shouldn't happen when bypassing caches
2570    assert(!cache->system->bypassCaches());
2571
2572    return cache->recvAtomicSnoop(pkt);
2573}
2574
2575void
2576BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
2577{
2578    // Snoops shouldn't happen when bypassing caches
2579    assert(!cache->system->bypassCaches());
2580
2581    // functional snoop (note that in contrast to atomic we don't have
2582    // a specific functionalSnoop method, as they have the same
2583    // behaviour regardless)
2584    cache->functionalAccess(pkt, false);
2585}
2586
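// Instead of draining a queued packet, ask the cache for the next MSHR or
// write-buffer entry to send, giving way to snoop responses for the same
// address.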
2587void
2588BaseCache::CacheReqPacketQueue::sendDeferredPacket()
2589{
2590    // sanity check
2591    assert(!waitingOnRetry);
2592
2593    // there should never be any deferred request packets in the
2594    // queue; instead we rely on the cache to provide the packets
2595    // from the MSHR queue or write queue
2596    assert(deferredPacketReadyTime() == MaxTick);
2597
2598    // check for request packets (requests & writebacks)
2599    QueueEntry* entry = cache.getNextQueueEntry();
2600
2601    if (!entry) {
2602        // can happen if e.g. we attempt a writeback and fail, but
2603        // before the retry, the writeback is eliminated because
2604        // we snoop another cache's ReadEx.
2605    } else {
2606        // let our snoop responses go first if there are responses to
2607        // the same addresses
2608        if (checkConflictingSnoop(entry->getTarget()->pkt)) {
2609            return;
2610        }
2611        waitingOnRetry = entry->sendPacket(cache);
2612    }
2613
2614    // if we succeeded and are not waiting for a retry, schedule the
2615    // next send considering when the next queue is ready, note that
2616    // snoop responses have their own packet queue and thus schedule
2617    // their own events
2618    if (!waitingOnRetry) {
2619        schedSendEvent(cache.nextQueueReadyTime());
2620    }
2621}
2622
2623BaseCache::MemSidePort::MemSidePort(const std::string &_name,
2624                                    BaseCache *_cache,
2625                                    const std::string &_label)
2626    : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
2627      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
2628      _snoopRespQueue(*_cache, *this, true, _label), cache(_cache)
2629{
2630}
2631
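// Track streaming writes and adjust the allocation mode: sequential writes
// accumulate byteCount and promote ALLOCATE to COALESCE once the coalesce
// limit is passed, and COALESCE to NO_ALLOCATE past the no-allocate limit;
// a non-sequential write resets the tracking and returns to ALLOCATE.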
2632void
2633WriteAllocator::updateMode(Addr write_addr, unsigned write_size,
2634                           Addr blk_addr)
2635{
2636    // check if we are continuing where the last write ended
2637    if (nextAddr == write_addr) {
2638        delayCtr[blk_addr] = delayThreshold;
2639        // stop if we have already saturated
2640        if (mode != WriteMode::NO_ALLOCATE) {
2641            byteCount += write_size;
2642            // switch to coalescing mode if we have passed the lower
2643            // threshold
2644            if (mode == WriteMode::ALLOCATE &&
2645                byteCount > coalesceLimit) {
2646                mode = WriteMode::COALESCE;
2647                DPRINTF(Cache, "Switched to write coalescing\n");
2648            } else if (mode == WriteMode::COALESCE &&
2649                       byteCount > noAllocateLimit) {
2650                // and then switch to non-allocating mode if we
2651                // pass the upper threshold
2652                mode = WriteMode::NO_ALLOCATE;
2653                DPRINTF(Cache, "Switched to write-no-allocate\n");
2654            }
2655        }
2656    } else {
2657        // we did not see a write matching the previous one, start
2658        // over again
2659        byteCount = write_size;
2660        mode = WriteMode::ALLOCATE;
2661        resetDelay(blk_addr);
2662    }
2663    nextAddr = write_addr + write_size;
2664}
2665
2666WriteAllocator*
2667WriteAllocatorParams::create()
2668{
2669    return new WriteAllocator(this);
2670}
2671