// base.cc, revision 13416
/*
 * Copyright (c) 2012-2013, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "mem/cache/base.hh"

#include "base/compiler.hh"
#include "base/logging.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheRepl.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "mem/cache/queue_entry.hh"
#include "params/BaseCache.hh"
#include "params/WriteAllocator.hh"
#include "sim/core.hh"

class BaseMasterPort;
class BaseSlavePort;

using namespace std;

BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
                                          BaseCache *_cache,
                                          const std::string &_label)
    : QueuedSlavePort(_name, _cache, queue), queue(*_cache, *this, _label),
      blocked(false), mustSendRetry(false),
      sendRetryEvent([this]{ processSendRetry(); }, _name)
{
}

BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
    : MemObject(p),
      cpuSidePort(p->name + ".cpu_side", this, "CpuSidePort"),
      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
      tags(p->tags),
      prefetcher(p->prefetcher),
      writeAllocator(p->write_allocator),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri),
      blkSize(blk_size),
      lookupLatency(p->tag_latency),
      dataLatency(p->data_latency),
      forwardLatency(p->tag_latency),
      fillLatency(p->data_latency),
      responseLatency(p->response_latency),
      numTarget(p->tgts_per_mshr),
      forwardSnoops(true),
      clusivity(p->clusivity),
      isReadOnly(p->is_read_only),
      blocked(0),
      order(0),
      noTargetMSHR(nullptr),
      missCount(p->max_miss_count),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
      system(p->system)
{
    // the MSHR queue has no reserve entries as we check the MSHR
    // queue on every single allocation, whereas the write queue has
    // as many reserve entries as we have MSHRs, since every MSHR may
    // eventually require a writeback, and we do not check the write
    // buffer before committing to an MSHR
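    //
    // As a rough illustration (hypothetical parameter values): with
    // p->mshrs = 4 and p->write_buffers = 8, the write buffer keeps
    // four reserve entries, one per MSHR, so even if every
    // outstanding miss eventually produces a writeback the write
    // buffer can still absorb them all without deadlocking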

    // forward snoops is overridden in init() once we can query
    // whether the connected master is actually snooping or not

    tempBlock = new TempCacheBlk(blkSize);

    tags->tagsInit(this);
    if (prefetcher)
        prefetcher->setCache(this);
}

BaseCache::~BaseCache()
{
    delete tempBlock;
}

void
BaseCache::CacheSlavePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(CachePort, "Port is blocking new requests\n");
    blocked = true;
    // if we already scheduled a retry in this cycle, but it has not yet
    // happened, cancel it
    if (sendRetryEvent.scheduled()) {
        owner.deschedule(sendRetryEvent);
        DPRINTF(CachePort, "Port descheduled retry\n");
        mustSendRetry = true;
    }
}

void
BaseCache::CacheSlavePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(CachePort, "Port is accepting new requests\n");
    blocked = false;
    if (mustSendRetry) {
        // @TODO: need to find a better time (next cycle?)
        owner.schedule(sendRetryEvent, curTick() + 1);
    }
}

void
BaseCache::CacheSlavePort::processSendRetry()
{
    DPRINTF(CachePort, "Port is sending retry\n");

    // reset the flag and call retry
    mustSendRetry = false;
    sendRetryReq();
}

Addr
BaseCache::regenerateBlkAddr(CacheBlk* blk)
{
    if (blk != tempBlock) {
        return tags->regenerateBlkAddr(blk);
    } else {
        return tempBlock->getAddr();
    }
}

void
BaseCache::init()
{
    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
        fatal("Cache ports on %s are not connected\n", name());
    cpuSidePort.sendRangeChange();
    forwardSnoops = cpuSidePort.isSnooping();
}

BaseMasterPort &
BaseCache::getMasterPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_side") {
        return memSidePort;
    } else {
        return MemObject::getMasterPort(if_name, idx);
    }
}

BaseSlavePort &
BaseCache::getSlavePort(const std::string &if_name, PortID idx)
{
    if (if_name == "cpu_side") {
        return cpuSidePort;
    } else {
        return MemObject::getSlavePort(if_name, idx);
    }
}

bool
BaseCache::inRange(Addr addr) const
{
    for (const auto& r : addrRanges) {
        if (r.contains(addr)) {
            return true;
        }
    }
    return false;
}

void
BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    if (pkt->needsResponse()) {
        pkt->makeTimingResponse();
        // @todo: Make someone pay for this
        pkt->headerDelay = pkt->payloadDelay = 0;

        // In this case we are considering request_time, which takes
        // into account the delay of the xbar (if any) plus lat, and
        // neglects responseLatency: the hit latency is modelled just
        // as lookupLatency, or as the value of lat overridden by
        // access(), which calls the accessBlock() function.
        cpuSidePort.schedTimingResp(pkt, request_time, true);
    } else {
        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                pkt->print());

        // queue the packet for deletion, as the sending cache is
        // still relying on it; if the block is found in access(),
        // CleanEvict and Writeback messages will be deleted
        // here as well
        pendingDelete.reset(pkt);
    }
}

void
BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                               Tick forward_time, Tick request_time)
{
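    // A sketch of the cases handled below: on an MSHR hit we sink
    // CleanEvicts, steer WriteClean packets to the write buffer, or
    // coalesce the packet as a new target of the existing MSHR; with
    // no matching MSHR, evictions and WriteClean go to the write
    // buffer and everything else allocates a fresh MSHR.
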
    if (writeAllocator &&
        pkt && pkt->isWrite() && !pkt->req->isUncacheable()) {
        writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
                                   pkt->getBlockAddr(blkSize));
    }

    if (mshr) {
        /// MSHR hit
        /// @note writebacks will be checked in getNextMSHR()
        /// for any conflicting requests to the same block

        //@todo remove hw_pf here

        // Coalesce unless it was a software prefetch (see above).
        if (pkt) {
            assert(!pkt->isWriteback());
            // CleanEvicts corresponding to blocks which have
            // outstanding requests in MSHRs are simply sunk here
            if (pkt->cmd == MemCmd::CleanEvict) {
                pendingDelete.reset(pkt);
            } else if (pkt->cmd == MemCmd::WriteClean) {
                // A WriteClean should never coalesce with any
                // outstanding cache maintenance requests.

                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                        pkt->print());

                assert(pkt->req->masterId() < system->maxMasters());
                mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // We use forward_time here because it is the same
                // considering new targets. We have multiple
                // requests for the same address here. It
                // specifies the latency to allocate an internal
                // buffer and to schedule an event to the queued
                // port and also takes into account the additional
                // delay of the xbar.
                mshr->allocateTarget(pkt, forward_time, order++,
                                     allocOnFill(pkt->cmd));
                if (mshr->getNumTargets() == numTarget) {
                    noTargetMSHR = mshr;
                    setBlocked(Blocked_NoTargets);
                    // need to be careful with this... if this mshr isn't
                    // ready yet (i.e. time > curTick()), we don't want to
                    // move it ahead of mshrs that are ready
                    // mshrQueue.moveToFront(mshr);
                }
            }
        }
    } else {
        // no MSHR
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
            // We use forward_time here because there is a
            // writeback or writeclean, forwarded to the WriteBuffer.
            allocateWriteBuffer(pkt, forward_time);
        } else {
            if (blk && blk->isValid()) {
                // If we have a write miss to a valid block, we
                // need to mark the block non-readable.  Otherwise
                // if we allow reads while there's an outstanding
                // write miss, the read could return stale data
                // out of the cache block... a more aggressive
                // system could detect the overlap (if any) and
                // forward data out of the MSHRs, but we don't do
                // that yet.  Note that we do need to leave the
                // block valid so that it stays in the cache, in
                // case we get an upgrade response (and hence no
                // new data) when the write miss completes.
                // As long as CPUs do proper store/load forwarding
                // internally, and have a sufficiently weak memory
                // model, this is probably unnecessary, but at some
                // point it must have seemed like we needed it...
                assert((pkt->needsWritable() && !blk->isWritable()) ||
                       pkt->req->isCacheMaintenance());
                blk->status &= ~BlkReadable;
            }
            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }
    }
}

void
BaseCache::recvTimingReq(PacketPtr pkt)
{
    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here: the function
        // access() calls accessBlock(), which can modify the value
        // of lat.
        satisfied = access(pkt, blk, lat, writebacks);

        // copy writebacks to write buffer here to ensure they logically
        // precede anything happening below
        doWritebacks(writebacks, forward_time);
    }

    // Here we charge the headerDelay, which takes into account the
    // latencies of the bus if the packet came from it.
    // The latency charged is simply lat, i.e. the value of
    // lookupLatency as modified by access(), or lookupLatency itself
    // if access() left it untouched.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat) + pkt->headerDelay;
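    // For example (hypothetical numbers): with lat = 2 cycles and a
    // packet carrying headerDelay = 1000 ticks from the xbar,
    // request_time comes out as clockEdge(Cycles(2)) + 1000 ticks.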
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    if (satisfied) {
        // notify before anything else, as handleTimingReqHit below
        // might turn the packet into a response
        ppHit->notify(pkt);

        if (prefetcher && blk && blk->wasPrefetched()) {
            blk->status &= ~BlkHWPrefetched;
        }

        handleTimingReqHit(pkt, blk, request_time);
    } else {
        handleTimingReqMiss(pkt, blk, forward_time, request_time);

        ppMiss->notify(pkt);
    }

    if (prefetcher) {
        // track time of availability of next prefetch, if any
        Tick next_pf_time = prefetcher->nextPrefetchReadyTime();
        if (next_pf_time != MaxTick) {
            schedMemSideSendEvent(next_pf_time);
        }
    }
}

void
BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
{
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort.schedTimingResp(pkt, completion_time, true);
}

void
BaseCache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    const bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    MSHR::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    PacketList writebacks;

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
         mshr->wasWholeLineWrite);

    // make sure that if the mshr was due to a whole line write then
    // the response is an invalidation
    assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
            writeAllocator->allocate() : mshr->allocOnFill();
        blk = handleFill(pkt, blk, writebacks, allocate);
        assert(blk != nullptr);
    }

    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
        // The block was marked not readable while there was a pending
        // cache maintenance operation, restore its flag.
        blk->status |= BlkReadable;

        // This was a cache clean operation (without invalidate)
        // and we have a copy of the block already. Since there
        // is no invalidation, we can promote targets that don't
        // require a writable copy
        mshr->promoteReadable();
    }

    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
        // If at this point the referenced block is writable and the
        // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
        mshr->promoteWritable();
    }

    serviceMSHRTargets(mshr, pkt, blk, writebacks);

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding... see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        // while we deallocate an mshr from the queue we still have to
        // check the isFull condition before and after as we might
        // have been using the reserved entries already
        const bool was_full = mshrQueue.isFull();
        mshrQueue.deallocate(mshr);
        if (was_full && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }

    // if we used the temp block, check to see if it's valid and then
    // clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        evictBlock(blk, writebacks);
    }

    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}


Tick
BaseCache::recvAtomic(PacketPtr pkt)
{
    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty
        // block. If a dirty block is encountered a WriteClean
        // will update any copies on the path to the memory
        // until the point of reference.
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);
    assert(writebacks.empty());

    if (!satisfied) {
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention.  If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used the temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter) first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = evictBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

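    // The latency is returned in ticks; e.g. (hypothetical numbers)
    // lat = 4 cycles at a 500 ps clock period comes back to the
    // caller as 2000 ticks.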
    return lat * clockPeriod();
}

void
BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found.  As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                     blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty ||
        cpuSidePort.trySatisfyFunctional(pkt) ||
        mshrQueue.trySatisfyFunctional(pkt, blk_addr) ||
        writeBuffer.trySatisfyFunctional(pkt, blk_addr) ||
        memSidePort.trySatisfyFunctional(pkt);

    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__,  pkt->print(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (from_cpu_side) {
            memSidePort.sendFunctional(pkt);
        } else if (cpuSidePort.isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort.sendFunctionalSnoop(pkt);
        }
    }
}


void
BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = pkt->getOffset(blkSize);
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

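    // e.g. (hypothetical values): a 64-bit CondSwap with extra data
    // 0x1 overwrites the block only if the 8 bytes currently at the
    // target offset compare equal to 0x1; either way the packet has
    // already been loaded with the old memory value above.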
    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}

QueueEntry*
BaseCache::getNextQueueEntry()
{
    // Check both MSHR queue and write buffer for potential requests,
    // note that null does not mean there is no request, it could
    // simply be that it is not ready
    MSHR *miss_mshr  = mshrQueue.getNext();
    WriteQueueEntry *wq_entry = writeBuffer.getNext();

    // If we got a write buffer request ready, first priority is a
    // full write buffer, otherwise we favour the miss requests
    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
        // need to search MSHR queue for conflicting earlier miss.
        MSHR *conflict_mshr =
            mshrQueue.findPending(wq_entry->blkAddr,
                                  wq_entry->isSecure);

        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
            // Service misses in order until conflict is cleared.
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue write
        return wq_entry;
    } else if (miss_mshr) {
        // need to check for conflicting earlier writeback
        WriteQueueEntry *conflict_mshr =
            writeBuffer.findPending(miss_mshr->blkAddr,
                                    miss_mshr->isSecure);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

758
759            // The only way this happens is if we are
760            // doing a write and we didn't have permissions
761            // then subsequently saw a writeback (owned got evicted)
762            // We need to make sure to perform the writeback first
763            // To preserve the dirty data, then we can issue the write

            // should we return wq_entry here instead?  I.e. do we
            // have to flush writes in order?  I don't think so... not
            // for Alpha anyway.  Maybe for x86?
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue read
        return miss_mshr;
    }
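    // To recap the priority order above (a sketch, modulo the
    // conflict checks): a full write buffer is served first, then
    // ready MSHRs, then the write buffer, and only when both queues
    // are empty do we fall through to a prefetch.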

    // fall through... no pending requests.  Try a prefetch.
    assert(!miss_mshr && !wq_entry);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = pkt->getBlockAddr(blkSize);
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt;
            }
        }
    }

    return nullptr;
}

void
BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            // extract data from cache and save it into the data field in
            // the packet as a return value from this atomic op
            int offset = tags->extractBlkOffset(pkt->getAddr());
            uint8_t *blk_data = blk->data + offset;
            pkt->setData(blk_data);

            // execute AMO operation
            (*(pkt->getAtomicOp()))(blk_data);

            // set block status to dirty
            blk->status |= BlkDirty;
        } else {
            cmpAndSwap(blk, pkt);
        }
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if this is a failed StoreCond, so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);
    } else if (pkt->isUpgrade()) {
        // sanity check
        assert(!pkt->hasSharers());

        if (blk->isDirty()) {
            // we were in the Owned state, and a cache above us that
            // has the line in Shared state needs to be made aware
            // that the data it already has is in fact dirty
            pkt->setCacheResponding();
            blk->status &= ~BlkDirty;
        }
    } else if (pkt->isClean()) {
        blk->status &= ~BlkDirty;
    } else {
        assert(pkt->isInvalidate());
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
                pkt->print());
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                  PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());
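
    // A sketch of the return value: true means the request is fully
    // handled at this level (a hit, an absorbed writeback or
    // CleanEvict, or a failed store conditional); false means the
    // packet must be forwarded downstream (a miss, a cache
    // maintenance operation, or a write-through WriteClean).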

    // Note that lat is passed by reference to accessBlock(), which
    // may modify its value.
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat);

    DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
            blk ? "hit " + blk->print() : "miss");

    if (pkt->req->isCacheMaintenance()) {
        // A cache maintenance operation is always forwarded to the
        // memory below even if the block is found in dirty state.

        // We defer any changes to the state of the block until we
        // create and mark as in service the mshr for the downstream
        // packet.
        return false;
    }

    if (pkt->isEviction()) {
        // We check for the presence of the block in caches above before
        // issuing a Writeback or CleanEvict to the write buffer. Therefore
        // the only possible case is a CleanEvict packet coming from above
        // and encountering a Writeback generated in this cache that is
        // waiting in the write buffer. Cases of upper-level peer caches
        // generating CleanEvict and Writeback, or simply CleanEvict and
        // CleanEvict, almost simultaneously will be caught by the snoops
        // sent out by the crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // The CleanEvict and WritebackClean snoops into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();
                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is a special case.  We can write the block
    // into the cache without having a writable copy (or any copy at
    // all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());
            return true;
        }

        if (!blk) {
            // need to do a replacement
            blk = allocateBlock(pkt, writebacks);
            if (!blk) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }

            blk->status |= (BlkValid | BlkReadable);
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            // TODO: the coherent cache can assert(!blk->isDirty());
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing
        // writable, and we got the writeback in Modified or Exclusive
        // state; if not, we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);
        // populate the time when the block will be ready to access.
        blk->whenReady = clockEdge(fillLatency) + pkt->headerDelay +
            pkt->payloadDelay;
        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        if (blk) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if (pkt->cmd == MemCmd::WriteClean) {
        // WriteClean handling is a special case. We can allocate a
        // block directly if it doesn't exist and we can update the
        // block immediately. The WriteClean transfers the ownership
        // of the block as well.
        assert(blkSize == pkt->getSize());

        if (!blk) {
            if (pkt->writeThrough()) {
                // if this is a write through packet, we don't try to
                // allocate if the block is not present
                return false;
            } else {
                // a writeback that misses needs to allocate a new block
                blk = allocateBlock(pkt, writebacks);
                if (!blk) {
                    // no replaceable block available: give up, fwd to
                    // next level.
                    incMissCount(pkt);
                    return false;
                }

                blk->status |= (BlkValid | BlkReadable);
            }
        }

        // at this point this is either a writeback or a write-through
        // write clean operation and the block is already in this
        // cache; we need to update the data and the block flags
        assert(blk);
        // TODO: the coherent cache can assert(!blk->isDirty());
        if (!pkt->writeThrough()) {
            blk->status |= BlkDirty;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());

        incHitCount(pkt);
        // populate the time when the block will be ready to access.
        blk->whenReady = clockEdge(fillLatency) + pkt->headerDelay +
            pkt->payloadDelay;
        // if this is a write-through packet it will be sent to the
        // cache below
        return !pkt->writeThrough();
    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
                       blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);
        satisfyRequest(pkt, blk);
        maintainClusivity(pkt->fromCache(), blk);

        return true;
    }

    // Can't satisfy access normally... either no block (blk == nullptr)
    // or have block but need writable

    incMissCount(pkt);

    if (!blk && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

void
BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
        clusivity == Enums::mostly_excl) {
        // if we have responded to a cache, and our block is still
        // valid, but not dirty, and this cache is mostly exclusive
        // with respect to the cache above, drop the block
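        //
        // e.g. (a sketch): an L2 configured with clusivity ==
        // mostly_excl drops its own clean copy once the cache above
        // has been given the line, roughly approximating exclusive
        // behaviour between the two levels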
        invalidateBlock(blk);
    }
}

CacheBlk*
BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
                      bool allocate)
{
    assert(pkt->isResponse());
    Addr addr = pkt->getAddr();
    bool is_secure = pkt->isSecure();
#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    // When handling a fill, we should have no writes to this line.
    assert(addr == pkt->getBlockAddr(blkSize));
    assert(!writeBuffer.findMatch(addr, is_secure));

    if (!blk) {
        // better have read new data...
        assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);

        // need to do a replacement if allocating, otherwise we stick
        // with the temporary storage
        blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;

        if (!blk) {
            // No replaceable block or a mostly exclusive
            // cache... just use temporary storage to complete the
            // current request and then get rid of it
            assert(!tempBlock->isValid());
            blk = tempBlock;
            tempBlock->insert(addr, is_secure);
            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
                    is_secure ? "s" : "ns");
        }

        // we should never be overwriting a valid block
        assert(!blk->isValid());
    } else {
        // existing block... probably an upgrade
        assert(regenerateBlkAddr(blk) == addr);
        assert(blk->isSecure() == is_secure);
        // either we're getting new data or the block should already be valid
        assert(pkt->hasData() || blk->isValid());
        // don't clear block status... if block is already dirty we
        // don't want to lose that
    }

    blk->status |= BlkValid | BlkReadable;

    // sanity check for whole-line writes, which should always be
    // marked as writable as part of the fill, and then later marked
    // dirty as part of satisfyRequest
    if (pkt->cmd == MemCmd::InvalidateResp) {
        assert(!pkt->hasSharers());
    }

    // here we deal with setting the appropriate state of the line,
    // and we start by looking at the hasSharers flag, and ignore the
    // cacheResponding flag (normally signalling dirty data) if the
    // packet has sharers, thus the line is never allocated as Owned
    // (dirty but not writable), and always ends up being either
    // Shared, Exclusive or Modified, see Packet::setCacheResponding
    // for more details
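    //
    // Illustrative outcomes (a sketch): a response with hasSharers
    // set leaves the line Shared (valid, readable); without sharers
    // it becomes Exclusive, and additionally Modified (dirty) if
    // cacheResponding indicates a cache-to-cache transfer.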
    if (!pkt->hasSharers()) {
        // we could get a writable line from memory (rather than a
        // cache) even in a read-only cache; note that we set this
        // bit anyway in that case, possibly revisit this decision
        blk->status |= BlkWritable;

        // check if we got this via cache-to-cache transfer (i.e., from a
        // cache that had the block in Modified or Owned state)
        if (pkt->cacheResponding()) {
            // we got the block in Modified state, and invalidated the
            // owners copy
            blk->status |= BlkDirty;

            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
                          "in read-only cache %s\n", name());
        }
    }

    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
            addr, is_secure ? "s" : "ns", old_state, blk->print());

    // if we got new data, copy it in (checking for a read response
    // and a response that has data is the same in the end)
    if (pkt->isRead()) {
        // sanity checks
        assert(pkt->hasData());
        assert(pkt->getSize() == blkSize);

        pkt->writeDataToBlock(blk->data, blkSize);
    }
    // We pay for fillLatency here.
    blk->whenReady = clockEdge() + fillLatency * clockPeriod() +
        pkt->payloadDelay;

    return blk;
}

CacheBlk*
BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
{
    // Get address
    const Addr addr = pkt->getAddr();

    // Get secure bit
    const bool is_secure = pkt->isSecure();

    // Find replacement victim
    std::vector<CacheBlk*> evict_blks;
    CacheBlk *victim = tags->findVictim(addr, is_secure, evict_blks);

    // It is valid to return nullptr if there is no victim
    if (!victim)
        return nullptr;

    // Print victim block's information
    DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print());

    // Check for transient state allocations. If any of the entries listed
    // for eviction has a transient state, the allocation fails
    for (const auto& blk : evict_blks) {
        if (blk->isValid()) {
            Addr repl_addr = regenerateBlkAddr(blk);
            MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
            if (repl_mshr) {
                // must be an outstanding upgrade or clean request
                // on a block we're about to replace...
                assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
                       repl_mshr->isCleaning());

                // too hard to replace block with transient state
                // allocation failed, block not inserted
                return nullptr;
            }
        }
    }

    // The victim will be replaced by a new entry, so increase the replacement
    // counter if a valid block is being replaced
    if (victim->isValid()) {
        DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
                "(%s): %s\n", regenerateBlkAddr(victim),
                victim->isSecure() ? "s" : "ns",
                addr, is_secure ? "s" : "ns",
                victim->isDirty() ? "writeback" : "clean");

        replacements++;
    }

    // Evict valid blocks associated to this victim block
    for (const auto& blk : evict_blks) {
        if (blk->isValid()) {
            if (blk->wasPrefetched()) {
                unusedPrefetches++;
            }

            evictBlock(blk, writebacks);
        }
    }

    // Insert new block at victimized entry
    tags->insertBlock(addr, is_secure, pkt->req->masterId(),
                      pkt->req->taskId(), victim);

    return victim;
}

void
BaseCache::invalidateBlock(CacheBlk *blk)
{
    // If handling a block present in the Tags, let it do its invalidation
    // process, which will update stats and invalidate the block itself
    if (blk != tempBlock) {
        tags->invalidate(blk);
    } else {
        tempBlock->invalidate();
    }
}

void
BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
{
    PacketPtr pkt = evictBlock(blk);
    if (pkt) {
        writebacks.push_back(pkt);
    }
}

PacketPtr
BaseCache::writebackBlk(CacheBlk *blk)
{
    chatty_assert(!isReadOnly || writebackClean,
                  "Writeback from read-only cache");
    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));

    writebacks[Request::wbMasterId]++;

    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt =
        new Packet(req, blk->isDirty() ?
                   MemCmd::WritebackDirty : MemCmd::WritebackClean);

    DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
            pkt->print(), blk->isWritable(), blk->isDirty());
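
    // In MOESI terms (a rough mapping): a writable block leaves this
    // cache in Modified or Exclusive state and ownership passes
    // downstream; a non-writable block was Owned or Shared, so
    // hasSharers is set below to tell the receiver that copies may
    // remain above.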

    if (blk->isWritable()) {
        // not asserting hasSharers means we pass the block in Modified
        // state; mark our own block non-writable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}

PacketPtr
BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
{
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure()) {
        req->setFlags(Request::SECURE);
    }
    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);

    if (dest) {
        req->setFlags(dest);
        pkt->setWriteThrough();
    }

    DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
            blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting hasSharers means we pass the block in Modified
        // state; mark our own block non-writable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}


void
BaseCache::memWriteback()
{
    tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
}

void
BaseCache::memInvalidate()
{
    tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
}

bool
BaseCache::isDirty() const
{
    return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
}

bool
BaseCache::coalesce() const
{
    return writeAllocator && writeAllocator->coalesce();
}

void
BaseCache::writebackVisitor(CacheBlk &blk)
{
    if (blk.isDirty()) {
        assert(blk.isValid());

        RequestPtr request = std::make_shared<Request>(
            regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);

        request->taskId(blk.task_id);
        if (blk.isSecure()) {
            request->setFlags(Request::SECURE);
        }

        Packet packet(request, MemCmd::WriteReq);
        packet.dataStatic(blk.data);

        memSidePort.sendFunctional(&packet);

        blk.status &= ~BlkDirty;
    }
}

void
BaseCache::invalidateVisitor(CacheBlk &blk)
{
    if (blk.isDirty())
        warn_once("Invalidating dirty cache lines. " \
                  "Expect things to break.\n");

    if (blk.isValid()) {
        assert(!blk.isDirty());
        invalidateBlock(&blk);
    }
}

Tick
BaseCache::nextQueueReadyTime() const
{
    Tick nextReady = std::min(mshrQueue.nextReadyTime(),
                              writeBuffer.nextReadyTime());

    // Don't signal prefetch ready time if no MSHRs available
    // Will signal once enough MSHRs are deallocated
1438    if (prefetcher && mshrQueue.canPrefetch()) {
1439        nextReady = std::min(nextReady,
1440                             prefetcher->nextPrefetchReadyTime());
1441    }
1442
1443    return nextReady;
1444}
1445
1446
1447bool
1448BaseCache::sendMSHRQueuePacket(MSHR* mshr)
1449{
1450    assert(mshr);
1451
1452    // use request from 1st target
1453    PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1454
1455    DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());
1456
1457    // if the cache is in write coalescing mode or (additionally) in
1458    // no allocation mode, and we have a write packet with an MSHR
1459    // that is not a whole-line write (due to incompatible flags etc),
1460    // then reset the write mode
1461    if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) {
1462        if (!mshr->isWholeLineWrite()) {
1463            // if we are currently write coalescing, hold on to the
1464            // MSHR for as many extra cycles as we need to
1465            // completely write a cache line
1466            if (writeAllocator->delay(mshr->blkAddr)) {
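                // the hold time is the number of writes of this size
                // that fit in a block, e.g. 64-byte blocks written 8
                // bytes at a time give an 8-cycle delay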
1467                Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
1468                DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
1469                        "for write coalescing\n", tgt_pkt->print(), delay);
1470                mshrQueue.delay(mshr, delay);
1471                return false;
1472            } else {
1473                writeAllocator->reset();
1474            }
1475        } else {
1476            writeAllocator->resetDelay(mshr->blkAddr);
1477        }
1478    }
1479
1480    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
1481
1482    // either a prefetch that is not present upstream, or a normal
1483    // MSHR request, proceed to get the packet to send downstream
1484    PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(),
1485                                     mshr->isWholeLineWrite());
1486
1487    mshr->isForward = (pkt == nullptr);
1488
1489    if (mshr->isForward) {
1490        // not a cache block request, but a response is expected
1491        // make copy of current packet to forward, keep current
1492        // copy for response handling
1493        pkt = new Packet(tgt_pkt, false, true);
1494        assert(!pkt->isWrite());
1495    }
1496
1497    // play it safe and append (rather than set) the sender state,
1498    // as forwarded packets may already have existing state
1499    pkt->pushSenderState(mshr);
1500
1501    if (pkt->isClean() && blk && blk->isDirty()) {
1502        // A cache clean operation is looking for a dirty block. Mark
1503        // the packet so that the destination xbar can determine that
1504        // there will be a follow-up write packet as well.
1505        pkt->setSatisfied();
1506    }
1507
1508    if (!memSidePort.sendTimingReq(pkt)) {
1509        // we are awaiting a retry; delete this packet and
1510        // create a new one when we get the opportunity to
1511        // resend
1512        delete pkt;
1513
1514        // note that we have now masked any requestBus and
1515        // schedSendEvent calls (we will wait for a retry before
1516        // doing anything), and this holds even if we do not
1517        // care about this packet and might override it before
1518        // it gets retried
1519        return true;
1520    } else {
1521        // As part of the call to sendTimingReq the packet is
1522        // forwarded to all neighbouring caches (and any caches
1523        // above them) as a snoop. Thus at this point we know if
1524        // any of the neighbouring caches are responding, and if
1525        // so, we know it is dirty, and we can determine if it is
1526        // being passed as Modified, making our MSHR the ordering
1527        // point
1528        bool pending_modified_resp = !pkt->hasSharers() &&
1529            pkt->cacheResponding();
1530        markInService(mshr, pending_modified_resp);
1531
1532        if (pkt->isClean() && blk && blk->isDirty()) {
1533            // A cache clean operation is looking for a dirty
1534            // block. If a dirty block is encountered, a WriteClean
1535            // will update any copies on the path to memory up to
1536            // the point of reference.
1537            DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
1538                    __func__, pkt->print(), blk->print());
1539            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
1540                                             pkt->id);
1541            PacketList writebacks;
1542            writebacks.push_back(wb_pkt);
1543            doWritebacks(writebacks, 0);
1544        }
1545
1546        return false;
1547    }
1548}
1549
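// Send a write queue entry (a writeback or an uncacheable write)
// downstream as-is; same return convention as sendMSHRQueuePacket().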
1550bool
1551BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
1552{
1553    assert(wq_entry);
1554
1555    // always a single target for write queue entries
1556    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;
1557
1558    DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());
1559
1560    // forward as is, both for evictions and uncacheable writes
1561    if (!memSidePort.sendTimingReq(tgt_pkt)) {
1562        // note that we have now masked any requestBus and
1563        // schedSendEvent calls (we will wait for a retry before
1564        // doing anything), and this holds even if we do not
1565        // care about this packet and might override it before
1566        // it gets retried
1567        return true;
1568    } else {
1569        markInService(wq_entry);
1570        return false;
1571    }
1572}
1573
1574void
1575BaseCache::serialize(CheckpointOut &cp) const
1576{
1577    bool dirty(isDirty());
1578
1579    if (dirty) {
1580        warn("*** The cache still contains dirty data. ***\n");
1581        warn("    Make sure to drain the system using the correct flags.\n");
1582        warn("    This checkpoint will not restore correctly " \
1583             "and dirty data in the cache will be lost!\n");
1584    }
1585
1586    // Since we don't checkpoint the data in the cache, any dirty data
1587    // will be lost when restoring from a checkpoint of a system that
1588    // wasn't drained properly. Flag the checkpoint as invalid if the
1589    // cache contains dirty data.
1590    bool bad_checkpoint(dirty);
1591    SERIALIZE_SCALAR(bad_checkpoint);
1592}
1593
1594void
1595BaseCache::unserialize(CheckpointIn &cp)
1596{
1597    bool bad_checkpoint;
1598    UNSERIALIZE_SCALAR(bad_checkpoint);
1599    if (bad_checkpoint) {
1600        fatal("Restoring from checkpoints with dirty caches is not "
1601              "supported in the classic memory system. Please remove any "
1602              "caches or drain them properly before taking checkpoints.\n");
1603    }
1604}
1605
1606void
1607BaseCache::regStats()
1608{
1609    MemObject::regStats();
1610
1611    using namespace Stats;
1612
1613    // Hit statistics
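    // Each per-command stat below is a vector with one entry per
    // master in the system, so every requesting master gets its own
    // subname in the output.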
1614    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1615        MemCmd cmd(access_idx);
1616        const string &cstr = cmd.toString();
1617
1618        hits[access_idx]
1619            .init(system->maxMasters())
1620            .name(name() + "." + cstr + "_hits")
1621            .desc("number of " + cstr + " hits")
1622            .flags(total | nozero | nonan)
1623            ;
1624        for (int i = 0; i < system->maxMasters(); i++) {
1625            hits[access_idx].subname(i, system->getMasterName(i));
1626        }
1627    }
1628
1629// These macros make it easier to sum the right subset of commands and
1630// to change the subset of commands that are considered "demand" vs
1631// "non-demand"
1632#define SUM_DEMAND(s) \
1633    (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \
1634     s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq])
1635
1636// should writebacks be included here?  prior code was inconsistent...
1637#define SUM_NON_DEMAND(s) \
1638    (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq])
1639
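    // e.g. demandHits below is SUM_DEMAND(hits): the elementwise sum
    // of the hit vectors of the six demand commands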
1640    demandHits
1641        .name(name() + ".demand_hits")
1642        .desc("number of demand (read+write) hits")
1643        .flags(total | nozero | nonan)
1644        ;
1645    demandHits = SUM_DEMAND(hits);
1646    for (int i = 0; i < system->maxMasters(); i++) {
1647        demandHits.subname(i, system->getMasterName(i));
1648    }
1649
1650    overallHits
1651        .name(name() + ".overall_hits")
1652        .desc("number of overall hits")
1653        .flags(total | nozero | nonan)
1654        ;
1655    overallHits = demandHits + SUM_NON_DEMAND(hits);
1656    for (int i = 0; i < system->maxMasters(); i++) {
1657        overallHits.subname(i, system->getMasterName(i));
1658    }
1659
1660    // Miss statistics
1661    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1662        MemCmd cmd(access_idx);
1663        const string &cstr = cmd.toString();
1664
1665        misses[access_idx]
1666            .init(system->maxMasters())
1667            .name(name() + "." + cstr + "_misses")
1668            .desc("number of " + cstr + " misses")
1669            .flags(total | nozero | nonan)
1670            ;
1671        for (int i = 0; i < system->maxMasters(); i++) {
1672            misses[access_idx].subname(i, system->getMasterName(i));
1673        }
1674    }
1675
1676    demandMisses
1677        .name(name() + ".demand_misses")
1678        .desc("number of demand (read+write) misses")
1679        .flags(total | nozero | nonan)
1680        ;
1681    demandMisses = SUM_DEMAND(misses);
1682    for (int i = 0; i < system->maxMasters(); i++) {
1683        demandMisses.subname(i, system->getMasterName(i));
1684    }
1685
1686    overallMisses
1687        .name(name() + ".overall_misses")
1688        .desc("number of overall misses")
1689        .flags(total | nozero | nonan)
1690        ;
1691    overallMisses = demandMisses + SUM_NON_DEMAND(misses);
1692    for (int i = 0; i < system->maxMasters(); i++) {
1693        overallMisses.subname(i, system->getMasterName(i));
1694    }
1695
1696    // Miss latency statistics
1697    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1698        MemCmd cmd(access_idx);
1699        const string &cstr = cmd.toString();
1700
1701        missLatency[access_idx]
1702            .init(system->maxMasters())
1703            .name(name() + "." + cstr + "_miss_latency")
1704            .desc("number of " + cstr + " miss cycles")
1705            .flags(total | nozero | nonan)
1706            ;
1707        for (int i = 0; i < system->maxMasters(); i++) {
1708            missLatency[access_idx].subname(i, system->getMasterName(i));
1709        }
1710    }
1711
1712    demandMissLatency
1713        .name(name() + ".demand_miss_latency")
1714        .desc("number of demand (read+write) miss cycles")
1715        .flags(total | nozero | nonan)
1716        ;
1717    demandMissLatency = SUM_DEMAND(missLatency);
1718    for (int i = 0; i < system->maxMasters(); i++) {
1719        demandMissLatency.subname(i, system->getMasterName(i));
1720    }
1721
1722    overallMissLatency
1723        .name(name() + ".overall_miss_latency")
1724        .desc("number of overall miss cycles")
1725        .flags(total | nozero | nonan)
1726        ;
1727    overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
1728    for (int i = 0; i < system->maxMasters(); i++) {
1729        overallMissLatency.subname(i, system->getMasterName(i));
1730    }
1731
1732    // access formulas
1733    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1734        MemCmd cmd(access_idx);
1735        const string &cstr = cmd.toString();
1736
1737        accesses[access_idx]
1738            .name(name() + "." + cstr + "_accesses")
1739            .desc("number of " + cstr + " accesses (hits + misses)")
1740            .flags(total | nozero | nonan)
1741            ;
1742        accesses[access_idx] = hits[access_idx] + misses[access_idx];
1743
1744        for (int i = 0; i < system->maxMasters(); i++) {
1745            accesses[access_idx].subname(i, system->getMasterName(i));
1746        }
1747    }
1748
1749    demandAccesses
1750        .name(name() + ".demand_accesses")
1751        .desc("number of demand (read+write) accesses")
1752        .flags(total | nozero | nonan)
1753        ;
1754    demandAccesses = demandHits + demandMisses;
1755    for (int i = 0; i < system->maxMasters(); i++) {
1756        demandAccesses.subname(i, system->getMasterName(i));
1757    }
1758
1759    overallAccesses
1760        .name(name() + ".overall_accesses")
1761        .desc("number of overall (read+write) accesses")
1762        .flags(total | nozero | nonan)
1763        ;
1764    overallAccesses = overallHits + overallMisses;
1765    for (int i = 0; i < system->maxMasters(); i++) {
1766        overallAccesses.subname(i, system->getMasterName(i));
1767    }
1768
1769    // miss rate formulas
1770    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1771        MemCmd cmd(access_idx);
1772        const string &cstr = cmd.toString();
1773
1774        missRate[access_idx]
1775            .name(name() + "." + cstr + "_miss_rate")
1776            .desc("miss rate for " + cstr + " accesses")
1777            .flags(total | nozero | nonan)
1778            ;
1779        missRate[access_idx] = misses[access_idx] / accesses[access_idx];
1780
1781        for (int i = 0; i < system->maxMasters(); i++) {
1782            missRate[access_idx].subname(i, system->getMasterName(i));
1783        }
1784    }
1785
1786    demandMissRate
1787        .name(name() + ".demand_miss_rate")
1788        .desc("miss rate for demand accesses")
1789        .flags(total | nozero | nonan)
1790        ;
1791    demandMissRate = demandMisses / demandAccesses;
1792    for (int i = 0; i < system->maxMasters(); i++) {
1793        demandMissRate.subname(i, system->getMasterName(i));
1794    }
1795
1796    overallMissRate
1797        .name(name() + ".overall_miss_rate")
1798        .desc("miss rate for overall accesses")
1799        .flags(total | nozero | nonan)
1800        ;
1801    overallMissRate = overallMisses / overallAccesses;
1802    for (int i = 0; i < system->maxMasters(); i++) {
1803        overallMissRate.subname(i, system->getMasterName(i));
1804    }
1805
1806    // miss latency formulas
1807    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1808        MemCmd cmd(access_idx);
1809        const string &cstr = cmd.toString();
1810
1811        avgMissLatency[access_idx]
1812            .name(name() + "." + cstr + "_avg_miss_latency")
1813            .desc("average " + cstr + " miss latency")
1814            .flags(total | nozero | nonan)
1815            ;
1816        avgMissLatency[access_idx] =
1817            missLatency[access_idx] / misses[access_idx];
1818
1819        for (int i = 0; i < system->maxMasters(); i++) {
1820            avgMissLatency[access_idx].subname(i, system->getMasterName(i));
1821        }
1822    }
1823
1824    demandAvgMissLatency
1825        .name(name() + ".demand_avg_miss_latency")
1826        .desc("average demand miss latency")
1827        .flags(total | nozero | nonan)
1828        ;
1829    demandAvgMissLatency = demandMissLatency / demandMisses;
1830    for (int i = 0; i < system->maxMasters(); i++) {
1831        demandAvgMissLatency.subname(i, system->getMasterName(i));
1832    }
1833
1834    overallAvgMissLatency
1835        .name(name() + ".overall_avg_miss_latency")
1836        .desc("average overall miss latency")
1837        .flags(total | nozero | nonan)
1838        ;
1839    overallAvgMissLatency = overallMissLatency / overallMisses;
1840    for (int i = 0; i < system->maxMasters(); i++) {
1841        overallAvgMissLatency.subname(i, system->getMasterName(i));
1842    }
1843
1844    blocked_cycles.init(NUM_BLOCKED_CAUSES);
1845    blocked_cycles
1846        .name(name() + ".blocked_cycles")
1847        .desc("number of cycles access was blocked")
1848        .subname(Blocked_NoMSHRs, "no_mshrs")
1849        .subname(Blocked_NoTargets, "no_targets")
1850        ;
1851
1852
1853    blocked_causes.init(NUM_BLOCKED_CAUSES);
1854    blocked_causes
1855        .name(name() + ".blocked")
1856        .desc("number of times access was blocked")
1857        .subname(Blocked_NoMSHRs, "no_mshrs")
1858        .subname(Blocked_NoTargets, "no_targets")
1859        ;
1860
1861    avg_blocked
1862        .name(name() + ".avg_blocked_cycles")
1863        .desc("average number of cycles each access was blocked")
1864        .subname(Blocked_NoMSHRs, "no_mshrs")
1865        .subname(Blocked_NoTargets, "no_targets")
1866        ;
1867
1868    avg_blocked = blocked_cycles / blocked_causes;
1869
1870    unusedPrefetches
1871        .name(name() + ".unused_prefetches")
1872        .desc("number of HardPF blocks evicted w/o reference")
1873        .flags(nozero)
1874        ;
1875
1876    writebacks
1877        .init(system->maxMasters())
1878        .name(name() + ".writebacks")
1879        .desc("number of writebacks")
1880        .flags(total | nozero | nonan)
1881        ;
1882    for (int i = 0; i < system->maxMasters(); i++) {
1883        writebacks.subname(i, system->getMasterName(i));
1884    }
1885
1886    // MSHR statistics
1887    // MSHR hit statistics
1888    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1889        MemCmd cmd(access_idx);
1890        const string &cstr = cmd.toString();
1891
1892        mshr_hits[access_idx]
1893            .init(system->maxMasters())
1894            .name(name() + "." + cstr + "_mshr_hits")
1895            .desc("number of " + cstr + " MSHR hits")
1896            .flags(total | nozero | nonan)
1897            ;
1898        for (int i = 0; i < system->maxMasters(); i++) {
1899            mshr_hits[access_idx].subname(i, system->getMasterName(i));
1900        }
1901    }
1902
1903    demandMshrHits
1904        .name(name() + ".demand_mshr_hits")
1905        .desc("number of demand (read+write) MSHR hits")
1906        .flags(total | nozero | nonan)
1907        ;
1908    demandMshrHits = SUM_DEMAND(mshr_hits);
1909    for (int i = 0; i < system->maxMasters(); i++) {
1910        demandMshrHits.subname(i, system->getMasterName(i));
1911    }
1912
1913    overallMshrHits
1914        .name(name() + ".overall_mshr_hits")
1915        .desc("number of overall MSHR hits")
1916        .flags(total | nozero | nonan)
1917        ;
1918    overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
1919    for (int i = 0; i < system->maxMasters(); i++) {
1920        overallMshrHits.subname(i, system->getMasterName(i));
1921    }
1922
1923    // MSHR miss statistics
1924    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1925        MemCmd cmd(access_idx);
1926        const string &cstr = cmd.toString();
1927
1928        mshr_misses[access_idx]
1929            .init(system->maxMasters())
1930            .name(name() + "." + cstr + "_mshr_misses")
1931            .desc("number of " + cstr + " MSHR misses")
1932            .flags(total | nozero | nonan)
1933            ;
1934        for (int i = 0; i < system->maxMasters(); i++) {
1935            mshr_misses[access_idx].subname(i, system->getMasterName(i));
1936        }
1937    }
1938
1939    demandMshrMisses
1940        .name(name() + ".demand_mshr_misses")
1941        .desc("number of demand (read+write) MSHR misses")
1942        .flags(total | nozero | nonan)
1943        ;
1944    demandMshrMisses = SUM_DEMAND(mshr_misses);
1945    for (int i = 0; i < system->maxMasters(); i++) {
1946        demandMshrMisses.subname(i, system->getMasterName(i));
1947    }
1948
1949    overallMshrMisses
1950        .name(name() + ".overall_mshr_misses")
1951        .desc("number of overall MSHR misses")
1952        .flags(total | nozero | nonan)
1953        ;
1954    overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
1955    for (int i = 0; i < system->maxMasters(); i++) {
1956        overallMshrMisses.subname(i, system->getMasterName(i));
1957    }
1958
1959    // MSHR miss latency statistics
1960    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1961        MemCmd cmd(access_idx);
1962        const string &cstr = cmd.toString();
1963
1964        mshr_miss_latency[access_idx]
1965            .init(system->maxMasters())
1966            .name(name() + "." + cstr + "_mshr_miss_latency")
1967            .desc("number of " + cstr + " MSHR miss cycles")
1968            .flags(total | nozero | nonan)
1969            ;
1970        for (int i = 0; i < system->maxMasters(); i++) {
1971            mshr_miss_latency[access_idx].subname(i, system->getMasterName(i));
1972        }
1973    }
1974
1975    demandMshrMissLatency
1976        .name(name() + ".demand_mshr_miss_latency")
1977        .desc("number of demand (read+write) MSHR miss cycles")
1978        .flags(total | nozero | nonan)
1979        ;
1980    demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
1981    for (int i = 0; i < system->maxMasters(); i++) {
1982        demandMshrMissLatency.subname(i, system->getMasterName(i));
1983    }
1984
1985    overallMshrMissLatency
1986        .name(name() + ".overall_mshr_miss_latency")
1987        .desc("number of overall MSHR miss cycles")
1988        .flags(total | nozero | nonan)
1989        ;
1990    overallMshrMissLatency =
1991        demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
1992    for (int i = 0; i < system->maxMasters(); i++) {
1993        overallMshrMissLatency.subname(i, system->getMasterName(i));
1994    }
1995
1996    // MSHR uncacheable statistics
1997    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1998        MemCmd cmd(access_idx);
1999        const string &cstr = cmd.toString();
2000
2001        mshr_uncacheable[access_idx]
2002            .init(system->maxMasters())
2003            .name(name() + "." + cstr + "_mshr_uncacheable")
2004            .desc("number of " + cstr + " MSHR uncacheable accesses")
2005            .flags(total | nozero | nonan)
2006            ;
2007        for (int i = 0; i < system->maxMasters(); i++) {
2008            mshr_uncacheable[access_idx].subname(i, system->getMasterName(i));
2009        }
2010    }
2011
2012    overallMshrUncacheable
2013        .name(name() + ".overall_mshr_uncacheable_misses")
2014        .desc("number of overall MSHR uncacheable misses")
2015        .flags(total | nozero | nonan)
2016        ;
2017    overallMshrUncacheable =
2018        SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
2019    for (int i = 0; i < system->maxMasters(); i++) {
2020        overallMshrUncacheable.subname(i, system->getMasterName(i));
2021    }
2022
2023    // MSHR uncacheable latency statistics
2024    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2025        MemCmd cmd(access_idx);
2026        const string &cstr = cmd.toString();
2027
2028        mshr_uncacheable_lat[access_idx]
2029            .init(system->maxMasters())
2030            .name(name() + "." + cstr + "_mshr_uncacheable_latency")
2031            .desc("number of " + cstr + " MSHR uncacheable cycles")
2032            .flags(total | nozero | nonan)
2033            ;
2034        for (int i = 0; i < system->maxMasters(); i++) {
2035            mshr_uncacheable_lat[access_idx].subname(
2036                i, system->getMasterName(i));
2037        }
2038    }
2039
2040    overallMshrUncacheableLatency
2041        .name(name() + ".overall_mshr_uncacheable_latency")
2042        .desc("number of overall MSHR uncacheable cycles")
2043        .flags(total | nozero | nonan)
2044        ;
2045    overallMshrUncacheableLatency =
2046        SUM_DEMAND(mshr_uncacheable_lat) +
2047        SUM_NON_DEMAND(mshr_uncacheable_lat);
2048    for (int i = 0; i < system->maxMasters(); i++) {
2049        overallMshrUncacheableLatency.subname(i, system->getMasterName(i));
2050    }
2051
2052#if 0
2053    // MSHR access formulas
2054    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2055        MemCmd cmd(access_idx);
2056        const string &cstr = cmd.toString();
2057
2058        mshrAccesses[access_idx]
2059            .name(name() + "." + cstr + "_mshr_accesses")
2060            .desc("number of " + cstr + " mshr accesses (hits + misses)")
2061            .flags(total | nozero | nonan)
2062            ;
2063        mshrAccesses[access_idx] =
2064            mshr_hits[access_idx] + mshr_misses[access_idx]
2065            + mshr_uncacheable[access_idx];
2066    }
2067
2068    demandMshrAccesses
2069        .name(name() + ".demand_mshr_accesses")
2070        .desc("number of demand (read+write) mshr accesses")
2071        .flags(total | nozero | nonan)
2072        ;
2073    demandMshrAccesses = demandMshrHits + demandMshrMisses;
2074
2075    overallMshrAccesses
2076        .name(name() + ".overall_mshr_accesses")
2077        .desc("number of overall (read+write) mshr accesses")
2078        .flags(total | nozero | nonan)
2079        ;
2080    overallMshrAccesses = overallMshrHits + overallMshrMisses
2081        + overallMshrUncacheable;
2082#endif
2083
2084    // MSHR miss rate formulas
2085    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2086        MemCmd cmd(access_idx);
2087        const string &cstr = cmd.toString();
2088
2089        mshrMissRate[access_idx]
2090            .name(name() + "." + cstr + "_mshr_miss_rate")
2091            .desc("mshr miss rate for " + cstr + " accesses")
2092            .flags(total | nozero | nonan)
2093            ;
2094        mshrMissRate[access_idx] =
2095            mshr_misses[access_idx] / accesses[access_idx];
2096
2097        for (int i = 0; i < system->maxMasters(); i++) {
2098            mshrMissRate[access_idx].subname(i, system->getMasterName(i));
2099        }
2100    }
2101
2102    demandMshrMissRate
2103        .name(name() + ".demand_mshr_miss_rate")
2104        .desc("mshr miss rate for demand accesses")
2105        .flags(total | nozero | nonan)
2106        ;
2107    demandMshrMissRate = demandMshrMisses / demandAccesses;
2108    for (int i = 0; i < system->maxMasters(); i++) {
2109        demandMshrMissRate.subname(i, system->getMasterName(i));
2110    }
2111
2112    overallMshrMissRate
2113        .name(name() + ".overall_mshr_miss_rate")
2114        .desc("mshr miss rate for overall accesses")
2115        .flags(total | nozero | nonan)
2116        ;
2117    overallMshrMissRate = overallMshrMisses / overallAccesses;
2118    for (int i = 0; i < system->maxMasters(); i++) {
2119        overallMshrMissRate.subname(i, system->getMasterName(i));
2120    }
2121
2122    // mshrMiss latency formulas
2123    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2124        MemCmd cmd(access_idx);
2125        const string &cstr = cmd.toString();
2126
2127        avgMshrMissLatency[access_idx]
2128            .name(name() + "." + cstr + "_avg_mshr_miss_latency")
2129            .desc("average " + cstr + " mshr miss latency")
2130            .flags(total | nozero | nonan)
2131            ;
2132        avgMshrMissLatency[access_idx] =
2133            mshr_miss_latency[access_idx] / mshr_misses[access_idx];
2134
2135        for (int i = 0; i < system->maxMasters(); i++) {
2136            avgMshrMissLatency[access_idx].subname(
2137                i, system->getMasterName(i));
2138        }
2139    }
2140
2141    demandAvgMshrMissLatency
2142        .name(name() + ".demand_avg_mshr_miss_latency")
2143        .desc("average demand mshr miss latency")
2144        .flags(total | nozero | nonan)
2145        ;
2146    demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
2147    for (int i = 0; i < system->maxMasters(); i++) {
2148        demandAvgMshrMissLatency.subname(i, system->getMasterName(i));
2149    }
2150
2151    overallAvgMshrMissLatency
2152        .name(name() + ".overall_avg_mshr_miss_latency")
2153        .desc("average overall mshr miss latency")
2154        .flags(total | nozero | nonan)
2155        ;
2156    overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
2157    for (int i = 0; i < system->maxMasters(); i++) {
2158        overallAvgMshrMissLatency.subname(i, system->getMasterName(i));
2159    }
2160
2161    // mshrUncacheable latency formulas
2162    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2163        MemCmd cmd(access_idx);
2164        const string &cstr = cmd.toString();
2165
2166        avgMshrUncacheableLatency[access_idx]
2167            .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency")
2168            .desc("average " + cstr + " mshr uncacheable latency")
2169            .flags(total | nozero | nonan)
2170            ;
2171        avgMshrUncacheableLatency[access_idx] =
2172            mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
2173
2174        for (int i = 0; i < system->maxMasters(); i++) {
2175            avgMshrUncacheableLatency[access_idx].subname(
2176                i, system->getMasterName(i));
2177        }
2178    }
2179
2180    overallAvgMshrUncacheableLatency
2181        .name(name() + ".overall_avg_mshr_uncacheable_latency")
2182        .desc("average overall mshr uncacheable latency")
2183        .flags(total | nozero | nonan)
2184        ;
2185    overallAvgMshrUncacheableLatency =
2186        overallMshrUncacheableLatency / overallMshrUncacheable;
2187    for (int i = 0; i < system->maxMasters(); i++) {
2188        overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i));
2189    }
2190
2191    replacements
2192        .name(name() + ".replacements")
2193        .desc("number of replacements")
2194        ;
2195}
2196
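// Register probe points so that listeners attached through the probe
// manager can observe every packet that hits or misses in this cache.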
2197void
2198BaseCache::regProbePoints()
2199{
2200    ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit");
2201    ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss");
2202}
2203
2204///////////////
2205//
2206// CpuSidePort
2207//
2208///////////////
2209bool
2210BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
2211{
2212    // Snoops shouldn't happen when bypassing caches
2213    assert(!cache->system->bypassCaches());
2214
2215    assert(pkt->isResponse());
2216
2217    // Express snoop responses from master to slave, e.g., from L1 to L2
2218    cache->recvTimingSnoopResp(pkt);
2219    return true;
2220}
2221
2222
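// Check whether a new timing request can be accepted: express snoops
// (and packets in cache-bypass mode) always pass, while a blocked
// cache, or one that already owes a retry, rejects the packet and
// notes that a retry must be sent.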
2223bool
2224BaseCache::CpuSidePort::tryTiming(PacketPtr pkt)
2225{
2226    if (cache->system->bypassCaches() || pkt->isExpressSnoop()) {
2227        // always let express snoops (and bypass-mode packets) through, even if blocked
2228        return true;
2229    } else if (blocked || mustSendRetry) {
2230        // either already committed to send a retry, or blocked
2231        mustSendRetry = true;
2232        return false;
2233    }
2234    mustSendRetry = false;
2235    return true;
2236}
2237
2238bool
2239BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
2240{
2241    assert(pkt->isRequest());
2242
2243    if (cache->system->bypassCaches()) {
2244        // Just forward the packet if caches are disabled.
2245        // @todo This should really enqueue the packet rather than forward it
2246        bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
2247        assert(success);
2248        return true;
2249    } else if (tryTiming(pkt)) {
2250        cache->recvTimingReq(pkt);
2251        return true;
2252    }
2253    return false;
2254}
2255
2256Tick
2257BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
2258{
2259    if (cache->system->bypassCaches()) {
2260        // Forward the request if the system is in cache bypass mode.
2261        return cache->memSidePort.sendAtomic(pkt);
2262    } else {
2263        return cache->recvAtomic(pkt);
2264    }
2265}
2266
2267void
2268BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
2269{
2270    if (cache->system->bypassCaches()) {
2271        // The cache should be flushed if we are in cache bypass mode,
2272        // so we don't need to check if we need to update anything.
2273        cache->memSidePort.sendFunctional(pkt);
2274        return;
2275    }
2276
2277    // functional request
2278    cache->functionalAccess(pkt, true);
2279}
2280
2281AddrRangeList
2282BaseCache::CpuSidePort::getAddrRanges() const
2283{
2284    return cache->getAddrRanges();
2285}
2286
2287
2288BaseCache::
2289CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
2290                         const std::string &_label)
2291    : CacheSlavePort(_name, _cache, _label), cache(_cache)
2292{
2293}
2294
2295///////////////
2296//
2297// MemSidePort
2298//
2299///////////////
2300bool
2301BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
2302{
2303    cache->recvTimingResp(pkt);
2304    return true;
2305}
2306
2307// Express snooping requests to memside port
2308void
2309BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2310{
2311    // Snoops shouldn't happen when bypassing caches
2312    assert(!cache->system->bypassCaches());
2313
2314    // handle snooping requests
2315    cache->recvTimingSnoopReq(pkt);
2316}
2317
2318Tick
2319BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
2320{
2321    // Snoops shouldn't happen when bypassing caches
2322    assert(!cache->system->bypassCaches());
2323
2324    return cache->recvAtomicSnoop(pkt);
2325}
2326
2327void
2328BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
2329{
2330    // Snoops shouldn't happen when bypassing caches
2331    assert(!cache->system->bypassCaches());
2332
2333    // functional snoop (note that in contrast to atomic we don't have
2334    // a specific functionalSnoop method, as they have the same
2335    // behaviour regardless)
2336    cache->functionalAccess(pkt, false);
2337}
2338
2339void
2340BaseCache::CacheReqPacketQueue::sendDeferredPacket()
2341{
2342    // sanity check
2343    assert(!waitingOnRetry);
2344
2345    // there should never be any deferred request packets in the
2346    // queue; instead we rely on the cache to provide the packets
2347    // from the MSHR queue or write queue
2348    assert(deferredPacketReadyTime() == MaxTick);
2349
2350    // check for request packets (requests & writebacks)
2351    QueueEntry* entry = cache.getNextQueueEntry();
2352
2353    if (!entry) {
2354        // can happen if e.g. we attempt a writeback and fail, but
2355        // before the retry, the writeback is eliminated because
2356        // we snoop another cache's ReadEx.
2357    } else {
2358        // let our snoop responses go first if there are responses to
2359        // the same addresses
2360        if (checkConflictingSnoop(entry->blkAddr)) {
2361            return;
2362        }
2363        waitingOnRetry = entry->sendPacket(cache);
2364    }
2365
2366    // if we succeeded and are not waiting for a retry, schedule the
2367    // next send considering when the next queue is ready, note that
2368    // snoop responses have their own packet queue and thus schedule
2369    // their own events
2370    if (!waitingOnRetry) {
2371        schedSendEvent(cache.nextQueueReadyTime());
2372    }
2373}
2374
2375BaseCache::MemSidePort::MemSidePort(const std::string &_name,
2376                                    BaseCache *_cache,
2377                                    const std::string &_label)
2378    : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
2379      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
2380      _snoopRespQueue(*_cache, *this, _label), cache(_cache)
2381{
2382}
2383
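// Track streaming writes: writes that continue where the previous one
// ended grow a byte count that promotes the allocator from ALLOCATE to
// COALESCE (past coalesceLimit) and on to NO_ALLOCATE (past
// noAllocateLimit); a non-contiguous write resets both the count and
// the mode.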
2384void
2385WriteAllocator::updateMode(Addr write_addr, unsigned write_size,
2386                           Addr blk_addr)
2387{
2388    // check if we are continuing where the last write ended
2389    if (nextAddr == write_addr) {
2390        delayCtr[blk_addr] = delayThreshold;
2391        // stop if we have already saturated
2392        if (mode != WriteMode::NO_ALLOCATE) {
2393            byteCount += write_size;
2394            // switch to streaming mode if we have passed the lower
2395            // threshold
2396            if (mode == WriteMode::ALLOCATE &&
2397                byteCount > coalesceLimit) {
2398                mode = WriteMode::COALESCE;
2399                DPRINTF(Cache, "Switched to write coalescing\n");
2400            } else if (mode == WriteMode::COALESCE &&
2401                       byteCount > noAllocateLimit) {
2402                // and keep counting, switching to non-allocating
2403                // mode once we pass the upper threshold
2404                mode = WriteMode::NO_ALLOCATE;
2405                DPRINTF(Cache, "Switched to write-no-allocate\n");
2406            }
2407        }
2408    } else {
2409        // we did not see a write matching the previous one, start
2410        // over again
2411        byteCount = write_size;
2412        mode = WriteMode::ALLOCATE;
2413        resetDelay(blk_addr);
2414    }
2415    nextAddr = write_addr + write_size;
2416}
2417
2418WriteAllocator*
2419WriteAllocatorParams::create()
2420{
2421    return new WriteAllocator(this);
2422}
2423