/*
 * Copyright (c) 2012-2013, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "mem/cache/base.hh"

#include "base/compiler.hh"
#include "base/logging.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "mem/cache/queue_entry.hh"
#include "params/BaseCache.hh"
#include "sim/core.hh"

class BaseMasterPort;
class BaseSlavePort;

using namespace std;

BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
                                          BaseCache *_cache,
                                          const std::string &_label)
    : QueuedSlavePort(_name, _cache, queue), queue(*_cache, *this, _label),
      blocked(false), mustSendRetry(false),
      sendRetryEvent([this]{ processSendRetry(); }, _name)
{
}

BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
    : MemObject(p),
      cpuSidePort(p->name + ".cpu_side", this, "CpuSidePort"),
      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
      tags(p->tags),
      prefetcher(p->prefetcher),
      prefetchOnAccess(p->prefetch_on_access),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri),
      blkSize(blk_size),
      lookupLatency(p->tag_latency),
      dataLatency(p->data_latency),
      forwardLatency(p->tag_latency),
      fillLatency(p->data_latency),
      responseLatency(p->response_latency),
      numTarget(p->tgts_per_mshr),
      forwardSnoops(true),
      clusivity(p->clusivity),
      isReadOnly(p->is_read_only),
      blocked(0),
      order(0),
      noTargetMSHR(nullptr),
      missCount(p->max_miss_count),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
      system(p->system)
{
    // the MSHR queue has no reserve entries as we check the MSHR
    // queue on every single allocation, whereas the write queue has
    // as many reserve entries as we have MSHRs, since every MSHR may
    // eventually require a writeback, and we do not check the write
    // buffer before committing to an MSHR

    // forward snoops is overridden in init() once we can query
    // whether the connected master is actually snooping or not

    tempBlock = new CacheBlk();
    tempBlock->data = new uint8_t[blkSize];

    tags->setCache(this);
    if (prefetcher)
        prefetcher->setCache(this);
}

BaseCache::~BaseCache()
{
    delete [] tempBlock->data;
    delete tempBlock;
}

void
BaseCache::CacheSlavePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(CachePort, "Port is blocking new requests\n");
    blocked = true;
    // if we already scheduled a retry in this cycle, but it has not yet
    // happened, cancel it
    if (sendRetryEvent.scheduled()) {
        owner.deschedule(sendRetryEvent);
        DPRINTF(CachePort, "Port descheduled retry\n");
        mustSendRetry = true;
    }
}

void
BaseCache::CacheSlavePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(CachePort, "Port is accepting new requests\n");
    blocked = false;
    if (mustSendRetry) {
        // @TODO: need to find a better time (next cycle?)
        owner.schedule(sendRetryEvent, curTick() + 1);
    }
}

void
BaseCache::CacheSlavePort::processSendRetry()
{
    DPRINTF(CachePort, "Port is sending retry\n");

    // reset the flag and call retry
    mustSendRetry = false;
    sendRetryReq();
}

void
BaseCache::init()
{
    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
        fatal("Cache ports on %s are not connected\n", name());
    cpuSidePort.sendRangeChange();
    forwardSnoops = cpuSidePort.isSnooping();
}

BaseMasterPort &
BaseCache::getMasterPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_side") {
        return memSidePort;
    } else {
        return MemObject::getMasterPort(if_name, idx);
    }
}

BaseSlavePort &
BaseCache::getSlavePort(const std::string &if_name, PortID idx)
{
    if (if_name == "cpu_side") {
        return cpuSidePort;
    } else {
        return MemObject::getSlavePort(if_name, idx);
    }
}

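// Check whether an address falls within any of the address ranges this
// cache is responsible for.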
bool
BaseCache::inRange(Addr addr) const
{
    for (const auto& r : addrRanges) {
        if (r.contains(addr)) {
            return true;
        }
    }
    return false;
}

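// A request was satisfied by this cache: either schedule the response on
// the CPU-side port, or dispose of the packet if no response is needed.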
void
BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    if (pkt->needsResponse()) {
        pkt->makeTimingResponse();
        // @todo: Make someone pay for this
        pkt->headerDelay = pkt->payloadDelay = 0;

        // In this case we are considering request_time that takes
        // into account the delay of the xbar, if any, and just lat,
        // neglecting responseLatency, modelling hit latency just as
        // lookupLatency or the value of lat overridden by access(),
        // which calls the accessBlock() function.
        cpuSidePort.schedTimingResp(pkt, request_time, true);
    } else {
        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                pkt->print());

        // queue the packet for deletion, as the sending cache is
        // still relying on it; if the block is found in access(),
        // CleanEvict and Writeback messages will be deleted
        // here as well
        pendingDelete.reset(pkt);
    }
}

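// A request missed (or hit a block in the wrong state): coalesce it into a
// matching MSHR if one exists, otherwise allocate a write-buffer entry or
// a new MSHR for it.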
void
BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                               Tick forward_time, Tick request_time)
{
    if (mshr) {
        /// MSHR hit
        /// @note writebacks will be checked in getNextMSHR()
        /// for any conflicting requests to the same block

        //@todo remove hw_pf here

        // Coalesce unless it was a software prefetch (see above).
        if (pkt) {
            assert(!pkt->isWriteback());
            // CleanEvicts corresponding to blocks which have
            // outstanding requests in MSHRs are simply sunk here
            if (pkt->cmd == MemCmd::CleanEvict) {
                pendingDelete.reset(pkt);
            } else if (pkt->cmd == MemCmd::WriteClean) {
                // A WriteClean should never coalesce with any
                // outstanding cache maintenance requests.

                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                        pkt->print());

                assert(pkt->req->masterId() < system->maxMasters());
                mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // We use forward_time here because it is the same
                // considering new targets. We have multiple
                // requests for the same address here. It
                // specifies the latency to allocate an internal
                // buffer and to schedule an event to the queued
                // port and also takes into account the additional
                // delay of the xbar.
                mshr->allocateTarget(pkt, forward_time, order++,
                                     allocOnFill(pkt->cmd));
                if (mshr->getNumTargets() == numTarget) {
                    noTargetMSHR = mshr;
                    setBlocked(Blocked_NoTargets);
                    // need to be careful with this... if this mshr isn't
                    // ready yet (i.e. time > curTick()), we don't want to
                    // move it ahead of mshrs that are ready
                    // mshrQueue.moveToFront(mshr);
                }
            }
        }
    } else {
        // no MSHR
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
            // We use forward_time here because there is a
            // writeback or writeclean, forwarded to WriteBuffer.
            allocateWriteBuffer(pkt, forward_time);
        } else {
            if (blk && blk->isValid()) {
                // If we have a write miss to a valid block, we
                // need to mark the block non-readable. Otherwise
                // if we allow reads while there's an outstanding
                // write miss, the read could return stale data
                // out of the cache block... a more aggressive
                // system could detect the overlap (if any) and
                // forward data out of the MSHRs, but we don't do
                // that yet. Note that we do need to leave the
                // block valid so that it stays in the cache, in
                // case we get an upgrade response (and hence no
                // new data) when the write miss completes.
                // As long as CPUs do proper store/load forwarding
                // internally, and have a sufficiently weak memory
                // model, this is probably unnecessary, but at some
                // point it must have seemed like we needed it...
                assert((pkt->needsWritable() && !blk->isWritable()) ||
                       pkt->req->isCacheMaintenance());
                blk->status &= ~BlkReadable;
            }
            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }
    }
}

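// Handle a timing-mode request coming in from the CPU side: perform the
// tag/data access, hand the packet to the hit or miss path above, and
// notify the prefetcher where appropriate.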
void
BaseCache::recvTimingReq(PacketPtr pkt)
{
    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    // We use lookupLatency here because it specifies the latency of the
    // tag lookup performed by access().
    Cycles lat = lookupLatency;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() calls accessBlock() which can modify lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // copy writebacks to write buffer here to ensure they logically
        // precede anything happening below
        doWritebacks(writebacks, forward_time);
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just lat, that is the value of lookupLatency
    // as possibly modified by the access() function.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    // track time of availability of next prefetch, if any
    Tick next_pf_time = MaxTick;

    if (satisfied) {
        // if we need to notify the prefetcher we have to do it before
        // anything else as later handleTimingReqHit might turn the
        // packet into a response
        if (prefetcher &&
            (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
            if (blk)
                blk->status &= ~BlkHWPrefetched;

            // Don't notify on SWPrefetch
            if (!pkt->cmd.isSWPrefetch()) {
                assert(!pkt->req->isCacheMaintenance());
                next_pf_time = prefetcher->notify(pkt);
            }
        }

        handleTimingReqHit(pkt, blk, request_time);
    } else {
        handleTimingReqMiss(pkt, blk, forward_time, request_time);

        // We should call the prefetcher regardless of whether the
        // request is satisfied or not, and regardless of whether it is
        // in the MSHR or not. The request could be a ReadReq hit, but
        // still not satisfied (potentially because of a prior write to
        // the same cache line). So, even when not satisfied and an MSHR
        // is already allocated for this address, we need to let the
        // prefetcher know about the request.

        // Don't notify prefetcher on SWPrefetch or cache maintenance
        // operations
        if (prefetcher && pkt &&
            !pkt->cmd.isSWPrefetch() &&
            !pkt->req->isCacheMaintenance()) {
            next_pf_time = prefetcher->notify(pkt);
        }
    }

    if (next_pf_time != MaxTick) {
        schedMemSideSendEvent(next_pf_time);
    }
}

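// Turn an uncacheable write response around to the CPU side, charging
// responseLatency plus any header and payload delay accumulated on the way.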
void
BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
{
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort.schedTimingResp(pkt, completion_time, true);
}

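// Handle a response arriving from the memory side: fill the block if this
// is a fill, service the targets of the corresponding MSHR, and either
// retire the MSHR or re-schedule it for its deferred targets.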
void
BaseCache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    const bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    MSHR::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    PacketList writebacks;

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill());
        assert(blk != nullptr);
    }

    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
        // The block was marked not readable while there was a pending
        // cache maintenance operation, restore its flag.
        blk->status |= BlkReadable;
    }

    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
        // If at this point the referenced block is writable and the
        // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
        mshr->promoteWritable();
    }

    serviceMSHRTargets(mshr, pkt, blk, writebacks);

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding... see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        // while we deallocate an mshr from the queue we still have to
        // check the isFull condition before and after as we might
        // have been using the reserved entries already
        const bool was_full = mshrQueue.isFull();
        mshrQueue.deallocate(mshr);
        if (was_full && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }

    // if we used the temp block, check to see if it's valid and then
    // clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        evictBlock(blk, writebacks);
    }

    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}


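// Handle a request in atomic mode: perform the access inline, write back
// any evicted blocks along the way, and return the aggregate latency in
// ticks.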
Tick
BaseCache::recvAtomic(PacketPtr pkt)
{
    // We are in atomic mode so we pay just for lookupLatency here.
    Cycles lat = lookupLatency;

    // follow the same flow as in recvTimingReq, and check if a cache
    // above us is responding
    if (pkt->cacheResponding() && !pkt->isClean()) {
        assert(!pkt->req->isCacheInvalidate());
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if a cache is responding, and it had the line in Owned
        // rather than Modified state, we need to invalidate any
        // copies that are not on the same path to memory
        assert(pkt->needsWritable() && !pkt->responderHadWritable());
        lat += ticksToCycles(memSidePort.sendAtomic(pkt));

        return lat * clockPeriod();
    }

    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty
        // block. If a dirty block is encountered a WriteClean
        // will update any copies on the path to memory
        // up to the point of reference.
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);
    assert(writebacks.empty());

    if (!satisfied) {
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention. If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used the temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter), first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = evictBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}

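// Functional (debug) access: check every structure that could hold data
// for this address -- the tags, the MSHRs, the write buffer, and the ports
// on either side -- updating the packet as we go.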
void
BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found. As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty ||
        cpuSidePort.checkFunctional(pkt) ||
        mshrQueue.checkFunctional(pkt, blk_addr) ||
        writeBuffer.checkFunctional(pkt, blk_addr) ||
        memSidePort.checkFunctional(pkt);

    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (from_cpu_side) {
            memSidePort.sendFunctional(pkt);
        } else if (cpuSidePort.isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort.sendFunctionalSnoop(pkt);
        }
    }
}


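// Perform an atomic swap (SwapReq): the packet's write data replaces the
// block contents while the old contents are copied back into the packet;
// for a conditional swap the block is only overwritten if it matches the
// condition value carried in the request.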
void
BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = pkt->getOffset(blkSize);
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}

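// Pick the next queue entry to send downstream: a full write buffer takes
// priority, then ready misses, and, when both queues are idle, a prefetch
// may be issued if an MSHR slot is available.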
QueueEntry*
BaseCache::getNextQueueEntry()
{
    // Check both MSHR queue and write buffer for potential requests,
    // note that null does not mean there is no request, it could
    // simply be that it is not ready
    MSHR *miss_mshr = mshrQueue.getNext();
    WriteQueueEntry *wq_entry = writeBuffer.getNext();

    // If we got a write buffer request ready, first priority is a
    // full write buffer, otherwise we favour the miss requests
    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
        // need to search MSHR queue for conflicting earlier miss.
        MSHR *conflict_mshr =
            mshrQueue.findPending(wq_entry->blkAddr,
                                  wq_entry->isSecure);

        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
            // Service misses in order until conflict is cleared.
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue write
        return wq_entry;
    } else if (miss_mshr) {
        // need to check for conflicting earlier writeback
        WriteQueueEntry *conflict_mshr =
            writeBuffer.findPending(miss_mshr->blkAddr,
                                    miss_mshr->isSecure);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are
            // doing a write and we didn't have permissions
            // then subsequently saw a writeback (owned got evicted)
            // We need to make sure to perform the writeback first
            // To preserve the dirty data, then we can issue the write

            // should we return wq_entry here instead? I.e. do we
            // have to flush writes in order? I don't think so... not
            // for Alpha anyway. Maybe for x86?
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests. Try a prefetch.
    assert(!miss_mshr && !wq_entry);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = pkt->getBlockAddr(blkSize);
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt->req;
                delete pkt;
            }
        }
    }

    return nullptr;
}

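// Satisfy a request out of the given (valid) block: perform the swap,
// write, read, upgrade, or invalidation the packet asks for, and update
// the block's state accordingly.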
void
BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        cmpAndSwap(blk, pkt);
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);
    } else if (pkt->isUpgrade()) {
        // sanity check
        assert(!pkt->hasSharers());

        if (blk->isDirty()) {
            // we were in the Owned state, and a cache above us that
            // has the line in Shared state needs to be made aware
            // that the data it already has is in fact dirty
            pkt->setCacheResponding();
            blk->status &= ~BlkDirty;
        }
    } else {
        assert(pkt->isInvalidate());
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
                pkt->print());
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

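// Try to satisfy the request in this cache. Returns true on a hit (with
// evictions, writebacks, and clean operations handled as special cases)
// and false when the request has to be forwarded to the next level.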
bool
BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                  PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    // Here lat is the value passed as parameter to accessBlock() function
    // that can modify its value.
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat);

    DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
            blk ? "hit " + blk->print() : "miss");

    if (pkt->req->isCacheMaintenance()) {
        // A cache maintenance operation is always forwarded to the
        // memory below even if the block is found in dirty state.

        // We defer any changes to the state of the block until we
        // create and mark as in service the mshr for the downstream
        // packet.
        return false;
    }

    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible case is an eviction packet coming from above and
        // encountering a Writeback generated in this cache that is
        // waiting in the write buffer. Cases of upper level peer caches
        // generating CleanEvict and Writeback or simply CleanEvict and
        // CleanEvict almost simultaneously will be caught by snoops sent
        // out by the crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // A CleanEvict or WritebackClean snoops into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();
                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is a special case. We can write the block into
    // the cache without having a writable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());
            return true;
        }

        if (!blk) {
            // need to do a replacement
            blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
            if (!blk) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }
            tags->insertBlock(pkt, blk);

            blk->status |= (BlkValid | BlkReadable);
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            // TODO: the coherent cache can assert(!blk->isDirty());
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing
        // writable, and we got the writeback in Modified or Exclusive
        // state, if not we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);
        // populate the time when the block will be ready to access.
        blk->whenReady = clockEdge(fillLatency) + pkt->headerDelay +
            pkt->payloadDelay;
        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        if (blk) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if (pkt->cmd == MemCmd::WriteClean) {
        // WriteClean handling is a special case. We can allocate a
        // block directly if it doesn't exist and we can update the
        // block immediately. The WriteClean transfers the ownership
        // of the block as well.
        assert(blkSize == pkt->getSize());

        if (!blk) {
            if (pkt->writeThrough()) {
                // if this is a write through packet, we don't try to
                // allocate if the block is not present
                return false;
            } else {
                // a writeback that misses needs to allocate a new block
                blk = allocateBlock(pkt->getAddr(), pkt->isSecure(),
                                    writebacks);
                if (!blk) {
                    // no replaceable block available: give up, fwd to
                    // next level.
                    incMissCount(pkt);
                    return false;
                }
                tags->insertBlock(pkt, blk);

                blk->status |= (BlkValid | BlkReadable);
            }
        }

        // at this point either this is a writeback or a write-through
        // write clean operation and the block is already in this
        // cache, we need to update the data and the block flags
        assert(blk);
        // TODO: the coherent cache can assert(!blk->isDirty());
        if (!pkt->writeThrough()) {
            blk->status |= BlkDirty;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());

        incHitCount(pkt);
        // populate the time when the block will be ready to access.
        blk->whenReady = clockEdge(fillLatency) + pkt->headerDelay +
            pkt->payloadDelay;
        // if this is a write-through packet it will be sent to cache
        // below
        return !pkt->writeThrough();
    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
                       blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);
        satisfyRequest(pkt, blk);
        maintainClusivity(pkt->fromCache(), blk);

        return true;
    }

    // Can't satisfy access normally... either no block (blk == nullptr)
    // or have block but need writable

    incMissCount(pkt);

    if (!blk && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

void
BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
        clusivity == Enums::mostly_excl) {
        // if we have responded to a cache, and our block is still
        // valid, but not dirty, and this cache is mostly exclusive
        // with respect to the cache above, drop the block
        invalidateBlock(blk);
    }
}

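// Insert the response data into the cache, allocating a block (or falling
// back to the single tempBlock when no replaceable block is available) and
// updating the block's coherence state.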
CacheBlk*
BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
                      bool allocate)
{
    assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq);
    Addr addr = pkt->getAddr();
    bool is_secure = pkt->isSecure();
#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    // When handling a fill, we should have no writes to this line.
    assert(addr == pkt->getBlockAddr(blkSize));
    assert(!writeBuffer.findMatch(addr, is_secure));

    if (!blk) {
        // better have read new data...
        assert(pkt->hasData());

        // only read responses and write-line requests have data;
        // note that we don't write the data here for write-line - that
        // happens in the subsequent call to satisfyRequest
        assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq);

        // need to do a replacement if allocating, otherwise we stick
        // with the temporary storage
        blk = allocate ? allocateBlock(addr, is_secure, writebacks) : nullptr;

        if (!blk) {
            // No replaceable block or a mostly exclusive
            // cache... just use temporary storage to complete the
            // current request and then get rid of it
            assert(!tempBlock->isValid());
            blk = tempBlock;
            tempBlock->set = tags->extractSet(addr);
            tempBlock->tag = tags->extractTag(addr);
            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
                    is_secure ? "s" : "ns");
        } else {
            tags->insertBlock(pkt, blk);
        }

        // we should never be overwriting a valid block
        assert(!blk->isValid());
    } else {
        // existing block... probably an upgrade
        assert(blk->tag == tags->extractTag(addr));
1/*
2 * Copyright (c) 2012-2013, 2018 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2003-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Erik Hallnor
41 * Nikos Nikoleris
42 */
43
44/**
45 * @file
46 * Definition of BaseCache functions.
47 */
48
49#include "mem/cache/base.hh"
50
51#include "base/compiler.hh"
52#include "base/logging.hh"
53#include "debug/Cache.hh"
54#include "debug/CachePort.hh"
55#include "debug/CacheVerbose.hh"
56#include "mem/cache/mshr.hh"
57#include "mem/cache/prefetch/base.hh"
58#include "mem/cache/queue_entry.hh"
59#include "params/BaseCache.hh"
60#include "sim/core.hh"
61
62class BaseMasterPort;
63class BaseSlavePort;
64
65using namespace std;
66
67BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
68 BaseCache *_cache,
69 const std::string &_label)
70 : QueuedSlavePort(_name, _cache, queue), queue(*_cache, *this, _label),
71 blocked(false), mustSendRetry(false),
72 sendRetryEvent([this]{ processSendRetry(); }, _name)
73{
74}
75
76BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
77 : MemObject(p),
78 cpuSidePort (p->name + ".cpu_side", this, "CpuSidePort"),
79 memSidePort(p->name + ".mem_side", this, "MemSidePort"),
80 mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
81 writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
82 tags(p->tags),
83 prefetcher(p->prefetcher),
84 prefetchOnAccess(p->prefetch_on_access),
85 writebackClean(p->writeback_clean),
86 tempBlockWriteback(nullptr),
87 writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
88 name(), false,
89 EventBase::Delayed_Writeback_Pri),
90 blkSize(blk_size),
91 lookupLatency(p->tag_latency),
92 dataLatency(p->data_latency),
93 forwardLatency(p->tag_latency),
94 fillLatency(p->data_latency),
95 responseLatency(p->response_latency),
96 numTarget(p->tgts_per_mshr),
97 forwardSnoops(true),
98 clusivity(p->clusivity),
99 isReadOnly(p->is_read_only),
100 blocked(0),
101 order(0),
102 noTargetMSHR(nullptr),
103 missCount(p->max_miss_count),
104 addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
105 system(p->system)
106{
107 // the MSHR queue has no reserve entries as we check the MSHR
108 // queue on every single allocation, whereas the write queue has
109 // as many reserve entries as we have MSHRs, since every MSHR may
110 // eventually require a writeback, and we do not check the write
111 // buffer before committing to an MSHR
112
113 // forward snoops is overridden in init() once we can query
114 // whether the connected master is actually snooping or not
115
116 tempBlock = new CacheBlk();
117 tempBlock->data = new uint8_t[blkSize];
118
119 tags->setCache(this);
120 if (prefetcher)
121 prefetcher->setCache(this);
122}
123
124BaseCache::~BaseCache()
125{
126 delete [] tempBlock->data;
127 delete tempBlock;
128}
129
130void
131BaseCache::CacheSlavePort::setBlocked()
132{
133 assert(!blocked);
134 DPRINTF(CachePort, "Port is blocking new requests\n");
135 blocked = true;
136 // if we already scheduled a retry in this cycle, but it has not yet
137 // happened, cancel it
138 if (sendRetryEvent.scheduled()) {
139 owner.deschedule(sendRetryEvent);
140 DPRINTF(CachePort, "Port descheduled retry\n");
141 mustSendRetry = true;
142 }
143}
144
145void
146BaseCache::CacheSlavePort::clearBlocked()
147{
148 assert(blocked);
149 DPRINTF(CachePort, "Port is accepting new requests\n");
150 blocked = false;
151 if (mustSendRetry) {
152 // @TODO: need to find a better time (next cycle?)
153 owner.schedule(sendRetryEvent, curTick() + 1);
154 }
155}
156
157void
158BaseCache::CacheSlavePort::processSendRetry()
159{
160 DPRINTF(CachePort, "Port is sending retry\n");
161
162 // reset the flag and call retry
163 mustSendRetry = false;
164 sendRetryReq();
165}
166
167void
168BaseCache::init()
169{
170 if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
171 fatal("Cache ports on %s are not connected\n", name());
172 cpuSidePort.sendRangeChange();
173 forwardSnoops = cpuSidePort.isSnooping();
174}
175
176BaseMasterPort &
177BaseCache::getMasterPort(const std::string &if_name, PortID idx)
178{
179 if (if_name == "mem_side") {
180 return memSidePort;
181 } else {
182 return MemObject::getMasterPort(if_name, idx);
183 }
184}
185
186BaseSlavePort &
187BaseCache::getSlavePort(const std::string &if_name, PortID idx)
188{
189 if (if_name == "cpu_side") {
190 return cpuSidePort;
191 } else {
192 return MemObject::getSlavePort(if_name, idx);
193 }
194}
195
196bool
197BaseCache::inRange(Addr addr) const
198{
199 for (const auto& r : addrRanges) {
200 if (r.contains(addr)) {
201 return true;
202 }
203 }
204 return false;
205}
206
207void
208BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
209{
210 if (pkt->needsResponse()) {
211 pkt->makeTimingResponse();
212 // @todo: Make someone pay for this
213 pkt->headerDelay = pkt->payloadDelay = 0;
214
215 // In this case we are considering request_time that takes
216 // into account the delay of the xbar, if any, and just
217 // lat, neglecting responseLatency, modelling hit latency
218 // just as lookupLatency or or the value of lat overriden
219 // by access(), that calls accessBlock() function.
220 cpuSidePort.schedTimingResp(pkt, request_time, true);
221 } else {
222 DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
223 pkt->print());
224
225 // queue the packet for deletion, as the sending cache is
226 // still relying on it; if the block is found in access(),
227 // CleanEvict and Writeback messages will be deleted
228 // here as well
229 pendingDelete.reset(pkt);
230 }
231}
232
233void
234BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
235 Tick forward_time, Tick request_time)
236{
237 if (mshr) {
238 /// MSHR hit
239 /// @note writebacks will be checked in getNextMSHR()
240 /// for any conflicting requests to the same block
241
242 //@todo remove hw_pf here
243
244 // Coalesce unless it was a software prefetch (see above).
245 if (pkt) {
246 assert(!pkt->isWriteback());
247 // CleanEvicts corresponding to blocks which have
248 // outstanding requests in MSHRs are simply sunk here
249 if (pkt->cmd == MemCmd::CleanEvict) {
250 pendingDelete.reset(pkt);
251 } else if (pkt->cmd == MemCmd::WriteClean) {
252 // A WriteClean should never coalesce with any
253 // outstanding cache maintenance requests.
254
255 // We use forward_time here because there is an
256 // uncached memory write, forwarded to WriteBuffer.
257 allocateWriteBuffer(pkt, forward_time);
258 } else {
259 DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
260 pkt->print());
261
262 assert(pkt->req->masterId() < system->maxMasters());
263 mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
264
265 // We use forward_time here because it is the same
266 // considering new targets. We have multiple
267 // requests for the same address here. It
268 // specifies the latency to allocate an internal
269 // buffer and to schedule an event to the queued
270 // port and also takes into account the additional
271 // delay of the xbar.
272 mshr->allocateTarget(pkt, forward_time, order++,
273 allocOnFill(pkt->cmd));
274 if (mshr->getNumTargets() == numTarget) {
275 noTargetMSHR = mshr;
276 setBlocked(Blocked_NoTargets);
277 // need to be careful with this... if this mshr isn't
278 // ready yet (i.e. time > curTick()), we don't want to
279 // move it ahead of mshrs that are ready
280 // mshrQueue.moveToFront(mshr);
281 }
282 }
283 }
284 } else {
285 // no MSHR
286 assert(pkt->req->masterId() < system->maxMasters());
287 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
288
289 if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
290 // We use forward_time here because there is an
291 // writeback or writeclean, forwarded to WriteBuffer.
292 allocateWriteBuffer(pkt, forward_time);
293 } else {
294 if (blk && blk->isValid()) {
295 // If we have a write miss to a valid block, we
296 // need to mark the block non-readable. Otherwise
297 // if we allow reads while there's an outstanding
298 // write miss, the read could return stale data
299 // out of the cache block... a more aggressive
300 // system could detect the overlap (if any) and
301 // forward data out of the MSHRs, but we don't do
302 // that yet. Note that we do need to leave the
303 // block valid so that it stays in the cache, in
304 // case we get an upgrade response (and hence no
305 // new data) when the write miss completes.
306 // As long as CPUs do proper store/load forwarding
307 // internally, and have a sufficiently weak memory
308 // model, this is probably unnecessary, but at some
309 // point it must have seemed like we needed it...
310 assert((pkt->needsWritable() && !blk->isWritable()) ||
311 pkt->req->isCacheMaintenance());
312 blk->status &= ~BlkReadable;
313 }
314 // Here we are using forward_time, modelling the latency of
315 // a miss (outbound) just as forwardLatency, neglecting the
316 // lookupLatency component.
317 allocateMissBuffer(pkt, forward_time);
318 }
319 }
320}
321
322void
323BaseCache::recvTimingReq(PacketPtr pkt)
324{
325 // anything that is merely forwarded pays for the forward latency and
326 // the delay provided by the crossbar
327 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
328
329 // We use lookupLatency here because it is used to specify the latency
330 // to access.
331 Cycles lat = lookupLatency;
332 CacheBlk *blk = nullptr;
333 bool satisfied = false;
334 {
335 PacketList writebacks;
336 // Note that lat is passed by reference here. The function
337 // access() calls accessBlock() which can modify lat value.
338 satisfied = access(pkt, blk, lat, writebacks);
339
340 // copy writebacks to write buffer here to ensure they logically
341 // proceed anything happening below
342 doWritebacks(writebacks, forward_time);
343 }
344
345 // Here we charge the headerDelay that takes into account the latencies
346 // of the bus, if the packet comes from it.
347 // The latency charged it is just lat that is the value of lookupLatency
348 // modified by access() function, or if not just lookupLatency.
349 // In case of a hit we are neglecting response latency.
350 // In case of a miss we are neglecting forward latency.
351 Tick request_time = clockEdge(lat) + pkt->headerDelay;
352 // Here we reset the timing of the packet.
353 pkt->headerDelay = pkt->payloadDelay = 0;
354 // track time of availability of next prefetch, if any
355 Tick next_pf_time = MaxTick;
356
357 if (satisfied) {
358 // if need to notify the prefetcher we have to do it before
359 // anything else as later handleTimingReqHit might turn the
360 // packet in a response
361 if (prefetcher &&
362 (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
363 if (blk)
364 blk->status &= ~BlkHWPrefetched;
365
366 // Don't notify on SWPrefetch
367 if (!pkt->cmd.isSWPrefetch()) {
368 assert(!pkt->req->isCacheMaintenance());
369 next_pf_time = prefetcher->notify(pkt);
370 }
371 }
372
373 handleTimingReqHit(pkt, blk, request_time);
374 } else {
375 handleTimingReqMiss(pkt, blk, forward_time, request_time);
376
377 // We should call the prefetcher reguardless if the request is
378 // satisfied or not, reguardless if the request is in the MSHR
379 // or not. The request could be a ReadReq hit, but still not
380 // satisfied (potentially because of a prior write to the same
381 // cache line. So, even when not satisfied, there is an MSHR
382 // already allocated for this, we need to let the prefetcher
383 // know about the request
384
385 // Don't notify prefetcher on SWPrefetch or cache maintenance
386 // operations
387 if (prefetcher && pkt &&
388 !pkt->cmd.isSWPrefetch() &&
389 !pkt->req->isCacheMaintenance()) {
390 next_pf_time = prefetcher->notify(pkt);
391 }
392 }
393
394 if (next_pf_time != MaxTick) {
395 schedMemSideSendEvent(next_pf_time);
396 }
397}
398
399void
400BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
401{
402 Tick completion_time = clockEdge(responseLatency) +
403 pkt->headerDelay + pkt->payloadDelay;
404
405 // Reset the bus additional time as it is now accounted for
406 pkt->headerDelay = pkt->payloadDelay = 0;
407
408 cpuSidePort.schedTimingResp(pkt, completion_time, true);
409}
410
411void
412BaseCache::recvTimingResp(PacketPtr pkt)
413{
414 assert(pkt->isResponse());
415
416 // all header delay should be paid for by the crossbar, unless
417 // this is a prefetch response from above
418 panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
419 "%s saw a non-zero packet delay\n", name());
420
421 const bool is_error = pkt->isError();
422
423 if (is_error) {
424 DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
425 pkt->print());
426 }
427
428 DPRINTF(Cache, "%s: Handling response %s\n", __func__,
429 pkt->print());
430
431 // if this is a write, we should be looking at an uncacheable
432 // write
433 if (pkt->isWrite()) {
434 assert(pkt->req->isUncacheable());
435 handleUncacheableWriteResp(pkt);
436 return;
437 }
438
439 // we have dealt with any (uncacheable) writes above, from here on
440 // we know we are dealing with an MSHR due to a miss or a prefetch
441 MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
442 assert(mshr);
443
444 if (mshr == noTargetMSHR) {
445 // we always clear at least one target
446 clearBlocked(Blocked_NoTargets);
447 noTargetMSHR = nullptr;
448 }
449
450 // Initial target is used just for stats
451 MSHR::Target *initial_tgt = mshr->getTarget();
452 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
453 Tick miss_latency = curTick() - initial_tgt->recvTime;
454
455 if (pkt->req->isUncacheable()) {
456 assert(pkt->req->masterId() < system->maxMasters());
457 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
458 miss_latency;
459 } else {
460 assert(pkt->req->masterId() < system->maxMasters());
461 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
462 miss_latency;
463 }
464
465 PacketList writebacks;
466
467 bool is_fill = !mshr->isForward &&
468 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
469
470 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
471
472 if (is_fill && !is_error) {
473 DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
474 pkt->getAddr());
475
476 blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill());
477 assert(blk != nullptr);
478 }
479
480 if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
481 // The block was marked not readable while there was a pending
482 // cache maintenance operation, restore its flag.
483 blk->status |= BlkReadable;
484 }
485
486 if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
487 // If at this point the referenced block is writable and the
488 // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
490 mshr->promoteWritable();
491 }
492
493 serviceMSHRTargets(mshr, pkt, blk, writebacks);
494
495 if (mshr->promoteDeferredTargets()) {
496 // avoid later read getting stale data while write miss is
        // outstanding... see comment in timingAccess()
498 if (blk) {
499 blk->status &= ~BlkReadable;
500 }
501 mshrQueue.markPending(mshr);
502 schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
503 } else {
504 // while we deallocate an mshr from the queue we still have to
505 // check the isFull condition before and after as we might
506 // have been using the reserved entries already
507 const bool was_full = mshrQueue.isFull();
508 mshrQueue.deallocate(mshr);
509 if (was_full && !mshrQueue.isFull()) {
510 clearBlocked(Blocked_NoMSHRs);
511 }
512
513 // Request the bus for a prefetch if this deallocation freed enough
514 // MSHRs for a prefetch to take place
515 if (prefetcher && mshrQueue.canPrefetch()) {
516 Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
517 clockEdge());
518 if (next_pf_time != MaxTick)
519 schedMemSideSendEvent(next_pf_time);
520 }
521 }
522
    // if we used temp block, check to see if it's valid and then clear it out
524 if (blk == tempBlock && tempBlock->isValid()) {
525 evictBlock(blk, writebacks);
526 }
527
528 const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
529 // copy writebacks to write buffer
530 doWritebacks(writebacks, forward_time);
531
532 DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
533 delete pkt;
534}
535
536
537Tick
538BaseCache::recvAtomic(PacketPtr pkt)
539{
540 // We are in atomic mode so we pay just for lookupLatency here.
541 Cycles lat = lookupLatency;
542
543 // follow the same flow as in recvTimingReq, and check if a cache
544 // above us is responding
545 if (pkt->cacheResponding() && !pkt->isClean()) {
546 assert(!pkt->req->isCacheInvalidate());
547 DPRINTF(Cache, "Cache above responding to %s: not responding\n",
548 pkt->print());
549
550 // if a cache is responding, and it had the line in Owned
551 // rather than Modified state, we need to invalidate any
552 // copies that are not on the same path to memory
553 assert(pkt->needsWritable() && !pkt->responderHadWritable());
554 lat += ticksToCycles(memSidePort.sendAtomic(pkt));
555
556 return lat * clockPeriod();
557 }
558
559 // should assert here that there are no outstanding MSHRs or
560 // writebacks... that would mean that someone used an atomic
561 // access in timing mode
562
563 CacheBlk *blk = nullptr;
564 PacketList writebacks;
565 bool satisfied = access(pkt, blk, lat, writebacks);
566
567 if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty
        // block. If a dirty block is encountered a WriteClean
        // will update any copies on the path to the memory
        // until the point of reference.
572 DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
573 __func__, pkt->print(), blk->print());
574 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
575 writebacks.push_back(wb_pkt);
576 pkt->setSatisfied();
577 }
578
579 // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
581 doWritebacksAtomic(writebacks);
582 assert(writebacks.empty());
583
584 if (!satisfied) {
585 lat += handleAtomicReqMiss(pkt, blk, writebacks);
586 }
587
588 // Note that we don't invoke the prefetcher at all in atomic mode.
589 // It's not clear how to do it properly, particularly for
590 // prefetchers that aggressively generate prefetch candidates and
591 // rely on bandwidth contention to throttle them; these will tend
592 // to pollute the cache in atomic mode since there is no bandwidth
593 // contention. If we ever do want to enable prefetching in atomic
594 // mode, though, this is the place to do it... see timingAccess()
595 // for an example (though we'd want to issue the prefetch(es)
596 // immediately rather than calling requestMemSideBus() as we do
597 // there).
598
599 // do any writebacks resulting from the response handling
600 doWritebacksAtomic(writebacks);
601
    // if we used temp block, check to see if it's valid and if so
603 // clear it out, but only do so after the call to recvAtomic is
604 // finished so that any downstream observers (such as a snoop
605 // filter), first see the fill, and only then see the eviction
606 if (blk == tempBlock && tempBlock->isValid()) {
607 // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
609 // writeback from the fetch that we have not yet sent
610 if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
612 // do not schedule any new event
613 writebackTempBlockAtomic();
614 } else {
615 // the writeback/clean eviction happens after the call to
616 // recvAtomic has finished (but before any successive
617 // calls), so that the response handling from the fill is
618 // allowed to happen first
619 schedule(writebackTempBlockAtomicEvent, curTick());
620 }
621
622 tempBlockWriteback = evictBlock(blk);
623 }
624
625 if (pkt->needsResponse()) {
626 pkt->makeAtomicResponse();
627 }
628
629 return lat * clockPeriod();
630}
631
632void
633BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
634{
635 Addr blk_addr = pkt->getBlockAddr(blkSize);
636 bool is_secure = pkt->isSecure();
637 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
638 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
639
640 pkt->pushLabel(name());
641
642 CacheBlkPrintWrapper cbpw(blk);
643
644 // Note that just because an L2/L3 has valid data doesn't mean an
645 // L1 doesn't have a more up-to-date modified copy that still
646 // needs to be found. As a result we always update the request if
647 // we have it, but only declare it satisfied if we are the owner.
648
649 // see if we have data at all (owned or otherwise)
650 bool have_data = blk && blk->isValid()
651 && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize,
652 blk->data);
653
654 // data we have is dirty if marked as such or if we have an
655 // in-service MSHR that is pending a modified line
656 bool have_dirty =
657 have_data && (blk->isDirty() ||
658 (mshr && mshr->inService && mshr->isPendingModified()));
659
660 bool done = have_dirty ||
661 cpuSidePort.checkFunctional(pkt) ||
662 mshrQueue.checkFunctional(pkt, blk_addr) ||
663 writeBuffer.checkFunctional(pkt, blk_addr) ||
664 memSidePort.checkFunctional(pkt);
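    // Note the short-circuit evaluation above: dirty data in this
    // cache completes the access outright; otherwise the port
    // queues, the MSHR queue and the write buffer are each given a
    // chance to update or satisfy the packet before we decide
    // whether to forward it.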
665
666 DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
667 (blk && blk->isValid()) ? "valid " : "",
668 have_data ? "data " : "", done ? "done " : "");
669
670 // We're leaving the cache, so pop cache->name() label
671 pkt->popLabel();
672
673 if (done) {
674 pkt->makeResponse();
675 } else {
676 // if it came as a request from the CPU side then make sure it
677 // continues towards the memory side
678 if (from_cpu_side) {
679 memSidePort.sendFunctional(pkt);
680 } else if (cpuSidePort.isSnooping()) {
681 // if it came from the memory side, it must be a snoop request
682 // and we should only forward it if we are forwarding snoops
683 cpuSidePort.sendFunctionalSnoop(pkt);
684 }
685 }
686}
687
688
689void
690BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
691{
692 assert(pkt->isRequest());
693
694 uint64_t overwrite_val;
695 bool overwrite_mem;
696 uint64_t condition_val64;
697 uint32_t condition_val32;
698
699 int offset = pkt->getOffset(blkSize);
700 uint8_t *blk_data = blk->data + offset;
701
702 assert(sizeof(uint64_t) >= pkt->getSize());
703
704 overwrite_mem = true;
705 // keep a copy of our possible write value, and copy what is at the
706 // memory address into the packet
707 pkt->writeData((uint8_t *)&overwrite_val);
708 pkt->setData(blk_data);
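    // Sketch of the semantics (example values hypothetical): for a
    // conditional swap of size 8 with condition value E and swap
    // value V, the old 8 bytes at blk_data are always returned in
    // the packet, while V is written to the block only if those
    // bytes compare equal to E; an unconditional swap always
    // writes V.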
709
710 if (pkt->req->isCondSwap()) {
711 if (pkt->getSize() == sizeof(uint64_t)) {
712 condition_val64 = pkt->req->getExtraData();
713 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
714 sizeof(uint64_t));
715 } else if (pkt->getSize() == sizeof(uint32_t)) {
716 condition_val32 = (uint32_t)pkt->req->getExtraData();
717 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
718 sizeof(uint32_t));
719 } else
720 panic("Invalid size for conditional read/write\n");
721 }
722
723 if (overwrite_mem) {
724 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
725 blk->status |= BlkDirty;
726 }
727}
728
729QueueEntry*
730BaseCache::getNextQueueEntry()
731{
732 // Check both MSHR queue and write buffer for potential requests,
733 // note that null does not mean there is no request, it could
734 // simply be that it is not ready
735 MSHR *miss_mshr = mshrQueue.getNext();
736 WriteQueueEntry *wq_entry = writeBuffer.getNext();
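    // In summary (see the checks below): the chosen entry may be
    // overridden by a conflicting, earlier entry in the opposite
    // queue, and if neither queue has anything ready we fall
    // through to a prefetch attempt.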
737
738 // If we got a write buffer request ready, first priority is a
739 // full write buffer, otherwise we favour the miss requests
740 if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
741 // need to search MSHR queue for conflicting earlier miss.
742 MSHR *conflict_mshr =
743 mshrQueue.findPending(wq_entry->blkAddr,
744 wq_entry->isSecure);
745
746 if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
747 // Service misses in order until conflict is cleared.
748 return conflict_mshr;
749
750 // @todo Note that we ignore the ready time of the conflict here
751 }
752
753 // No conflicts; issue write
754 return wq_entry;
755 } else if (miss_mshr) {
756 // need to check for conflicting earlier writeback
757 WriteQueueEntry *conflict_mshr =
758 writeBuffer.findPending(miss_mshr->blkAddr,
759 miss_mshr->isSecure);
760 if (conflict_mshr) {
761 // not sure why we don't check order here... it was in the
762 // original code but commented out.
763
764 // The only way this happens is if we are
765 // doing a write and we didn't have permissions
766 // then subsequently saw a writeback (owned got evicted)
767 // We need to make sure to perform the writeback first
768 // To preserve the dirty data, then we can issue the write
769
770 // should we return wq_entry here instead? I.e. do we
771 // have to flush writes in order? I don't think so... not
772 // for Alpha anyway. Maybe for x86?
773 return conflict_mshr;
774
775 // @todo Note that we ignore the ready time of the conflict here
776 }
777
778 // No conflicts; issue read
779 return miss_mshr;
780 }
781
782 // fall through... no pending requests. Try a prefetch.
783 assert(!miss_mshr && !wq_entry);
784 if (prefetcher && mshrQueue.canPrefetch()) {
785 // If we have a miss queue slot, we can try a prefetch
786 PacketPtr pkt = prefetcher->getPacket();
787 if (pkt) {
788 Addr pf_addr = pkt->getBlockAddr(blkSize);
789 if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
790 !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
791 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
792 // Update statistic on number of prefetches issued
793 // (hwpf_mshr_misses)
794 assert(pkt->req->masterId() < system->maxMasters());
795 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
796
797 // allocate an MSHR and return it, note
798 // that we send the packet straight away, so do not
799 // schedule the send
800 return allocateMissBuffer(pkt, curTick(), false);
801 } else {
802 // free the request and packet
803 delete pkt->req;
804 delete pkt;
805 }
806 }
807 }
808
809 return nullptr;
810}
811
812void
813BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
814{
815 assert(pkt->isRequest());
816
817 assert(blk && blk->isValid());
818 // Occasionally this is not true... if we are a lower-level cache
819 // satisfying a string of Read and ReadEx requests from
820 // upper-level caches, a Read will mark the block as shared but we
821 // can satisfy a following ReadEx anyway since we can rely on the
822 // Read requester(s) to have buffered the ReadEx snoop and to
823 // invalidate their blocks after receiving them.
824 // assert(!pkt->needsWritable() || blk->isWritable());
825 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
826
827 // Check RMW operations first since both isRead() and
828 // isWrite() will be true for them
829 if (pkt->cmd == MemCmd::SwapReq) {
830 cmpAndSwap(blk, pkt);
831 } else if (pkt->isWrite()) {
832 // we have the block in a writable state and can go ahead,
833 // note that the line may be also be considered writable in
834 // downstream caches along the path to memory, but always
835 // Exclusive, and never Modified
836 assert(blk->isWritable());
837 // Write or WriteLine at the first cache with block in writable state
838 if (blk->checkWrite(pkt)) {
839 pkt->writeDataToBlock(blk->data, blkSize);
840 }
841 // Always mark the line as dirty (and thus transition to the
842 // Modified state) even if we are a failed StoreCond so we
843 // supply data to any snoops that have appended themselves to
844 // this cache before knowing the store will fail.
845 blk->status |= BlkDirty;
846 DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
847 } else if (pkt->isRead()) {
848 if (pkt->isLLSC()) {
849 blk->trackLoadLocked(pkt);
850 }
851
852 // all read responses have a data payload
853 assert(pkt->hasRespData());
854 pkt->setDataFromBlock(blk->data, blkSize);
855 } else if (pkt->isUpgrade()) {
856 // sanity check
857 assert(!pkt->hasSharers());
858
859 if (blk->isDirty()) {
860 // we were in the Owned state, and a cache above us that
861 // has the line in Shared state needs to be made aware
862 // that the data it already has is in fact dirty
863 pkt->setCacheResponding();
864 blk->status &= ~BlkDirty;
865 }
866 } else {
867 assert(pkt->isInvalidate());
868 invalidateBlock(blk);
869 DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
870 pkt->print());
871 }
872}
873
874/////////////////////////////////////////////////////
875//
876// Access path: requests coming in from the CPU side
877//
878/////////////////////////////////////////////////////
879
880bool
881BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
882 PacketList &writebacks)
883{
884 // sanity check
885 assert(pkt->isRequest());
886
887 chatty_assert(!(isReadOnly && pkt->isWrite()),
888 "Should never see a write in a read-only cache %s\n",
889 name());
890
    // lat is passed by reference to accessBlock(), which may update
    // its value.
893 blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat);
894
895 DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
896 blk ? "hit " + blk->print() : "miss");
897
898 if (pkt->req->isCacheMaintenance()) {
899 // A cache maintenance operation is always forwarded to the
900 // memory below even if the block is found in dirty state.
901
902 // We defer any changes to the state of the block until we
903 // create and mark as in service the mshr for the downstream
904 // packet.
905 return false;
906 }
907
908 if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible case is a CleanEvict packet coming from above and
        // encountering a Writeback generated in this cache and waiting
        // in the write buffer. Cases of upper level peer caches
        // generating CleanEvict and Writeback or simply CleanEvict and
        // CleanEvict almost simultaneously will be caught by snoops sent
        // out by the crossbar.
917 WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
918 pkt->isSecure());
919 if (wb_entry) {
920 assert(wb_entry->getNumTargets() == 1);
921 PacketPtr wbPkt = wb_entry->getTarget()->pkt;
922 assert(wbPkt->isWriteback());
923
924 if (pkt->isCleanEviction()) {
925 // The CleanEvict and WritebackClean snoops into other
926 // peer caches of the same level while traversing the
927 // crossbar. If a copy of the block is found, the
928 // packet is deleted in the crossbar. Hence, none of
929 // the other upper level caches connected to this
930 // cache have the block, so we can clear the
931 // BLOCK_CACHED flag in the Writeback if set and
932 // discard the CleanEvict by returning true.
933 wbPkt->clearBlockCached();
934 return true;
935 } else {
936 assert(pkt->cmd == MemCmd::WritebackDirty);
937 // Dirty writeback from above trumps our clean
938 // writeback... discard here
939 // Note: markInService will remove entry from writeback buffer.
940 markInService(wb_entry);
941 delete wbPkt;
942 }
943 }
944 }
945
946 // Writeback handling is special case. We can write the block into
    // the cache without having a writable copy (or any copy at all).
948 if (pkt->isWriteback()) {
949 assert(blkSize == pkt->getSize());
950
        // we could get a clean writeback while we have
952 // outstanding accesses to a block, do the simple thing for
953 // now and drop the clean writeback so that we do not upset
954 // any ordering/decisions about ownership already taken
955 if (pkt->cmd == MemCmd::WritebackClean &&
956 mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
957 DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
958 "dropping\n", pkt->getAddr());
959 return true;
960 }
961
962 if (!blk) {
963 // need to do a replacement
964 blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
965 if (!blk) {
966 // no replaceable block available: give up, fwd to next level.
967 incMissCount(pkt);
968 return false;
969 }
970 tags->insertBlock(pkt, blk);
971
972 blk->status |= (BlkValid | BlkReadable);
973 }
974 // only mark the block dirty if we got a writeback command,
975 // and leave it as is for a clean writeback
976 if (pkt->cmd == MemCmd::WritebackDirty) {
977 // TODO: the coherent cache can assert(!blk->isDirty());
978 blk->status |= BlkDirty;
979 }
        // if the packet does not have sharers, it is passing
        // writable and we got the writeback in Modified or Exclusive
        // state; if it does, we are in the Owned or Shared state
983 if (!pkt->hasSharers()) {
984 blk->status |= BlkWritable;
985 }
986 // nothing else to do; writeback doesn't expect response
987 assert(!pkt->needsResponse());
988 pkt->writeDataToBlock(blk->data, blkSize);
989 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
990 incHitCount(pkt);
991 // populate the time when the block will be ready to access.
992 blk->whenReady = clockEdge(fillLatency) + pkt->headerDelay +
993 pkt->payloadDelay;
994 return true;
995 } else if (pkt->cmd == MemCmd::CleanEvict) {
996 if (blk) {
997 // Found the block in the tags, need to stop CleanEvict from
998 // propagating further down the hierarchy. Returning true will
999 // treat the CleanEvict like a satisfied write request and delete
1000 // it.
1001 return true;
1002 }
1003 // We didn't find the block here, propagate the CleanEvict further
1004 // down the memory hierarchy. Returning false will treat the CleanEvict
1005 // like a Writeback which could not find a replaceable block so has to
1006 // go to next level.
1007 return false;
1008 } else if (pkt->cmd == MemCmd::WriteClean) {
1009 // WriteClean handling is a special case. We can allocate a
1010 // block directly if it doesn't exist and we can update the
1011 // block immediately. The WriteClean transfers the ownership
1012 // of the block as well.
1013 assert(blkSize == pkt->getSize());
1014
1015 if (!blk) {
1016 if (pkt->writeThrough()) {
1017 // if this is a write through packet, we don't try to
1018 // allocate if the block is not present
1019 return false;
1020 } else {
1021 // a writeback that misses needs to allocate a new block
1022 blk = allocateBlock(pkt->getAddr(), pkt->isSecure(),
1023 writebacks);
1024 if (!blk) {
1025 // no replaceable block available: give up, fwd to
1026 // next level.
1027 incMissCount(pkt);
1028 return false;
1029 }
1030 tags->insertBlock(pkt, blk);
1031
1032 blk->status |= (BlkValid | BlkReadable);
1033 }
1034 }
1035
1036 // at this point either this is a writeback or a write-through
1037 // write clean operation and the block is already in this
1038 // cache, we need to update the data and the block flags
1039 assert(blk);
1040 // TODO: the coherent cache can assert(!blk->isDirty());
1041 if (!pkt->writeThrough()) {
1042 blk->status |= BlkDirty;
1043 }
1044 // nothing else to do; writeback doesn't expect response
1045 assert(!pkt->needsResponse());
1046 pkt->writeDataToBlock(blk->data, blkSize);
1047 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
1048
1049 incHitCount(pkt);
1050 // populate the time when the block will be ready to access.
1051 blk->whenReady = clockEdge(fillLatency) + pkt->headerDelay +
1052 pkt->payloadDelay;
        // if this is a write-through packet it will be sent to the
        // cache below
1055 return !pkt->writeThrough();
1056 } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
1057 blk->isReadable())) {
1058 // OK to satisfy access
1059 incHitCount(pkt);
1060 satisfyRequest(pkt, blk);
1061 maintainClusivity(pkt->fromCache(), blk);
1062
1063 return true;
1064 }
1065
1066 // Can't satisfy access normally... either no block (blk == nullptr)
1067 // or have block but need writable
1068
1069 incMissCount(pkt);
1070
1071 if (!blk && pkt->isLLSC() && pkt->isWrite()) {
1072 // complete miss on store conditional... just give up now
1073 pkt->req->setExtraData(0);
1074 return true;
1075 }
1076
1077 return false;
1078}
1079
1080void
1081BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
1082{
1083 if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
1084 clusivity == Enums::mostly_excl) {
1085 // if we have responded to a cache, and our block is still
1086 // valid, but not dirty, and this cache is mostly exclusive
1087 // with respect to the cache above, drop the block
1088 invalidateBlock(blk);
1089 }
1090}
1091
1092CacheBlk*
1093BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
1094 bool allocate)
1095{
1096 assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq);
1097 Addr addr = pkt->getAddr();
1098 bool is_secure = pkt->isSecure();
1099#if TRACING_ON
1100 CacheBlk::State old_state = blk ? blk->status : 0;
1101#endif
1102
1103 // When handling a fill, we should have no writes to this line.
1104 assert(addr == pkt->getBlockAddr(blkSize));
1105 assert(!writeBuffer.findMatch(addr, is_secure));
1106
1107 if (!blk) {
1108 // better have read new data...
1109 assert(pkt->hasData());
1110
1111 // only read responses and write-line requests have data;
1112 // note that we don't write the data here for write-line - that
1113 // happens in the subsequent call to satisfyRequest
1114 assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq);
1115
1116 // need to do a replacement if allocating, otherwise we stick
1117 // with the temporary storage
1118 blk = allocate ? allocateBlock(addr, is_secure, writebacks) : nullptr;
1119
1120 if (!blk) {
1121 // No replaceable block or a mostly exclusive
1122 // cache... just use temporary storage to complete the
1123 // current request and then get rid of it
1124 assert(!tempBlock->isValid());
1125 blk = tempBlock;
1126 tempBlock->set = tags->extractSet(addr);
1127 tempBlock->tag = tags->extractTag(addr);
1128 DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
1129 is_secure ? "s" : "ns");
1130 } else {
1131 tags->insertBlock(pkt, blk);
1132 }
1133
1134 // we should never be overwriting a valid block
1135 assert(!blk->isValid());
1136 } else {
1137 // existing block... probably an upgrade
1138 assert(blk->tag == tags->extractTag(addr));
1139 assert(blk->isSecure() == is_secure);
1139 // either we're getting new data or the block should already be valid
1140 assert(pkt->hasData() || blk->isValid());
1141 // don't clear block status... if block is already dirty we
1142 // don't want to lose that
1143 }
1144
1145 if (is_secure)
1146 blk->status |= BlkSecure;
1147 blk->status |= BlkValid | BlkReadable;
1148
1149 // sanity check for whole-line writes, which should always be
1150 // marked as writable as part of the fill, and then later marked
1151 // dirty as part of satisfyRequest
1152 if (pkt->cmd == MemCmd::WriteLineReq) {
1153 assert(!pkt->hasSharers());
1154 }
1155
1156 // here we deal with setting the appropriate state of the line,
1157 // and we start by looking at the hasSharers flag, and ignore the
1158 // cacheResponding flag (normally signalling dirty data) if the
1159 // packet has sharers, thus the line is never allocated as Owned
1160 // (dirty but not writable), and always ends up being either
1161 // Shared, Exclusive or Modified, see Packet::setCacheResponding
1162 // for more details
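    //
    // Summarizing the resulting state (a sketch):
    //   hasSharers                     -> Shared
    //   !hasSharers, !cacheResponding  -> Exclusive
    //   !hasSharers, cacheResponding   -> Modified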
1163 if (!pkt->hasSharers()) {
1164 // we could get a writable line from memory (rather than a
1165 // cache) even in a read-only cache, note that we set this bit
1166 // even for a read-only cache, possibly revisit this decision
1167 blk->status |= BlkWritable;
1168
1169 // check if we got this via cache-to-cache transfer (i.e., from a
1170 // cache that had the block in Modified or Owned state)
1171 if (pkt->cacheResponding()) {
1172 // we got the block in Modified state, and invalidated the
1173 // owners copy
1174 blk->status |= BlkDirty;
1175
1176 chatty_assert(!isReadOnly, "Should never see dirty snoop response "
1177 "in read-only cache %s\n", name());
1178 }
1179 }
1180
1181 DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
1182 addr, is_secure ? "s" : "ns", old_state, blk->print());
1183
1184 // if we got new data, copy it in (checking for a read response
1185 // and a response that has data is the same in the end)
1186 if (pkt->isRead()) {
1187 // sanity checks
1188 assert(pkt->hasData());
1189 assert(pkt->getSize() == blkSize);
1190
1191 pkt->writeDataToBlock(blk->data, blkSize);
1192 }
1193 // We pay for fillLatency here.
1194 blk->whenReady = clockEdge() + fillLatency * clockPeriod() +
1195 pkt->payloadDelay;
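    // (Illustrative numbers: fillLatency = 4 cycles at a 500-tick
    // period plus payloadDelay = 250 ticks puts whenReady 2250
    // ticks past the current cycle boundary.)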
1196
1197 return blk;
1198}
1199
1200CacheBlk*
1201BaseCache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
1202{
1203 // Find replacement victim
1204 CacheBlk *blk = tags->findVictim(addr);
1205
1206 // It is valid to return nullptr if there is no victim
1207 if (!blk)
1208 return nullptr;
1209
1210 if (blk->isValid()) {
1211 Addr repl_addr = tags->regenerateBlkAddr(blk);
1212 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
1213 if (repl_mshr) {
1214 // must be an outstanding upgrade or clean request
1215 // on a block we're about to replace...
1216 assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
1217 repl_mshr->isCleaning());
1218 // too hard to replace block with transient state
1219 // allocation failed, block not inserted
1220 return nullptr;
1221 } else {
1222 DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
1223 "(%s): %s\n", repl_addr, blk->isSecure() ? "s" : "ns",
1224 addr, is_secure ? "s" : "ns",
1225 blk->isDirty() ? "writeback" : "clean");
1226
1227 if (blk->wasPrefetched()) {
1228 unusedPrefetches++;
1229 }
1230 evictBlock(blk, writebacks);
1231 replacements++;
1232 }
1233 }
1234
1235 return blk;
1236}
1237
1238void
1239BaseCache::invalidateBlock(CacheBlk *blk)
1240{
1241 if (blk != tempBlock)
1242 tags->invalidate(blk);
1243 blk->invalidate();
1244}
1245
1246PacketPtr
1247BaseCache::writebackBlk(CacheBlk *blk)
1248{
1249 chatty_assert(!isReadOnly || writebackClean,
1250 "Writeback from read-only cache");
1251 assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));
1252
1253 writebacks[Request::wbMasterId]++;
1254
1255 Request *req = new Request(tags->regenerateBlkAddr(blk), blkSize, 0,
1256 Request::wbMasterId);
1257 if (blk->isSecure())
1258 req->setFlags(Request::SECURE);
1259
1260 req->taskId(blk->task_id);
1261
1262 PacketPtr pkt =
1263 new Packet(req, blk->isDirty() ?
1264 MemCmd::WritebackDirty : MemCmd::WritebackClean);
1265
1266 DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
1267 pkt->print(), blk->isWritable(), blk->isDirty());
1268
1269 if (blk->isWritable()) {
1270 // not asserting shared means we pass the block in modified
        // state, mark our own block non-writable
1272 blk->status &= ~BlkWritable;
1273 } else {
1274 // we are in the Owned state, tell the receiver
1275 pkt->setHasSharers();
1276 }
1277
1278 // make sure the block is not marked dirty
1279 blk->status &= ~BlkDirty;
1280
1281 pkt->allocate();
1282 pkt->setDataFromBlock(blk->data, blkSize);
1283
1284 return pkt;
1285}
1286
1287PacketPtr
1288BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
1289{
1290 Request *req = new Request(tags->regenerateBlkAddr(blk), blkSize, 0,
1291 Request::wbMasterId);
1292 if (blk->isSecure()) {
1293 req->setFlags(Request::SECURE);
1294 }
1295 req->taskId(blk->task_id);
1296
1297 PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);
1298
1299 if (dest) {
1300 req->setFlags(dest);
1301 pkt->setWriteThrough();
1302 }
1303
1304 DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
1305 blk->isWritable(), blk->isDirty());
1306
1307 if (blk->isWritable()) {
1308 // not asserting shared means we pass the block in modified
        // state, mark our own block non-writable
1310 blk->status &= ~BlkWritable;
1311 } else {
1312 // we are in the Owned state, tell the receiver
1313 pkt->setHasSharers();
1314 }
1315
1316 // make sure the block is not marked dirty
1317 blk->status &= ~BlkDirty;
1318
1319 pkt->allocate();
1320 pkt->setDataFromBlock(blk->data, blkSize);
1321
1322 return pkt;
1323}
1324
1325
1326void
1327BaseCache::memWriteback()
1328{
1329 tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
1330}
1331
1332void
1333BaseCache::memInvalidate()
1334{
1335 tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
1336}
1337
1338bool
1339BaseCache::isDirty() const
1340{
1341 return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
1342}
1343
1344void
1345BaseCache::writebackVisitor(CacheBlk &blk)
1346{
1347 if (blk.isDirty()) {
1348 assert(blk.isValid());
1349
1350 Request request(tags->regenerateBlkAddr(&blk),
1351 blkSize, 0, Request::funcMasterId);
1352 request.taskId(blk.task_id);
1353 if (blk.isSecure()) {
1354 request.setFlags(Request::SECURE);
1355 }
1356
1357 Packet packet(&request, MemCmd::WriteReq);
1358 packet.dataStatic(blk.data);
1359
1360 memSidePort.sendFunctional(&packet);
1361
1362 blk.status &= ~BlkDirty;
1363 }
1364}
1365
1366void
1367BaseCache::invalidateVisitor(CacheBlk &blk)
1368{
1369 if (blk.isDirty())
1370 warn_once("Invalidating dirty cache lines. " \
1371 "Expect things to break.\n");
1372
1373 if (blk.isValid()) {
1374 assert(!blk.isDirty());
1375 invalidateBlock(&blk);
1376 }
1377}
1378
1379Tick
1380BaseCache::nextQueueReadyTime() const
1381{
1382 Tick nextReady = std::min(mshrQueue.nextReadyTime(),
1383 writeBuffer.nextReadyTime());
1384
1385 // Don't signal prefetch ready time if no MSHRs available
    // Will signal once enough MSHRs are deallocated
1387 if (prefetcher && mshrQueue.canPrefetch()) {
1388 nextReady = std::min(nextReady,
1389 prefetcher->nextPrefetchReadyTime());
1390 }
1391
1392 return nextReady;
1393}
1394
1395
1396bool
1397BaseCache::sendMSHRQueuePacket(MSHR* mshr)
1398{
1399 assert(mshr);
1400
1401 // use request from 1st target
1402 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1403
1404 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());
1405
1406 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
1407
1408 // either a prefetch that is not present upstream, or a normal
1409 // MSHR request, proceed to get the packet to send downstream
1410 PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable());
1411
1412 mshr->isForward = (pkt == nullptr);
1413
1414 if (mshr->isForward) {
1415 // not a cache block request, but a response is expected
1416 // make copy of current packet to forward, keep current
1417 // copy for response handling
1418 pkt = new Packet(tgt_pkt, false, true);
1419 assert(!pkt->isWrite());
1420 }
1421
1422 // play it safe and append (rather than set) the sender state,
1423 // as forwarded packets may already have existing state
1424 pkt->pushSenderState(mshr);
1425
1426 if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty block. Mark
1428 // the packet so that the destination xbar can determine that
1429 // there will be a follow-up write packet as well.
1430 pkt->setSatisfied();
1431 }
1432
1433 if (!memSidePort.sendTimingReq(pkt)) {
1434 // we are awaiting a retry, but we
1435 // delete the packet and will be creating a new packet
1436 // when we get the opportunity
1437 delete pkt;
1438
1439 // note that we have now masked any requestBus and
1440 // schedSendEvent (we will wait for a retry before
1441 // doing anything), and this is so even if we do not
1442 // care about this packet and might override it before
1443 // it gets retried
1444 return true;
1445 } else {
1446 // As part of the call to sendTimingReq the packet is
1447 // forwarded to all neighbouring caches (and any caches
1448 // above them) as a snoop. Thus at this point we know if
1449 // any of the neighbouring caches are responding, and if
1450 // so, we know it is dirty, and we can determine if it is
1451 // being passed as Modified, making our MSHR the ordering
1452 // point
1453 bool pending_modified_resp = !pkt->hasSharers() &&
1454 pkt->cacheResponding();
1455 markInService(mshr, pending_modified_resp);
1456
1457 if (pkt->isClean() && blk && blk->isDirty()) {
            // A cache clean operation is looking for a dirty
            // block. If a dirty block is encountered a WriteClean
            // will update any copies on the path to the memory
            // until the point of reference.
1462 DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
1463 __func__, pkt->print(), blk->print());
1464 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
1465 pkt->id);
1466 PacketList writebacks;
1467 writebacks.push_back(wb_pkt);
1468 doWritebacks(writebacks, 0);
1469 }
1470
1471 return false;
1472 }
1473}
1474
1475bool
1476BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
1477{
1478 assert(wq_entry);
1479
1480 // always a single target for write queue entries
1481 PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;
1482
1483 DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());
1484
1485 // forward as is, both for evictions and uncacheable writes
1486 if (!memSidePort.sendTimingReq(tgt_pkt)) {
1487 // note that we have now masked any requestBus and
1488 // schedSendEvent (we will wait for a retry before
1489 // doing anything), and this is so even if we do not
1490 // care about this packet and might override it before
1491 // it gets retried
1492 return true;
1493 } else {
1494 markInService(wq_entry);
1495 return false;
1496 }
1497}
1498
1499void
1500BaseCache::serialize(CheckpointOut &cp) const
1501{
1502 bool dirty(isDirty());
1503
1504 if (dirty) {
1505 warn("*** The cache still contains dirty data. ***\n");
1506 warn(" Make sure to drain the system using the correct flags.\n");
1507 warn(" This checkpoint will not restore correctly " \
1508 "and dirty data in the cache will be lost!\n");
1509 }
1510
1511 // Since we don't checkpoint the data in the cache, any dirty data
1512 // will be lost when restoring from a checkpoint of a system that
1513 // wasn't drained properly. Flag the checkpoint as invalid if the
1514 // cache contains dirty data.
1515 bool bad_checkpoint(dirty);
1516 SERIALIZE_SCALAR(bad_checkpoint);
1517}
1518
1519void
1520BaseCache::unserialize(CheckpointIn &cp)
1521{
1522 bool bad_checkpoint;
1523 UNSERIALIZE_SCALAR(bad_checkpoint);
1524 if (bad_checkpoint) {
1525 fatal("Restoring from checkpoints with dirty caches is not "
1526 "supported in the classic memory system. Please remove any "
1527 "caches or drain them properly before taking checkpoints.\n");
1528 }
1529}
1530
1531void
1532BaseCache::regStats()
1533{
1534 MemObject::regStats();
1535
1536 using namespace Stats;
1537
1538 // Hit statistics
1539 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1540 MemCmd cmd(access_idx);
1541 const string &cstr = cmd.toString();
1542
1543 hits[access_idx]
1544 .init(system->maxMasters())
1545 .name(name() + "." + cstr + "_hits")
1546 .desc("number of " + cstr + " hits")
1547 .flags(total | nozero | nonan)
1548 ;
1549 for (int i = 0; i < system->maxMasters(); i++) {
1550 hits[access_idx].subname(i, system->getMasterName(i));
1551 }
1552 }
1553
1554// These macros make it easier to sum the right subset of commands and
1555// to change the subset of commands that are considered "demand" vs
1556// "non-demand"
1557#define SUM_DEMAND(s) \
1558 (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \
1559 s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq])
1560
1561// should writebacks be included here? prior code was inconsistent...
1562#define SUM_NON_DEMAND(s) \
1563 (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq])
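// As an illustration, SUM_DEMAND(hits) expands to the Stats formula
//   hits[MemCmd::ReadReq] + hits[MemCmd::WriteReq] + ... +
//   hits[MemCmd::ReadSharedReq],
// so demandHits below is a derived formula rather than a raw counter.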
1564
1565 demandHits
1566 .name(name() + ".demand_hits")
1567 .desc("number of demand (read+write) hits")
1568 .flags(total | nozero | nonan)
1569 ;
1570 demandHits = SUM_DEMAND(hits);
1571 for (int i = 0; i < system->maxMasters(); i++) {
1572 demandHits.subname(i, system->getMasterName(i));
1573 }
1574
1575 overallHits
1576 .name(name() + ".overall_hits")
1577 .desc("number of overall hits")
1578 .flags(total | nozero | nonan)
1579 ;
1580 overallHits = demandHits + SUM_NON_DEMAND(hits);
1581 for (int i = 0; i < system->maxMasters(); i++) {
1582 overallHits.subname(i, system->getMasterName(i));
1583 }
1584
1585 // Miss statistics
1586 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1587 MemCmd cmd(access_idx);
1588 const string &cstr = cmd.toString();
1589
1590 misses[access_idx]
1591 .init(system->maxMasters())
1592 .name(name() + "." + cstr + "_misses")
1593 .desc("number of " + cstr + " misses")
1594 .flags(total | nozero | nonan)
1595 ;
1596 for (int i = 0; i < system->maxMasters(); i++) {
1597 misses[access_idx].subname(i, system->getMasterName(i));
1598 }
1599 }
1600
1601 demandMisses
1602 .name(name() + ".demand_misses")
1603 .desc("number of demand (read+write) misses")
1604 .flags(total | nozero | nonan)
1605 ;
1606 demandMisses = SUM_DEMAND(misses);
1607 for (int i = 0; i < system->maxMasters(); i++) {
1608 demandMisses.subname(i, system->getMasterName(i));
1609 }
1610
1611 overallMisses
1612 .name(name() + ".overall_misses")
1613 .desc("number of overall misses")
1614 .flags(total | nozero | nonan)
1615 ;
1616 overallMisses = demandMisses + SUM_NON_DEMAND(misses);
1617 for (int i = 0; i < system->maxMasters(); i++) {
1618 overallMisses.subname(i, system->getMasterName(i));
1619 }
1620
1621 // Miss latency statistics
1622 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1623 MemCmd cmd(access_idx);
1624 const string &cstr = cmd.toString();
1625
1626 missLatency[access_idx]
1627 .init(system->maxMasters())
1628 .name(name() + "." + cstr + "_miss_latency")
1629 .desc("number of " + cstr + " miss cycles")
1630 .flags(total | nozero | nonan)
1631 ;
1632 for (int i = 0; i < system->maxMasters(); i++) {
1633 missLatency[access_idx].subname(i, system->getMasterName(i));
1634 }
1635 }
1636
1637 demandMissLatency
1638 .name(name() + ".demand_miss_latency")
1639 .desc("number of demand (read+write) miss cycles")
1640 .flags(total | nozero | nonan)
1641 ;
1642 demandMissLatency = SUM_DEMAND(missLatency);
1643 for (int i = 0; i < system->maxMasters(); i++) {
1644 demandMissLatency.subname(i, system->getMasterName(i));
1645 }
1646
1647 overallMissLatency
1648 .name(name() + ".overall_miss_latency")
1649 .desc("number of overall miss cycles")
1650 .flags(total | nozero | nonan)
1651 ;
1652 overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
1653 for (int i = 0; i < system->maxMasters(); i++) {
1654 overallMissLatency.subname(i, system->getMasterName(i));
1655 }
1656
1657 // access formulas
1658 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1659 MemCmd cmd(access_idx);
1660 const string &cstr = cmd.toString();
1661
1662 accesses[access_idx]
1663 .name(name() + "." + cstr + "_accesses")
1664 .desc("number of " + cstr + " accesses(hits+misses)")
1665 .flags(total | nozero | nonan)
1666 ;
1667 accesses[access_idx] = hits[access_idx] + misses[access_idx];
1668
1669 for (int i = 0; i < system->maxMasters(); i++) {
1670 accesses[access_idx].subname(i, system->getMasterName(i));
1671 }
1672 }
1673
1674 demandAccesses
1675 .name(name() + ".demand_accesses")
1676 .desc("number of demand (read+write) accesses")
1677 .flags(total | nozero | nonan)
1678 ;
1679 demandAccesses = demandHits + demandMisses;
1680 for (int i = 0; i < system->maxMasters(); i++) {
1681 demandAccesses.subname(i, system->getMasterName(i));
1682 }
1683
1684 overallAccesses
1685 .name(name() + ".overall_accesses")
1686 .desc("number of overall (read+write) accesses")
1687 .flags(total | nozero | nonan)
1688 ;
1689 overallAccesses = overallHits + overallMisses;
1690 for (int i = 0; i < system->maxMasters(); i++) {
1691 overallAccesses.subname(i, system->getMasterName(i));
1692 }
1693
1694 // miss rate formulas
1695 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1696 MemCmd cmd(access_idx);
1697 const string &cstr = cmd.toString();
1698
1699 missRate[access_idx]
1700 .name(name() + "." + cstr + "_miss_rate")
1701 .desc("miss rate for " + cstr + " accesses")
1702 .flags(total | nozero | nonan)
1703 ;
1704 missRate[access_idx] = misses[access_idx] / accesses[access_idx];
1705
1706 for (int i = 0; i < system->maxMasters(); i++) {
1707 missRate[access_idx].subname(i, system->getMasterName(i));
1708 }
1709 }
1710
1711 demandMissRate
1712 .name(name() + ".demand_miss_rate")
1713 .desc("miss rate for demand accesses")
1714 .flags(total | nozero | nonan)
1715 ;
1716 demandMissRate = demandMisses / demandAccesses;
1717 for (int i = 0; i < system->maxMasters(); i++) {
1718 demandMissRate.subname(i, system->getMasterName(i));
1719 }
1720
1721 overallMissRate
1722 .name(name() + ".overall_miss_rate")
1723 .desc("miss rate for overall accesses")
1724 .flags(total | nozero | nonan)
1725 ;
1726 overallMissRate = overallMisses / overallAccesses;
1727 for (int i = 0; i < system->maxMasters(); i++) {
1728 overallMissRate.subname(i, system->getMasterName(i));
1729 }
1730
1731 // miss latency formulas
1732 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1733 MemCmd cmd(access_idx);
1734 const string &cstr = cmd.toString();
1735
1736 avgMissLatency[access_idx]
1737 .name(name() + "." + cstr + "_avg_miss_latency")
1738 .desc("average " + cstr + " miss latency")
1739 .flags(total | nozero | nonan)
1740 ;
1741 avgMissLatency[access_idx] =
1742 missLatency[access_idx] / misses[access_idx];
1743
1744 for (int i = 0; i < system->maxMasters(); i++) {
1745 avgMissLatency[access_idx].subname(i, system->getMasterName(i));
1746 }
1747 }
1748
1749 demandAvgMissLatency
1750 .name(name() + ".demand_avg_miss_latency")
1751 .desc("average overall miss latency")
1752 .flags(total | nozero | nonan)
1753 ;
1754 demandAvgMissLatency = demandMissLatency / demandMisses;
1755 for (int i = 0; i < system->maxMasters(); i++) {
1756 demandAvgMissLatency.subname(i, system->getMasterName(i));
1757 }
1758
1759 overallAvgMissLatency
1760 .name(name() + ".overall_avg_miss_latency")
1761 .desc("average overall miss latency")
1762 .flags(total | nozero | nonan)
1763 ;
1764 overallAvgMissLatency = overallMissLatency / overallMisses;
1765 for (int i = 0; i < system->maxMasters(); i++) {
1766 overallAvgMissLatency.subname(i, system->getMasterName(i));
1767 }
1768
1769 blocked_cycles.init(NUM_BLOCKED_CAUSES);
1770 blocked_cycles
1771 .name(name() + ".blocked_cycles")
1772 .desc("number of cycles access was blocked")
1773 .subname(Blocked_NoMSHRs, "no_mshrs")
1774 .subname(Blocked_NoTargets, "no_targets")
1775 ;
1776
1777
1778 blocked_causes.init(NUM_BLOCKED_CAUSES);
1779 blocked_causes
1780 .name(name() + ".blocked")
1781 .desc("number of cycles access was blocked")
1782 .subname(Blocked_NoMSHRs, "no_mshrs")
1783 .subname(Blocked_NoTargets, "no_targets")
1784 ;
1785
1786 avg_blocked
1787 .name(name() + ".avg_blocked_cycles")
1788 .desc("average number of cycles each access was blocked")
1789 .subname(Blocked_NoMSHRs, "no_mshrs")
1790 .subname(Blocked_NoTargets, "no_targets")
1791 ;
1792
1793 avg_blocked = blocked_cycles / blocked_causes;
1794
1795 unusedPrefetches
1796 .name(name() + ".unused_prefetches")
1797 .desc("number of HardPF blocks evicted w/o reference")
1798 .flags(nozero)
1799 ;
1800
1801 writebacks
1802 .init(system->maxMasters())
1803 .name(name() + ".writebacks")
1804 .desc("number of writebacks")
1805 .flags(total | nozero | nonan)
1806 ;
1807 for (int i = 0; i < system->maxMasters(); i++) {
1808 writebacks.subname(i, system->getMasterName(i));
1809 }
1810
1811 // MSHR statistics
1812 // MSHR hit statistics
1813 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1814 MemCmd cmd(access_idx);
1815 const string &cstr = cmd.toString();
1816
1817 mshr_hits[access_idx]
1818 .init(system->maxMasters())
1819 .name(name() + "." + cstr + "_mshr_hits")
1820 .desc("number of " + cstr + " MSHR hits")
1821 .flags(total | nozero | nonan)
1822 ;
1823 for (int i = 0; i < system->maxMasters(); i++) {
1824 mshr_hits[access_idx].subname(i, system->getMasterName(i));
1825 }
1826 }
1827
1828 demandMshrHits
1829 .name(name() + ".demand_mshr_hits")
1830 .desc("number of demand (read+write) MSHR hits")
1831 .flags(total | nozero | nonan)
1832 ;
1833 demandMshrHits = SUM_DEMAND(mshr_hits);
1834 for (int i = 0; i < system->maxMasters(); i++) {
1835 demandMshrHits.subname(i, system->getMasterName(i));
1836 }
1837
1838 overallMshrHits
1839 .name(name() + ".overall_mshr_hits")
1840 .desc("number of overall MSHR hits")
1841 .flags(total | nozero | nonan)
1842 ;
1843 overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
1844 for (int i = 0; i < system->maxMasters(); i++) {
1845 overallMshrHits.subname(i, system->getMasterName(i));
1846 }
1847
1848 // MSHR miss statistics
1849 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1850 MemCmd cmd(access_idx);
1851 const string &cstr = cmd.toString();
1852
1853 mshr_misses[access_idx]
1854 .init(system->maxMasters())
1855 .name(name() + "." + cstr + "_mshr_misses")
1856 .desc("number of " + cstr + " MSHR misses")
1857 .flags(total | nozero | nonan)
1858 ;
1859 for (int i = 0; i < system->maxMasters(); i++) {
1860 mshr_misses[access_idx].subname(i, system->getMasterName(i));
1861 }
1862 }
1863
1864 demandMshrMisses
1865 .name(name() + ".demand_mshr_misses")
1866 .desc("number of demand (read+write) MSHR misses")
1867 .flags(total | nozero | nonan)
1868 ;
1869 demandMshrMisses = SUM_DEMAND(mshr_misses);
1870 for (int i = 0; i < system->maxMasters(); i++) {
1871 demandMshrMisses.subname(i, system->getMasterName(i));
1872 }
1873
1874 overallMshrMisses
1875 .name(name() + ".overall_mshr_misses")
1876 .desc("number of overall MSHR misses")
1877 .flags(total | nozero | nonan)
1878 ;
1879 overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
1880 for (int i = 0; i < system->maxMasters(); i++) {
1881 overallMshrMisses.subname(i, system->getMasterName(i));
1882 }
1883
1884 // MSHR miss latency statistics
1885 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1886 MemCmd cmd(access_idx);
1887 const string &cstr = cmd.toString();
1888
1889 mshr_miss_latency[access_idx]
1890 .init(system->maxMasters())
1891 .name(name() + "." + cstr + "_mshr_miss_latency")
1892 .desc("number of " + cstr + " MSHR miss cycles")
1893 .flags(total | nozero | nonan)
1894 ;
1895 for (int i = 0; i < system->maxMasters(); i++) {
1896 mshr_miss_latency[access_idx].subname(i, system->getMasterName(i));
1897 }
1898 }
1899
1900 demandMshrMissLatency
1901 .name(name() + ".demand_mshr_miss_latency")
1902 .desc("number of demand (read+write) MSHR miss cycles")
1903 .flags(total | nozero | nonan)
1904 ;
1905 demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
1906 for (int i = 0; i < system->maxMasters(); i++) {
1907 demandMshrMissLatency.subname(i, system->getMasterName(i));
1908 }
1909
1910 overallMshrMissLatency
1911 .name(name() + ".overall_mshr_miss_latency")
1912 .desc("number of overall MSHR miss cycles")
1913 .flags(total | nozero | nonan)
1914 ;
1915 overallMshrMissLatency =
1916 demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
1917 for (int i = 0; i < system->maxMasters(); i++) {
1918 overallMshrMissLatency.subname(i, system->getMasterName(i));
1919 }
1920
1921 // MSHR uncacheable statistics
1922 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1923 MemCmd cmd(access_idx);
1924 const string &cstr = cmd.toString();
1925
1926 mshr_uncacheable[access_idx]
1927 .init(system->maxMasters())
1928 .name(name() + "." + cstr + "_mshr_uncacheable")
1929 .desc("number of " + cstr + " MSHR uncacheable")
1930 .flags(total | nozero | nonan)
1931 ;
1932 for (int i = 0; i < system->maxMasters(); i++) {
1933 mshr_uncacheable[access_idx].subname(i, system->getMasterName(i));
1934 }
1935 }
1936
1937 overallMshrUncacheable
1938 .name(name() + ".overall_mshr_uncacheable_misses")
1939 .desc("number of overall MSHR uncacheable misses")
1940 .flags(total | nozero | nonan)
1941 ;
1942 overallMshrUncacheable =
1943 SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
1944 for (int i = 0; i < system->maxMasters(); i++) {
1945 overallMshrUncacheable.subname(i, system->getMasterName(i));
1946 }
1947
    // MSHR uncacheable latency statistics
1949 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1950 MemCmd cmd(access_idx);
1951 const string &cstr = cmd.toString();
1952
1953 mshr_uncacheable_lat[access_idx]
1954 .init(system->maxMasters())
1955 .name(name() + "." + cstr + "_mshr_uncacheable_latency")
1956 .desc("number of " + cstr + " MSHR uncacheable cycles")
1957 .flags(total | nozero | nonan)
1958 ;
1959 for (int i = 0; i < system->maxMasters(); i++) {
1960 mshr_uncacheable_lat[access_idx].subname(
1961 i, system->getMasterName(i));
1962 }
1963 }
1964
1965 overallMshrUncacheableLatency
1966 .name(name() + ".overall_mshr_uncacheable_latency")
1967 .desc("number of overall MSHR uncacheable cycles")
1968 .flags(total | nozero | nonan)
1969 ;
1970 overallMshrUncacheableLatency =
1971 SUM_DEMAND(mshr_uncacheable_lat) +
1972 SUM_NON_DEMAND(mshr_uncacheable_lat);
1973 for (int i = 0; i < system->maxMasters(); i++) {
1974 overallMshrUncacheableLatency.subname(i, system->getMasterName(i));
1975 }
1976
1977#if 0
1978 // MSHR access formulas
1979 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1980 MemCmd cmd(access_idx);
1981 const string &cstr = cmd.toString();
1982
1983 mshrAccesses[access_idx]
1984 .name(name() + "." + cstr + "_mshr_accesses")
1985 .desc("number of " + cstr + " mshr accesses(hits+misses)")
1986 .flags(total | nozero | nonan)
1987 ;
1988 mshrAccesses[access_idx] =
1989 mshr_hits[access_idx] + mshr_misses[access_idx]
1990 + mshr_uncacheable[access_idx];
1991 }
1992
1993 demandMshrAccesses
1994 .name(name() + ".demand_mshr_accesses")
1995 .desc("number of demand (read+write) mshr accesses")
1996 .flags(total | nozero | nonan)
1997 ;
1998 demandMshrAccesses = demandMshrHits + demandMshrMisses;
1999
2000 overallMshrAccesses
2001 .name(name() + ".overall_mshr_accesses")
2002 .desc("number of overall (read+write) mshr accesses")
2003 .flags(total | nozero | nonan)
2004 ;
2005 overallMshrAccesses = overallMshrHits + overallMshrMisses
2006 + overallMshrUncacheable;
2007#endif
2008
2009 // MSHR miss rate formulas
2010 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2011 MemCmd cmd(access_idx);
2012 const string &cstr = cmd.toString();
2013
2014 mshrMissRate[access_idx]
2015 .name(name() + "." + cstr + "_mshr_miss_rate")
2016 .desc("mshr miss rate for " + cstr + " accesses")
2017 .flags(total | nozero | nonan)
2018 ;
2019 mshrMissRate[access_idx] =
2020 mshr_misses[access_idx] / accesses[access_idx];
2021
2022 for (int i = 0; i < system->maxMasters(); i++) {
2023 mshrMissRate[access_idx].subname(i, system->getMasterName(i));
2024 }
2025 }
2026
2027 demandMshrMissRate
2028 .name(name() + ".demand_mshr_miss_rate")
2029 .desc("mshr miss rate for demand accesses")
2030 .flags(total | nozero | nonan)
2031 ;
2032 demandMshrMissRate = demandMshrMisses / demandAccesses;
2033 for (int i = 0; i < system->maxMasters(); i++) {
2034 demandMshrMissRate.subname(i, system->getMasterName(i));
2035 }
2036
2037 overallMshrMissRate
2038 .name(name() + ".overall_mshr_miss_rate")
2039 .desc("mshr miss rate for overall accesses")
2040 .flags(total | nozero | nonan)
2041 ;
2042 overallMshrMissRate = overallMshrMisses / overallAccesses;
2043 for (int i = 0; i < system->maxMasters(); i++) {
2044 overallMshrMissRate.subname(i, system->getMasterName(i));
2045 }
2046
2047 // mshrMiss latency formulas
2048 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2049 MemCmd cmd(access_idx);
2050 const string &cstr = cmd.toString();
2051
2052 avgMshrMissLatency[access_idx]
2053 .name(name() + "." + cstr + "_avg_mshr_miss_latency")
2054 .desc("average " + cstr + " mshr miss latency")
2055 .flags(total | nozero | nonan)
2056 ;
2057 avgMshrMissLatency[access_idx] =
2058 mshr_miss_latency[access_idx] / mshr_misses[access_idx];
2059
2060 for (int i = 0; i < system->maxMasters(); i++) {
2061 avgMshrMissLatency[access_idx].subname(
2062 i, system->getMasterName(i));
2063 }
2064 }
2065
2066 demandAvgMshrMissLatency
2067 .name(name() + ".demand_avg_mshr_miss_latency")
2068 .desc("average overall mshr miss latency")
2069 .flags(total | nozero | nonan)
2070 ;
2071 demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
2072 for (int i = 0; i < system->maxMasters(); i++) {
2073 demandAvgMshrMissLatency.subname(i, system->getMasterName(i));
2074 }
2075
2076 overallAvgMshrMissLatency
2077 .name(name() + ".overall_avg_mshr_miss_latency")
2078 .desc("average overall mshr miss latency")
2079 .flags(total | nozero | nonan)
2080 ;
2081 overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
2082 for (int i = 0; i < system->maxMasters(); i++) {
2083 overallAvgMshrMissLatency.subname(i, system->getMasterName(i));
2084 }
2085
2086 // mshrUncacheable latency formulas
2087 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2088 MemCmd cmd(access_idx);
2089 const string &cstr = cmd.toString();
2090
2091 avgMshrUncacheableLatency[access_idx]
2092 .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency")
2093 .desc("average " + cstr + " mshr uncacheable latency")
2094 .flags(total | nozero | nonan)
2095 ;
2096 avgMshrUncacheableLatency[access_idx] =
2097 mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
2098
2099 for (int i = 0; i < system->maxMasters(); i++) {
2100 avgMshrUncacheableLatency[access_idx].subname(
2101 i, system->getMasterName(i));
2102 }
2103 }
2104
2105 overallAvgMshrUncacheableLatency
2106 .name(name() + ".overall_avg_mshr_uncacheable_latency")
2107 .desc("average overall mshr uncacheable latency")
2108 .flags(total | nozero | nonan)
2109 ;
2110 overallAvgMshrUncacheableLatency =
2111 overallMshrUncacheableLatency / overallMshrUncacheable;
2112 for (int i = 0; i < system->maxMasters(); i++) {
2113 overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i));
2114 }
2115
2116 replacements
2117 .name(name() + ".replacements")
2118 .desc("number of replacements")
2119 ;
2120}
2121
2122///////////////
2123//
2124// CpuSidePort
2125//
2126///////////////
2127bool
2128BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
2129{
2130 // Snoops shouldn't happen when bypassing caches
2131 assert(!cache->system->bypassCaches());
2132
2133 assert(pkt->isResponse());
2134
2135 // Express snoop responses from master to slave, e.g., from L1 to L2
2136 cache->recvTimingSnoopResp(pkt);
2137 return true;
2138}
2139
2140
2141bool
2142BaseCache::CpuSidePort::tryTiming(PacketPtr pkt)
2143{
2144 if (cache->system->bypassCaches() || pkt->isExpressSnoop()) {
2145 // always let express snoop packets through even if blocked
2146 return true;
2147 } else if (blocked || mustSendRetry) {
2148 // either already committed to send a retry, or blocked
2149 mustSendRetry = true;
2150 return false;
2151 }
2152 mustSendRetry = false;
2153 return true;
2154}
2155
2156bool
2157BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
2158{
2159 assert(pkt->isRequest());
2160
2161 if (cache->system->bypassCaches()) {
2162 // Just forward the packet if caches are disabled.
2163 // @todo This should really enqueue the packet rather
2164 bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
2165 assert(success);
2166 return true;
2167 } else if (tryTiming(pkt)) {
2168 cache->recvTimingReq(pkt);
2169 return true;
2170 }
2171 return false;
2172}
2173
2174Tick
2175BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
2176{
2177 if (cache->system->bypassCaches()) {
2178 // Forward the request if the system is in cache bypass mode.
2179 return cache->memSidePort.sendAtomic(pkt);
2180 } else {
2181 return cache->recvAtomic(pkt);
2182 }
2183}
2184
2185void
2186BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
2187{
2188 if (cache->system->bypassCaches()) {
2189 // The cache should be flushed if we are in cache bypass mode,
2190 // so we don't need to check if we need to update anything.
2191 cache->memSidePort.sendFunctional(pkt);
2192 return;
2193 }
2194
2195 // functional request
2196 cache->functionalAccess(pkt, true);
2197}
2198
2199AddrRangeList
2200BaseCache::CpuSidePort::getAddrRanges() const
2201{
2202 return cache->getAddrRanges();
2203}
2204
2205
2206BaseCache::
2207CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
2208 const std::string &_label)
2209 : CacheSlavePort(_name, _cache, _label), cache(_cache)
2210{
2211}
2212
2213///////////////
2214//
2215// MemSidePort
2216//
2217///////////////
2218bool
2219BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
2220{
2221 cache->recvTimingResp(pkt);
2222 return true;
2223}
2224
2225// Express snooping requests to memside port
2226void
2227BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2228{
2229 // Snoops shouldn't happen when bypassing caches
2230 assert(!cache->system->bypassCaches());
2231
2232 // handle snooping requests
2233 cache->recvTimingSnoopReq(pkt);
2234}
2235
2236Tick
2237BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
2238{
2239 // Snoops shouldn't happen when bypassing caches
2240 assert(!cache->system->bypassCaches());
2241
2242 return cache->recvAtomicSnoop(pkt);
2243}
2244
2245void
2246BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
2247{
2248 // Snoops shouldn't happen when bypassing caches
2249 assert(!cache->system->bypassCaches());
2250
2251 // functional snoop (note that in contrast to atomic we don't have
2252 // a specific functionalSnoop method, as they have the same
2253 // behaviour regardless)
2254 cache->functionalAccess(pkt, false);
2255}
2256
2257void
2258BaseCache::CacheReqPacketQueue::sendDeferredPacket()
2259{
2260 // sanity check
2261 assert(!waitingOnRetry);
2262
2263 // there should never be any deferred request packets in the
2264 // queue, instead we resly on the cache to provide the packets
2265 // from the MSHR queue or write queue
2266 assert(deferredPacketReadyTime() == MaxTick);
2267
2268 // check for request packets (requests & writebacks)
2269 QueueEntry* entry = cache.getNextQueueEntry();
2270
2271 if (!entry) {
2272 // can happen if e.g. we attempt a writeback and fail, but
2273 // before the retry, the writeback is eliminated because
2274 // we snoop another cache's ReadEx.
2275 } else {
2276 // let our snoop responses go first if there are responses to
2277 // the same addresses
2278 if (checkConflictingSnoop(entry->blkAddr)) {
2279 return;
2280 }
2281 waitingOnRetry = entry->sendPacket(cache);
2282 }
2283
2284 // if we succeeded and are not waiting for a retry, schedule the
2285 // next send considering when the next queue is ready, note that
2286 // snoop responses have their own packet queue and thus schedule
2287 // their own events
2288 if (!waitingOnRetry) {
2289 schedSendEvent(cache.nextQueueReadyTime());
2290 }
2291}
2292
2293BaseCache::MemSidePort::MemSidePort(const std::string &_name,
2294 BaseCache *_cache,
2295 const std::string &_label)
2296 : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
2297 _reqQueue(*_cache, *this, _snoopRespQueue, _label),
2298 _snoopRespQueue(*_cache, *this, _label), cache(_cache)
2299{
2300}
    blk->status |= BlkValid | BlkReadable;

    // sanity check for whole-line writes, which should always be
    // marked as writable as part of the fill, and then later marked
    // dirty as part of satisfyRequest
    if (pkt->cmd == MemCmd::WriteLineReq) {
        assert(!pkt->hasSharers());
    }

    // here we deal with setting the appropriate state of the line,
    // and we start by looking at the hasSharers flag, and ignore the
    // cacheResponding flag (normally signalling dirty data) if the
    // packet has sharers, thus the line is never allocated as Owned
    // (dirty but not writable), and always ends up being either
    // Shared, Exclusive or Modified, see Packet::setCacheResponding
    // for more details
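    //
    // As a quick reference (a summary of the rules above, not an
    // exhaustive state table):
    //
    //   hasSharers | cacheResponding | resulting state
    //   -----------+-----------------+--------------------------------
    //   false      | false           | Exclusive (writable, clean)
    //   false      | true            | Modified  (writable, dirty)
    //   true       | don't care      | Shared    (not writable, clean)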
    if (!pkt->hasSharers()) {
        // we could get a writable line from memory (rather than a
        // cache); note that we set this bit even for a read-only
        // cache, possibly revisit this decision
        blk->status |= BlkWritable;

        // check if we got this via cache-to-cache transfer (i.e., from a
        // cache that had the block in Modified or Owned state)
        if (pkt->cacheResponding()) {
            // we got the block in Modified state, and invalidated the
            // owner's copy
            blk->status |= BlkDirty;

            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
                          "in read-only cache %s\n", name());
        }
    }

    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
            addr, is_secure ? "s" : "ns", old_state, blk->print());

    // if we got new data, copy it in (checking for a read response
    // and a response that has data is the same in the end)
    if (pkt->isRead()) {
        // sanity checks
        assert(pkt->hasData());
        assert(pkt->getSize() == blkSize);

        pkt->writeDataToBlock(blk->data, blkSize);
    }
    // We pay for fillLatency here.
    blk->whenReady = clockEdge() + fillLatency * clockPeriod() +
        pkt->payloadDelay;

    return blk;
}

CacheBlk*
BaseCache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
{
    // Find replacement victim
    CacheBlk *blk = tags->findVictim(addr);

    // It is valid to return nullptr if there is no victim
    if (!blk)
        return nullptr;

    if (blk->isValid()) {
        Addr repl_addr = tags->regenerateBlkAddr(blk);
        MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
        if (repl_mshr) {
            // must be an outstanding upgrade or clean request
            // on a block we're about to replace...
            assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
                   repl_mshr->isCleaning());
            // too hard to replace block with transient state
            // allocation failed, block not inserted
            return nullptr;
        } else {
            DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
                    "(%s): %s\n", repl_addr, blk->isSecure() ? "s" : "ns",
                    addr, is_secure ? "s" : "ns",
                    blk->isDirty() ? "writeback" : "clean");

            if (blk->wasPrefetched()) {
                unusedPrefetches++;
            }
            evictBlock(blk, writebacks);
            replacements++;
        }
    }

    return blk;
}
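
// A minimal caller sketch for allocateBlock (illustrative only; the
// names follow the fill path in this file, but this is not a verbatim
// call site):
//
//     PacketList writebacks;
//     CacheBlk *victim = allocateBlock(addr, is_secure, writebacks);
//     if (!victim) {
//         // no victim could be freed (e.g. a block with an
//         // outstanding MSHR), fall back to tempBlock or retry later
//     }
//     // send out any writebacks generated by the eviction
//     doWritebacks(writebacks, 0);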

void
BaseCache::invalidateBlock(CacheBlk *blk)
{
    // tempBlock is not managed by the tags, so only tag-owned blocks
    // are invalidated in the tag store
    if (blk != tempBlock)
        tags->invalidate(blk);
    blk->invalidate();
}

PacketPtr
BaseCache::writebackBlk(CacheBlk *blk)
{
    chatty_assert(!isReadOnly || writebackClean,
                  "Writeback from read-only cache");
    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));

    writebacks[Request::wbMasterId]++;

    Request *req = new Request(tags->regenerateBlkAddr(blk), blkSize, 0,
                               Request::wbMasterId);
    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt =
        new Packet(req, blk->isDirty() ?
                   MemCmd::WritebackDirty : MemCmd::WritebackClean);

    DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
            pkt->print(), blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}
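
// Note on the two writeback-style packet factories: writebackBlk above
// is used when the block is being evicted from this cache, whereas
// writecleanBlk below creates a WriteClean that pushes the dirty data
// towards memory while the block itself stays resident here. Both
// clear BlkDirty locally and encode ownership the same way: hasSharers
// set means the downstream copy must not be installed writable.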

PacketPtr
BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
{
    Request *req = new Request(tags->regenerateBlkAddr(blk), blkSize, 0,
                               Request::wbMasterId);
    if (blk->isSecure()) {
        req->setFlags(Request::SECURE);
    }
    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);

    if (dest) {
        req->setFlags(dest);
        pkt->setWriteThrough();
    }

    DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
            blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}
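
// For an in-tree use of writecleanBlk, see sendMSHRQueuePacket further
// down in this file: when a cache clean operation finds a dirty block,
// a WriteClean is created with the destination taken from the clean
// request (pkt->req->getDest()) and handed to doWritebacks().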


void
BaseCache::memWriteback()
{
    tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
}

void
BaseCache::memInvalidate()
{
    tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
}

bool
BaseCache::isDirty() const
{
    return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
}

void
BaseCache::writebackVisitor(CacheBlk &blk)
{
    if (blk.isDirty()) {
        assert(blk.isValid());

        Request request(tags->regenerateBlkAddr(&blk),
                        blkSize, 0, Request::funcMasterId);
        request.taskId(blk.task_id);
        if (blk.isSecure()) {
            request.setFlags(Request::SECURE);
        }

        Packet packet(&request, MemCmd::WriteReq);
        packet.dataStatic(blk.data);

        memSidePort.sendFunctional(&packet);

        blk.status &= ~BlkDirty;
    }
}
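
// The visitors above and below use functional accesses on purpose:
// they are meant to run while the system is drained (e.g. just before
// a checkpoint), where no timing events are scheduled, so the write
// must take effect immediately rather than through the request queues.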

void
BaseCache::invalidateVisitor(CacheBlk &blk)
{
    if (blk.isDirty())
        warn_once("Invalidating dirty cache lines. " \
                  "Expect things to break.\n");

    if (blk.isValid()) {
        assert(!blk.isDirty());
        invalidateBlock(&blk);
    }
}

Tick
BaseCache::nextQueueReadyTime() const
{
    Tick nextReady = std::min(mshrQueue.nextReadyTime(),
                              writeBuffer.nextReadyTime());

    // Don't signal prefetch ready time if no MSHRs available
    // Will signal once enough MSHRs are deallocated
    if (prefetcher && mshrQueue.canPrefetch()) {
        nextReady = std::min(nextReady,
                             prefetcher->nextPrefetchReadyTime());
    }

    return nextReady;
}


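// Return-value convention for the two send functions below: true means
// the send failed and we are now waiting on a retry from the memory
// side (the entry stays in its queue), false means the packet was
// accepted and the entry has been marked in service.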
bool
BaseCache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);

    // either a prefetch that is not present upstream, or a normal
    // MSHR request, proceed to get the packet to send downstream
    PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable());

    mshr->isForward = (pkt == nullptr);

    if (mshr->isForward) {
        // not a cache block request, but a response is expected
        // make copy of current packet to forward, keep current
        // copy for response handling
        pkt = new Packet(tgt_pkt, false, true);
        assert(!pkt->isWrite());
    }

    // play it safe and append (rather than set) the sender state,
    // as forwarded packets may already have existing state
    pkt->pushSenderState(mshr);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty block. Mark
        // the packet so that the destination xbar can determine that
        // there will be a follow-up write packet as well.
        pkt->setSatisfied();
    }

    if (!memSidePort.sendTimingReq(pkt)) {
        // we are awaiting a retry; delete the packet and create a
        // new one when we get the opportunity
        delete pkt;

        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        // As part of the call to sendTimingReq the packet is
        // forwarded to all neighbouring caches (and any caches
        // above them) as a snoop. Thus at this point we know if
        // any of the neighbouring caches are responding, and if
        // so, we know it is dirty, and we can determine if it is
        // being passed as Modified, making our MSHR the ordering
        // point
        bool pending_modified_resp = !pkt->hasSharers() &&
            pkt->cacheResponding();
        markInService(mshr, pending_modified_resp);

        if (pkt->isClean() && blk && blk->isDirty()) {
            // A cache clean operation is looking for a dirty
            // block. If a dirty block is encountered a WriteClean
            // will update any copies on the path to memory up to
            // the point of reference.
            DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
                                             pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);
            doWritebacks(writebacks, 0);
        }

        return false;
    }
}

bool
BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
{
    assert(wq_entry);

    // always a single target for write queue entries
    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;

    DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());

    // forward as is, both for evictions and uncacheable writes
    if (!memSidePort.sendTimingReq(tgt_pkt)) {
        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        markInService(wq_entry);
        return false;
    }
}

void
BaseCache::serialize(CheckpointOut &cp) const
{
    bool dirty(isDirty());

    if (dirty) {
        warn("*** The cache still contains dirty data. ***\n");
        warn("    Make sure to drain the system using the correct flags.\n");
        warn("    This checkpoint will not restore correctly " \
             "and dirty data in the cache will be lost!\n");
    }

    // Since we don't checkpoint the data in the cache, any dirty data
    // will be lost when restoring from a checkpoint of a system that
    // wasn't drained properly. Flag the checkpoint as invalid if the
    // cache contains dirty data.
    bool bad_checkpoint(dirty);
    SERIALIZE_SCALAR(bad_checkpoint);
}

void
BaseCache::unserialize(CheckpointIn &cp)
{
    bool bad_checkpoint;
    UNSERIALIZE_SCALAR(bad_checkpoint);
    if (bad_checkpoint) {
        fatal("Restoring from checkpoints with dirty caches is not "
              "supported in the classic memory system. Please remove any "
              "caches or drain them properly before taking checkpoints.\n");
    }
}
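
// In practice a checkpointing flow is expected to write back dirty
// data first (e.g. by triggering memWriteback() above as part of the
// drain), so that isDirty() is false and bad_checkpoint is serialized
// as false; this describes the intended flow, it is not enforced here.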

void
BaseCache::regStats()
{
    MemObject::regStats();

    using namespace Stats;

    // Hit statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        hits[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_hits")
            .desc("number of " + cstr + " hits")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            hits[access_idx].subname(i, system->getMasterName(i));
        }
    }

// These macros make it easier to sum the right subset of commands and
// to change the subset of commands that are considered "demand" vs
// "non-demand"
#define SUM_DEMAND(s) \
    (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \
     s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq])

// should writebacks be included here? prior code was inconsistent...
#define SUM_NON_DEMAND(s) \
    (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq])
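
    // For example, the demandHits formula registered just below
    // expands (via SUM_DEMAND) to
    //
    //     hits[MemCmd::ReadReq] + hits[MemCmd::WriteReq] +
    //     hits[MemCmd::WriteLineReq] + hits[MemCmd::ReadExReq] +
    //     hits[MemCmd::ReadCleanReq] + hits[MemCmd::ReadSharedReq]
    //
    // i.e. a Stats::Formula over the per-command vectors, re-evaluated
    // at every stats dump.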

    demandHits
        .name(name() + ".demand_hits")
        .desc("number of demand (read+write) hits")
        .flags(total | nozero | nonan)
        ;
    demandHits = SUM_DEMAND(hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandHits.subname(i, system->getMasterName(i));
    }

    overallHits
        .name(name() + ".overall_hits")
        .desc("number of overall hits")
        .flags(total | nozero | nonan)
        ;
    overallHits = demandHits + SUM_NON_DEMAND(hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallHits.subname(i, system->getMasterName(i));
    }

    // Miss statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        misses[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_misses")
            .desc("number of " + cstr + " misses")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            misses[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMisses
        .name(name() + ".demand_misses")
        .desc("number of demand (read+write) misses")
        .flags(total | nozero | nonan)
        ;
    demandMisses = SUM_DEMAND(misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMisses.subname(i, system->getMasterName(i));
    }

    overallMisses
        .name(name() + ".overall_misses")
        .desc("number of overall misses")
        .flags(total | nozero | nonan)
        ;
    overallMisses = demandMisses + SUM_NON_DEMAND(misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMisses.subname(i, system->getMasterName(i));
    }

    // Miss latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        missLatency[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_miss_latency")
            .desc("number of " + cstr + " miss cycles")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            missLatency[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMissLatency
        .name(name() + ".demand_miss_latency")
        .desc("number of demand (read+write) miss cycles")
        .flags(total | nozero | nonan)
        ;
    demandMissLatency = SUM_DEMAND(missLatency);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMissLatency.subname(i, system->getMasterName(i));
    }

    overallMissLatency
        .name(name() + ".overall_miss_latency")
        .desc("number of overall miss cycles")
        .flags(total | nozero | nonan)
        ;
    overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMissLatency.subname(i, system->getMasterName(i));
    }

    // access formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        accesses[access_idx]
            .name(name() + "." + cstr + "_accesses")
            .desc("number of " + cstr + " accesses (hits + misses)")
            .flags(total | nozero | nonan)
            ;
        accesses[access_idx] = hits[access_idx] + misses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            accesses[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandAccesses
        .name(name() + ".demand_accesses")
        .desc("number of demand (read+write) accesses")
        .flags(total | nozero | nonan)
        ;
    demandAccesses = demandHits + demandMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandAccesses.subname(i, system->getMasterName(i));
    }

    overallAccesses
        .name(name() + ".overall_accesses")
        .desc("number of overall (read+write) accesses")
        .flags(total | nozero | nonan)
        ;
    overallAccesses = overallHits + overallMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallAccesses.subname(i, system->getMasterName(i));
    }

    // miss rate formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        missRate[access_idx]
            .name(name() + "." + cstr + "_miss_rate")
            .desc("miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;
        missRate[access_idx] = misses[access_idx] / accesses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            missRate[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMissRate
        .name(name() + ".demand_miss_rate")
        .desc("miss rate for demand accesses")
        .flags(total | nozero | nonan)
        ;
    demandMissRate = demandMisses / demandAccesses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMissRate.subname(i, system->getMasterName(i));
    }

    overallMissRate
        .name(name() + ".overall_miss_rate")
        .desc("miss rate for overall accesses")
        .flags(total | nozero | nonan)
        ;
    overallMissRate = overallMisses / overallAccesses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMissRate.subname(i, system->getMasterName(i));
    }

    // miss latency formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        avgMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_miss_latency")
            .desc("average " + cstr + " miss latency")
            .flags(total | nozero | nonan)
            ;
        avgMissLatency[access_idx] =
            missLatency[access_idx] / misses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            avgMissLatency[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandAvgMissLatency
        .name(name() + ".demand_avg_miss_latency")
        .desc("average demand miss latency")
        .flags(total | nozero | nonan)
        ;
    demandAvgMissLatency = demandMissLatency / demandMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandAvgMissLatency.subname(i, system->getMasterName(i));
    }

    overallAvgMissLatency
        .name(name() + ".overall_avg_miss_latency")
        .desc("average overall miss latency")
        .flags(total | nozero | nonan)
        ;
    overallAvgMissLatency = overallMissLatency / overallMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallAvgMissLatency.subname(i, system->getMasterName(i));
    }

    blocked_cycles.init(NUM_BLOCKED_CAUSES);
    blocked_cycles
        .name(name() + ".blocked_cycles")
        .desc("number of cycles access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;


    blocked_causes.init(NUM_BLOCKED_CAUSES);
    blocked_causes
        .name(name() + ".blocked")
        .desc("number of times access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked
        .name(name() + ".avg_blocked_cycles")
        .desc("average number of cycles each access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked = blocked_cycles / blocked_causes;

    unusedPrefetches
        .name(name() + ".unused_prefetches")
        .desc("number of HardPF blocks evicted w/o reference")
        .flags(nozero)
        ;

    writebacks
        .init(system->maxMasters())
        .name(name() + ".writebacks")
        .desc("number of writebacks")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system->maxMasters(); i++) {
        writebacks.subname(i, system->getMasterName(i));
    }

    // MSHR statistics
    // MSHR hit statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_hits[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_hits")
            .desc("number of " + cstr + " MSHR hits")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_hits[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMshrHits
        .name(name() + ".demand_mshr_hits")
        .desc("number of demand (read+write) MSHR hits")
        .flags(total | nozero | nonan)
        ;
    demandMshrHits = SUM_DEMAND(mshr_hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMshrHits.subname(i, system->getMasterName(i));
    }

    overallMshrHits
        .name(name() + ".overall_mshr_hits")
        .desc("number of overall MSHR hits")
        .flags(total | nozero | nonan)
        ;
    overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrHits.subname(i, system->getMasterName(i));
    }

    // MSHR miss statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_misses[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_misses")
            .desc("number of " + cstr + " MSHR misses")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_misses[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMshrMisses
        .name(name() + ".demand_mshr_misses")
        .desc("number of demand (read+write) MSHR misses")
        .flags(total | nozero | nonan)
        ;
    demandMshrMisses = SUM_DEMAND(mshr_misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMshrMisses.subname(i, system->getMasterName(i));
    }

    overallMshrMisses
        .name(name() + ".overall_mshr_misses")
        .desc("number of overall MSHR misses")
        .flags(total | nozero | nonan)
        ;
    overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrMisses.subname(i, system->getMasterName(i));
    }

    // MSHR miss latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_miss_latency[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_miss_latency")
            .desc("number of " + cstr + " MSHR miss cycles")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_miss_latency[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMshrMissLatency
        .name(name() + ".demand_mshr_miss_latency")
        .desc("number of demand (read+write) MSHR miss cycles")
        .flags(total | nozero | nonan)
        ;
    demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMshrMissLatency.subname(i, system->getMasterName(i));
    }

    overallMshrMissLatency
        .name(name() + ".overall_mshr_miss_latency")
        .desc("number of overall MSHR miss cycles")
        .flags(total | nozero | nonan)
        ;
    overallMshrMissLatency =
        demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrMissLatency.subname(i, system->getMasterName(i));
    }

    // MSHR uncacheable statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_uncacheable[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_uncacheable")
            .desc("number of " + cstr + " MSHR uncacheable")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_uncacheable[access_idx].subname(i, system->getMasterName(i));
        }
    }

    overallMshrUncacheable
        .name(name() + ".overall_mshr_uncacheable_misses")
        .desc("number of overall MSHR uncacheable misses")
        .flags(total | nozero | nonan)
        ;
    overallMshrUncacheable =
        SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrUncacheable.subname(i, system->getMasterName(i));
    }

    // MSHR uncacheable latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_uncacheable_lat[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_uncacheable_latency")
            .desc("number of " + cstr + " MSHR uncacheable cycles")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_uncacheable_lat[access_idx].subname(
                i, system->getMasterName(i));
        }
    }

    overallMshrUncacheableLatency
        .name(name() + ".overall_mshr_uncacheable_latency")
        .desc("number of overall MSHR uncacheable cycles")
        .flags(total | nozero | nonan)
        ;
    overallMshrUncacheableLatency =
        SUM_DEMAND(mshr_uncacheable_lat) +
        SUM_NON_DEMAND(mshr_uncacheable_lat);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrUncacheableLatency.subname(i, system->getMasterName(i));
    }

#if 0
    // MSHR access formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshrAccesses[access_idx]
            .name(name() + "." + cstr + "_mshr_accesses")
            .desc("number of " + cstr + " mshr accesses (hits + misses)")
            .flags(total | nozero | nonan)
            ;
        mshrAccesses[access_idx] =
            mshr_hits[access_idx] + mshr_misses[access_idx]
            + mshr_uncacheable[access_idx];
    }

    demandMshrAccesses
        .name(name() + ".demand_mshr_accesses")
        .desc("number of demand (read+write) mshr accesses")
        .flags(total | nozero | nonan)
        ;
    demandMshrAccesses = demandMshrHits + demandMshrMisses;

    overallMshrAccesses
        .name(name() + ".overall_mshr_accesses")
        .desc("number of overall (read+write) mshr accesses")
        .flags(total | nozero | nonan)
        ;
    overallMshrAccesses = overallMshrHits + overallMshrMisses
        + overallMshrUncacheable;
#endif

    // MSHR miss rate formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshrMissRate[access_idx]
            .name(name() + "." + cstr + "_mshr_miss_rate")
            .desc("mshr miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;
        mshrMissRate[access_idx] =
            mshr_misses[access_idx] / accesses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            mshrMissRate[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMshrMissRate
        .name(name() + ".demand_mshr_miss_rate")
        .desc("mshr miss rate for demand accesses")
        .flags(total | nozero | nonan)
        ;
    demandMshrMissRate = demandMshrMisses / demandAccesses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMshrMissRate.subname(i, system->getMasterName(i));
    }

    overallMshrMissRate
        .name(name() + ".overall_mshr_miss_rate")
        .desc("mshr miss rate for overall accesses")
        .flags(total | nozero | nonan)
        ;
    overallMshrMissRate = overallMshrMisses / overallAccesses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrMissRate.subname(i, system->getMasterName(i));
    }

    // mshrMiss latency formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        avgMshrMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_mshr_miss_latency")
            .desc("average " + cstr + " mshr miss latency")
            .flags(total | nozero | nonan)
            ;
        avgMshrMissLatency[access_idx] =
            mshr_miss_latency[access_idx] / mshr_misses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            avgMshrMissLatency[access_idx].subname(
                i, system->getMasterName(i));
        }
    }

    demandAvgMshrMissLatency
        .name(name() + ".demand_avg_mshr_miss_latency")
        .desc("average demand mshr miss latency")
        .flags(total | nozero | nonan)
        ;
    demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandAvgMshrMissLatency.subname(i, system->getMasterName(i));
    }

    overallAvgMshrMissLatency
        .name(name() + ".overall_avg_mshr_miss_latency")
        .desc("average overall mshr miss latency")
        .flags(total | nozero | nonan)
        ;
    overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallAvgMshrMissLatency.subname(i, system->getMasterName(i));
    }

    // mshrUncacheable latency formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        avgMshrUncacheableLatency[access_idx]
            .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency")
            .desc("average " + cstr + " mshr uncacheable latency")
            .flags(total | nozero | nonan)
            ;
        avgMshrUncacheableLatency[access_idx] =
            mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            avgMshrUncacheableLatency[access_idx].subname(
                i, system->getMasterName(i));
        }
    }

    overallAvgMshrUncacheableLatency
        .name(name() + ".overall_avg_mshr_uncacheable_latency")
        .desc("average overall mshr uncacheable latency")
        .flags(total | nozero | nonan)
        ;
    overallAvgMshrUncacheableLatency =
        overallMshrUncacheableLatency / overallMshrUncacheable;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i));
    }

    replacements
        .name(name() + ".replacements")
        .desc("number of replacements")
        ;
}
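
// Note on the stats above: Stats::Formula objects such as
// demandMissRate are registered once here and re-evaluated lazily at
// every stats dump, so the hot path only updates the underlying
// counters (hits, misses, and so on); no formula needs to be touched
// when an access is recorded.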

///////////////
//
// CpuSidePort
//
///////////////
bool
BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    assert(pkt->isResponse());

    // Express snoop responses from master to slave, e.g., from L1 to L2
    cache->recvTimingSnoopResp(pkt);
    return true;
}


bool
BaseCache::CpuSidePort::tryTiming(PacketPtr pkt)
{
    if (cache->system->bypassCaches() || pkt->isExpressSnoop()) {
        // always let express snoop packets through even if blocked
        return true;
    } else if (blocked || mustSendRetry) {
        // either already committed to send a retry, or blocked
        mustSendRetry = true;
        return false;
    }
    mustSendRetry = false;
    return true;
}
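
// A brief note on the retry protocol above: when tryTiming returns
// false, mustSendRetry is latched, and a retry is sent to the peer
// once the port unblocks (via the port's deferred send-retry event).
// Express snoops must never be blocked, as they are part of the
// crossbar's coherency-ordering mechanism.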

bool
BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
{
    assert(pkt->isRequest());

    if (cache->system->bypassCaches()) {
        // Just forward the packet if caches are disabled.
        // @todo This should really enqueue the packet rather than
        // forwarding it on the spot
        bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
        assert(success);
        return true;
    } else if (tryTiming(pkt)) {
        cache->recvTimingReq(pkt);
        return true;
    }
    return false;
}

Tick
BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
{
    if (cache->system->bypassCaches()) {
        // Forward the request if the system is in cache bypass mode.
        return cache->memSidePort.sendAtomic(pkt);
    } else {
        return cache->recvAtomic(pkt);
    }
}

void
BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
{
    if (cache->system->bypassCaches()) {
        // The cache should be flushed if we are in cache bypass mode,
        // so we don't need to check if we need to update anything.
        cache->memSidePort.sendFunctional(pkt);
        return;
    }

    // functional request
    cache->functionalAccess(pkt, true);
}

AddrRangeList
BaseCache::CpuSidePort::getAddrRanges() const
{
    return cache->getAddrRanges();
}


BaseCache::
CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
                         const std::string &_label)
    : CacheSlavePort(_name, _cache, _label), cache(_cache)
{
}

///////////////
//
// MemSidePort
//
///////////////
bool
BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
{
    cache->recvTimingResp(pkt);
    return true;
}

// Express snooping requests to memside port
void
BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    // handle snooping requests
    cache->recvTimingSnoopReq(pkt);
}

Tick
BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    return cache->recvAtomicSnoop(pkt);
}

void
BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    // functional snoop (note that in contrast to atomic we don't have
    // a specific functionalSnoop method, as they have the same
    // behaviour regardless)
    cache->functionalAccess(pkt, false);
}

void
BaseCache::CacheReqPacketQueue::sendDeferredPacket()
{
    // sanity check
    assert(!waitingOnRetry);

    // there should never be any deferred request packets in the
    // queue, instead we rely on the cache to provide the packets
    // from the MSHR queue or write queue
    assert(deferredPacketReadyTime() == MaxTick);

    // check for request packets (requests & writebacks)
    QueueEntry* entry = cache.getNextQueueEntry();

    if (!entry) {
        // can happen if e.g. we attempt a writeback and fail, but
        // before the retry, the writeback is eliminated because
        // we snoop another cache's ReadEx.
    } else {
        // let our snoop responses go first if there are responses to
        // the same addresses
        if (checkConflictingSnoop(entry->blkAddr)) {
            return;
        }
        waitingOnRetry = entry->sendPacket(cache);
    }

    // if we succeeded and are not waiting for a retry, schedule the
    // next send considering when the next queue is ready, note that
    // snoop responses have their own packet queue and thus schedule
    // their own events
    if (!waitingOnRetry) {
        schedSendEvent(cache.nextQueueReadyTime());
    }
}

BaseCache::MemSidePort::MemSidePort(const std::string &_name,
                                    BaseCache *_cache,
                                    const std::string &_label)
    : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
      _snoopRespQueue(*_cache, *this, _label), cache(_cache)
{
}