base.cc (13717:11e81e2a98bd)
/*
 * Copyright (c) 2012-2013, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "mem/cache/base.hh"

#include "base/compiler.hh"
#include "base/logging.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheRepl.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "mem/cache/queue_entry.hh"
#include "params/BaseCache.hh"
#include "params/WriteAllocator.hh"
#include "sim/core.hh"

class BaseMasterPort;
class BaseSlavePort;

using namespace std;

BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
                                          BaseCache *_cache,
                                          const std::string &_label)
    : QueuedSlavePort(_name, _cache, queue),
      queue(*_cache, *this, true, _label),
      blocked(false), mustSendRetry(false),
      sendRetryEvent([this]{ processSendRetry(); }, _name)
{
}

BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
    : MemObject(p),
      cpuSidePort(p->name + ".cpu_side", this, "CpuSidePort"),
      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
      tags(p->tags),
      prefetcher(p->prefetcher),
      writeAllocator(p->write_allocator),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri),
      blkSize(blk_size),
      lookupLatency(p->tag_latency),
      dataLatency(p->data_latency),
      forwardLatency(p->tag_latency),
      fillLatency(p->data_latency),
      responseLatency(p->response_latency),
      sequentialAccess(p->sequential_access),
      numTarget(p->tgts_per_mshr),
      forwardSnoops(true),
      clusivity(p->clusivity),
      isReadOnly(p->is_read_only),
      blocked(0),
      order(0),
      noTargetMSHR(nullptr),
      missCount(p->max_miss_count),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
      system(p->system)
{
    // the MSHR queue has no reserve entries as we check the MSHR
    // queue on every single allocation, whereas the write queue has
    // as many reserve entries as we have MSHRs, since every MSHR may
    // eventually require a writeback, and we do not check the write
    // buffer before committing to an MSHR

    // forwardSnoops is overridden in init() once we can query
    // whether the connected master is actually snooping or not

    tempBlock = new TempCacheBlk(blkSize);

    tags->tagsInit();
    if (prefetcher)
        prefetcher->setCache(this);
}

BaseCache::~BaseCache()
{
    delete tempBlock;
}

void
BaseCache::CacheSlavePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(CachePort, "Port is blocking new requests\n");
    blocked = true;
    // if we already scheduled a retry in this cycle, but it has not yet
    // happened, cancel it
    if (sendRetryEvent.scheduled()) {
        owner.deschedule(sendRetryEvent);
        DPRINTF(CachePort, "Port descheduled retry\n");
        mustSendRetry = true;
    }
}

void
BaseCache::CacheSlavePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(CachePort, "Port is accepting new requests\n");
    blocked = false;
    if (mustSendRetry) {
        // @TODO: need to find a better time (next cycle?)
        owner.schedule(sendRetryEvent, curTick() + 1);
    }
}

void
BaseCache::CacheSlavePort::processSendRetry()
{
    DPRINTF(CachePort, "Port is sending retry\n");

    // reset the flag and call retry
    mustSendRetry = false;
    sendRetryReq();
}

Addr
BaseCache::regenerateBlkAddr(CacheBlk* blk)
{
    if (blk != tempBlock) {
        return tags->regenerateBlkAddr(blk);
    } else {
        return tempBlock->getAddr();
    }
}

void
BaseCache::init()
{
    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
        fatal("Cache ports on %s are not connected\n", name());
    cpuSidePort.sendRangeChange();
    forwardSnoops = cpuSidePort.isSnooping();
}

BaseMasterPort &
BaseCache::getMasterPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_side") {
        return memSidePort;
    } else {
        return MemObject::getMasterPort(if_name, idx);
    }
}

BaseSlavePort &
BaseCache::getSlavePort(const std::string &if_name, PortID idx)
{
    if (if_name == "cpu_side") {
        return cpuSidePort;
    } else {
        return MemObject::getSlavePort(if_name, idx);
    }
}

bool
BaseCache::inRange(Addr addr) const
{
    for (const auto& r : addrRanges) {
        if (r.contains(addr)) {
            return true;
        }
    }
    return false;
}

void
BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    if (pkt->needsResponse()) {
        pkt->makeTimingResponse();
        // @todo: Make someone pay for this
        pkt->headerDelay = pkt->payloadDelay = 0;

        // In this case we are considering request_time that takes
        // into account the delay of the xbar, if any, and just
        // lat, neglecting responseLatency, modelling hit latency
        // just as the value of lat overridden by access(), which calls
        // the calculateAccessLatency() function.
        cpuSidePort.schedTimingResp(pkt, request_time);
    } else {
        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                pkt->print());

        // queue the packet for deletion, as the sending cache is
        // still relying on it; if the block is found in access(),
        // CleanEvict and Writeback messages will be deleted
        // here as well
        pendingDelete.reset(pkt);
    }
}

void
BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                               Tick forward_time, Tick request_time)
{
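    // The write allocator (if present) observes the stream of writes
    // so it can detect sequences that write whole cache lines and
    // decide whether subsequent write misses should allocate in this
    // cache or bypass it.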
    if (writeAllocator &&
        pkt && pkt->isWrite() && !pkt->req->isUncacheable()) {
        writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
                                   pkt->getBlockAddr(blkSize));
    }

    if (mshr) {
        /// MSHR hit
        /// @note writebacks will be checked in getNextMSHR()
        /// for any conflicting requests to the same block

        //@todo remove hw_pf here

        // Coalesce unless it was a software prefetch (see above).
        if (pkt) {
            assert(!pkt->isWriteback());
            // CleanEvicts corresponding to blocks which have
            // outstanding requests in MSHRs are simply sunk here
            if (pkt->cmd == MemCmd::CleanEvict) {
                pendingDelete.reset(pkt);
            } else if (pkt->cmd == MemCmd::WriteClean) {
                // A WriteClean should never coalesce with any
                // outstanding cache maintenance requests.

                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                        pkt->print());

                assert(pkt->req->masterId() < system->maxMasters());
                mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // We use forward_time here because it is the same
                // considering new targets. We have multiple
                // requests for the same address here. It
                // specifies the latency to allocate an internal
                // buffer and to schedule an event to the queued
                // port and also takes into account the additional
                // delay of the xbar.
                mshr->allocateTarget(pkt, forward_time, order++,
                                     allocOnFill(pkt->cmd));
                if (mshr->getNumTargets() == numTarget) {
                    noTargetMSHR = mshr;
                    setBlocked(Blocked_NoTargets);
                    // need to be careful with this... if this mshr isn't
                    // ready yet (i.e. time > curTick()), we don't want to
                    // move it ahead of mshrs that are ready
                    // mshrQueue.moveToFront(mshr);
                }
            }
        }
    } else {
        // no MSHR
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
            // We use forward_time here because there is a
            // writeback or writeclean, forwarded to WriteBuffer.
            allocateWriteBuffer(pkt, forward_time);
        } else {
            if (blk && blk->isValid()) {
                // If we have a write miss to a valid block, we
                // need to mark the block non-readable. Otherwise
                // if we allow reads while there's an outstanding
                // write miss, the read could return stale data
                // out of the cache block... a more aggressive
                // system could detect the overlap (if any) and
                // forward data out of the MSHRs, but we don't do
                // that yet. Note that we do need to leave the
                // block valid so that it stays in the cache, in
                // case we get an upgrade response (and hence no
                // new data) when the write miss completes.
                // As long as CPUs do proper store/load forwarding
                // internally, and have a sufficiently weak memory
                // model, this is probably unnecessary, but at some
                // point it must have seemed like we needed it...
                assert((pkt->needsWritable() && !blk->isWritable()) ||
                       pkt->req->isCacheMaintenance());
                blk->status &= ~BlkReadable;
            }
            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }
    }
}

void
BaseCache::recvTimingReq(PacketPtr pkt)
{
    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    Cycles lat;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() will set the lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // copy writebacks to write buffer here to ensure they logically
        // precede anything happening below
        doWritebacks(writebacks, forward_time);
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just the value set by the access() function.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    if (satisfied) {
        // notify before anything else as the later handleTimingReqHit
        // might turn the packet into a response
        ppHit->notify(pkt);

        if (prefetcher && blk && blk->wasPrefetched()) {
            blk->status &= ~BlkHWPrefetched;
        }

        handleTimingReqHit(pkt, blk, request_time);
    } else {
        handleTimingReqMiss(pkt, blk, forward_time, request_time);

        ppMiss->notify(pkt);
    }

    if (prefetcher) {
        // track time of availability of next prefetch, if any
        Tick next_pf_time = prefetcher->nextPrefetchReadyTime();
        if (next_pf_time != MaxTick) {
            schedMemSideSendEvent(next_pf_time);
        }
    }
}

void
BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
{
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort.schedTimingResp(pkt, completion_time);
}

void
BaseCache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    const bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    MSHR::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    PacketList writebacks;

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
         mshr->wasWholeLineWrite);

    // make sure that if the mshr was due to a whole line write then
    // the response is an invalidation
    assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

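        // for a whole-line write miss the write allocator (if
        // present) decides whether the fill allocates a block in this
        // cache; otherwise we fall back to the MSHR's
        // allocate-on-fill decision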
        const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
            writeAllocator->allocate() : mshr->allocOnFill();
        blk = handleFill(pkt, blk, writebacks, allocate);
        assert(blk != nullptr);
        ppFill->notify(pkt);
    }

    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
        // The block was marked not readable while there was a pending
        // cache maintenance operation, restore its flag.
        blk->status |= BlkReadable;

        // This was a cache clean operation (without invalidate)
        // and we have a copy of the block already. Since there
        // is no invalidation, we can promote targets that don't
        // require a writable copy
        mshr->promoteReadable();
    }

    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
        // If at this point the referenced block is writable and the
        // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
        mshr->promoteWritable();
    }

    serviceMSHRTargets(mshr, pkt, blk);

    if (mshr->promoteDeferredTargets()) {
        // avoid a later read getting stale data while the write miss is
        // outstanding... see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        // while we deallocate an mshr from the queue we still have to
        // check the isFull condition before and after as we might
        // have been using the reserved entries already
        const bool was_full = mshrQueue.isFull();
        mshrQueue.deallocate(mshr);
        if (was_full && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }

    // if we used the temp block, check to see if it's valid and then
    // clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        evictBlock(blk, writebacks);
    }

    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}


Tick
BaseCache::recvAtomic(PacketPtr pkt)
{
    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty
        // block. If a dirty block is encountered a WriteClean
        // will update any copies along the path to memory
        // until the point of reference.
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);
    assert(writebacks.empty());

    if (!satisfied) {
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention. If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used the temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter), first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = evictBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

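    // lat is in Cycles; multiplying by clockPeriod() converts the
    // accumulated latency to ticks for the atomic return value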
    return lat * clockPeriod();
}

void
BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found. As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                     blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty ||
        cpuSidePort.trySatisfyFunctional(pkt) ||
        mshrQueue.trySatisfyFunctional(pkt, blk_addr) ||
        writeBuffer.trySatisfyFunctional(pkt, blk_addr) ||
        memSidePort.trySatisfyFunctional(pkt);

    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (from_cpu_side) {
            memSidePort.sendFunctional(pkt);
        } else if (cpuSidePort.isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort.sendFunctionalSnoop(pkt);
        }
    }
}


void
BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = pkt->getOffset(blkSize);
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

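    // for a conditional swap, only overwrite memory if the current
    // block contents match the condition value carried by the
    // request; an unconditional SwapReq always overwrites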
    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}

QueueEntry*
BaseCache::getNextQueueEntry()
{
    // Check both MSHR queue and write buffer for potential requests,
    // note that null does not mean there is no request, it could
    // simply be that it is not ready
    MSHR *miss_mshr = mshrQueue.getNext();
    WriteQueueEntry *wq_entry = writeBuffer.getNext();

    // If we got a write buffer request ready, first priority is a
    // full write buffer, otherwise we favour the miss requests
    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
        // need to search MSHR queue for conflicting earlier miss.
        MSHR *conflict_mshr =
            mshrQueue.findPending(wq_entry->blkAddr,
                                  wq_entry->isSecure);

        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
            // Service misses in order until conflict is cleared.
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue write
        return wq_entry;
    } else if (miss_mshr) {
        // need to check for conflicting earlier writeback
        WriteQueueEntry *conflict_mshr =
            writeBuffer.findPending(miss_mshr->blkAddr,
                                    miss_mshr->isSecure);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are
            // doing a write and we didn't have permissions
            // then subsequently saw a writeback (owned got evicted)
            // We need to make sure to perform the writeback first
            // To preserve the dirty data, then we can issue the write

            // should we return wq_entry here instead?  I.e. do we
            // have to flush writes in order?  I don't think so... not
            // for Alpha anyway.  Maybe for x86?
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests.  Try a prefetch.
    assert(!miss_mshr && !wq_entry);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = pkt->getBlockAddr(blkSize);
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt;
            }
        }
    }

    return nullptr;
}

void
BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            // extract data from cache and save it into the data field in
            // the packet as a return value from this atomic op
            int offset = tags->extractBlkOffset(pkt->getAddr());
            uint8_t *blk_data = blk->data + offset;
            pkt->setData(blk_data);

            // execute AMO operation
            (*(pkt->getAtomicOp()))(blk_data);

            // set block status to dirty
            blk->status |= BlkDirty;
        } else {
            cmpAndSwap(blk, pkt);
        }
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);
    } else if (pkt->isUpgrade()) {
        // sanity check
        assert(!pkt->hasSharers());

        if (blk->isDirty()) {
            // we were in the Owned state, and a cache above us that
            // has the line in Shared state needs to be made aware
            // that the data it already has is in fact dirty
            pkt->setCacheResponding();
            blk->status &= ~BlkDirty;
        }
    } else if (pkt->isClean()) {
        blk->status &= ~BlkDirty;
    } else {
        assert(pkt->isInvalidate());
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
                pkt->print());
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////
Cycles
BaseCache::calculateAccessLatency(const CacheBlk* blk,
                                  const Cycles lookup_lat) const
{
    Cycles lat(lookup_lat);

    if (blk != nullptr) {
        if (sequentialAccess) {
            // First access tags, then data
            lat += dataLatency;
        } else {
            // Latency is dictated by the slowest of tag and data latencies
            lat = std::max(lookup_lat, dataLatency);
        }

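        // Worked example (hypothetical numbers): with a 2-cycle tag
        // lookup and a 3-cycle data access, a parallel-access hit
        // costs max(2, 3) = 3 cycles, while a sequential-access cache
        // pays 2 + 3 = 5 cycles.
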
        // Check if the block to be accessed is available. If not, apply the
        // access latency on top of when the block is ready to be accessed.
        const Tick when_ready = blk->getWhenReady();
        if (when_ready > curTick() &&
            ticksToCycles(when_ready - curTick()) > lat) {
            lat += ticksToCycles(when_ready - curTick());
        }
    }

    return lat;
}

bool
BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                  PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    // Access block in the tags
    Cycles tag_latency(0);
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), tag_latency);

    // Calculate access latency
    lat = calculateAccessLatency(blk, tag_latency);

    DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
            blk ? "hit " + blk->print() : "miss");

    if (pkt->req->isCacheMaintenance()) {
        // A cache maintenance operation is always forwarded to the
        // memory below even if the block is found in dirty state.

        // We defer any changes to the state of the block until we
        // create and mark as in service the mshr for the downstream
        // packet.
        return false;
    }

    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible case is a CleanEvict packet coming from above
        // encountering a Writeback generated in this cache and waiting
        // in the write buffer. Cases of upper level peer caches
        // generating CleanEvict and Writeback or simply CleanEvict and
        // CleanEvict almost simultaneously will be caught by snoops sent
        // out by the crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // The CleanEvict and WritebackClean snoop into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();
                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is a special case. We can write the block into
    // the cache without having a writeable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we have outstanding
        // accesses to a block, do the simple thing for now and drop
        // the clean writeback so that we do not upset any
        // ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());
            return true;
        }

        if (!blk) {
            // need to do a replacement
            blk = allocateBlock(pkt, writebacks);
            if (!blk) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }

            blk->status |= BlkReadable;
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            // TODO: the coherent cache can assert(!blk->isDirty());
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing
        // writable, and we got the writeback in Modified or Exclusive
        // state, if not we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);
        // populate the time when the block will be ready to access.
        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
                          pkt->payloadDelay);
        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        if (blk) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if (pkt->cmd == MemCmd::WriteClean) {
        // WriteClean handling is a special case. We can allocate a
        // block directly if it doesn't exist and we can update the
        // block immediately. The WriteClean transfers the ownership
        // of the block as well.
        assert(blkSize == pkt->getSize());

        if (!blk) {
            if (pkt->writeThrough()) {
                // if this is a write through packet, we don't try to
                // allocate if the block is not present
                return false;
            } else {
                // a writeback that misses needs to allocate a new block
                blk = allocateBlock(pkt, writebacks);
                if (!blk) {
                    // no replaceable block available: give up, fwd to
                    // next level.
                    incMissCount(pkt);
                    return false;
                }

                blk->status |= BlkReadable;
            }
        }

        // at this point either this is a writeback or a write-through
        // write clean operation and the block is already in this
        // cache, we need to update the data and the block flags
        assert(blk);
        // TODO: the coherent cache can assert(!blk->isDirty());
        if (!pkt->writeThrough()) {
            blk->status |= BlkDirty;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());

        incHitCount(pkt);
        // populate the time when the block will be ready to access.
        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
                          pkt->payloadDelay);
        // if this is a write-through packet it will be sent to the
        // cache below
        return !pkt->writeThrough();
    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
                       blk->isReadable())) {
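        // a request that needs a writable copy can only hit on a
        // writable block; any other request is satisfied by a merely
        // readable copy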
        // OK to satisfy access
        incHitCount(pkt);
        satisfyRequest(pkt, blk);
        maintainClusivity(pkt->fromCache(), blk);

        return true;
    }

    // Can't satisfy access normally... either no block (blk == nullptr)
    // or have block but need writable

    incMissCount(pkt);

    if (!blk && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

void
BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
        clusivity == Enums::mostly_excl) {
        // if we have responded to a cache, and our block is still
        // valid, but not dirty, and this cache is mostly exclusive
        // with respect to the cache above, drop the block
        invalidateBlock(blk);
    }
}

1127CacheBlk*
1128BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
1129 bool allocate)
1130{
1131 assert(pkt->isResponse());
1132 Addr addr = pkt->getAddr();
1133 bool is_secure = pkt->isSecure();
1134#if TRACING_ON
1135 CacheBlk::State old_state = blk ? blk->status : 0;
1136#endif
1137
1138 // When handling a fill, we should have no writes to this line.
1139 assert(addr == pkt->getBlockAddr(blkSize));
1140 assert(!writeBuffer.findMatch(addr, is_secure));
1141
1142 if (!blk) {
1143 // better have read new data...
1144 assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);
1145
1146 // need to do a replacement if allocating, otherwise we stick
1147 // with the temporary storage
1148 blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;
1149
1150 if (!blk) {
1151 // No replaceable block or a mostly exclusive
1152 // cache... just use temporary storage to complete the
1153 // current request and then get rid of it
1154 blk = tempBlock;
1155 tempBlock->insert(addr, is_secure);
1156 DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
1157 is_secure ? "s" : "ns");
1158 }
1159 } else {
1160 // existing block... probably an upgrade
1161 // don't clear block status... if block is already dirty we
1162 // don't want to lose that
1163 }
1164
1165 // Block is guaranteed to be valid at this point
1166 assert(blk->isValid());
1167 assert(blk->isSecure() == is_secure);
1168 assert(regenerateBlkAddr(blk) == addr);
1169
1170 blk->status |= BlkReadable;
1171
1172 // sanity check for whole-line writes, which should always be
1173 // marked as writable as part of the fill, and then later marked
1174 // dirty as part of satisfyRequest
1175 if (pkt->cmd == MemCmd::InvalidateResp) {
1176 assert(!pkt->hasSharers());
1177 }
1178
1179 // here we deal with setting the appropriate state of the line,
1180 // and we start by looking at the hasSharers flag, and ignore the
1181 // cacheResponding flag (normally signalling dirty data) if the
1182 // packet has sharers, thus the line is never allocated as Owned
1183 // (dirty but not writable), and always ends up being either
1184 // Shared, Exclusive or Modified, see Packet::setCacheResponding
1185 // for more details
1186 if (!pkt->hasSharers()) {
1187 // we could get a writable line from memory (rather than a
1188 // cache) even in a read-only cache, note that we set this bit
1189 // even for a read-only cache, possibly revisit this decision
1190 blk->status |= BlkWritable;
1191
1192 // check if we got this via cache-to-cache transfer (i.e., from a
1193 // cache that had the block in Modified or Owned state)
1194 if (pkt->cacheResponding()) {
1195 // we got the block in Modified state, and invalidated the
1196 // owners copy
1197 blk->status |= BlkDirty;
1198
1199 chatty_assert(!isReadOnly, "Should never see dirty snoop response "
1200 "in read-only cache %s\n", name());
1201 }
1202 }
1203
1204 DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
1205 addr, is_secure ? "s" : "ns", old_state, blk->print());
1206
1207 // if we got new data, copy it in (checking for a read response
1208 // and a response that has data is the same in the end)
1209 if (pkt->isRead()) {
1210 // sanity checks
1211 assert(pkt->hasData());
1212 assert(pkt->getSize() == blkSize);
1213
1214 pkt->writeDataToBlock(blk->data, blkSize);
1215 }
1216 // We pay for fillLatency here.
1217 blk->setWhenReady(clockEdge(fillLatency) + pkt->payloadDelay);
1218
1219 return blk;
1220}
1221
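// Reader's note: allocateBlock may legitimately return nullptr, either
// because the replacement policy offers no victim or because a candidate
// victim is in a transient (MSHR-covered) state. Callers must handle the
// failure, following the pattern used in access() and handleFill():
//
//     CacheBlk *blk = allocateBlock(pkt, writebacks);
//     if (!blk) {
//         // allocation failed: forward downstream or fall back to
//         // tempBlock
//     }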
1222CacheBlk*
1223BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
1224{
1225 // Get address
1226 const Addr addr = pkt->getAddr();
1227
1228 // Get secure bit
1229 const bool is_secure = pkt->isSecure();
1230
1231 // Find replacement victim
1232 std::vector<CacheBlk*> evict_blks;
1233 CacheBlk *victim = tags->findVictim(addr, is_secure, evict_blks);
1234
1235 // It is valid to return nullptr if there is no victim
1236 if (!victim)
1237 return nullptr;
1238
1239 // Print victim block's information
1240 DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print());
1241
1242 // Check for transient state allocations. If any of the entries listed
1243 // for eviction has a transient state, the allocation fails
1244 for (const auto& blk : evict_blks) {
1245 if (blk->isValid()) {
1246 Addr repl_addr = regenerateBlkAddr(blk);
1247 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
1248 if (repl_mshr) {
1249 // must be an outstanding upgrade or clean request
1250 // on a block we're about to replace...
1251 assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
1252 repl_mshr->isCleaning());
1253
1254 // too hard to replace block with transient state
1255 // allocation failed, block not inserted
1256 return nullptr;
1257 }
1258 }
1259 }
1260
1261 // The victim will be replaced by a new entry, so increase the replacement
1262 // counter if a valid block is being replaced
1263 if (victim->isValid()) {
1264 DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
1265 "(%s): %s\n", regenerateBlkAddr(victim),
1266 victim->isSecure() ? "s" : "ns",
1267 addr, is_secure ? "s" : "ns",
1268 victim->isDirty() ? "writeback" : "clean");
1269
1270 replacements++;
1271 }
1272
1273    // Evict valid blocks associated with this victim block
1274 for (const auto& blk : evict_blks) {
1275 if (blk->isValid()) {
1276 if (blk->wasPrefetched()) {
1277 unusedPrefetches++;
1278 }
1279
1280 evictBlock(blk, writebacks);
1281 }
1282 }
1283
1284 // Insert new block at victimized entry
1285 tags->insertBlock(addr, is_secure, pkt->req->masterId(),
1286 pkt->req->taskId(), victim);
1287
1288 return victim;
1289}
1290
1291void
1292BaseCache::invalidateBlock(CacheBlk *blk)
1293{
1294 // If handling a block present in the Tags, let it do its invalidation
1295 // process, which will update stats and invalidate the block itself
1296 if (blk != tempBlock) {
1297 tags->invalidate(blk);
1298 } else {
1299 tempBlock->invalidate();
1300 }
1301}
1302
1303void
1304BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
1305{
1306 PacketPtr pkt = evictBlock(blk);
1307 if (pkt) {
1308 writebacks.push_back(pkt);
1309 }
1310}
1311
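// Coherence effect of writebackBlk below (a reader's summary): a writable
// (Modified) block is passed downstream as WritebackDirty without the
// sharers flag, i.e. still in Modified state, and the local copy is
// downgraded to non-writable; a non-writable copy (Owned, or Shared for a
// clean writeback) instead sets hasSharers so the receiver knows other
// copies may exist. In both cases the local copy ends up clean.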
1312PacketPtr
1313BaseCache::writebackBlk(CacheBlk *blk)
1314{
1315 chatty_assert(!isReadOnly || writebackClean,
1316 "Writeback from read-only cache");
1317 assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));
1318
1319 writebacks[Request::wbMasterId]++;
1320
1321 RequestPtr req = std::make_shared<Request>(
1322 regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
1323
1324 if (blk->isSecure())
1325 req->setFlags(Request::SECURE);
1326
1327 req->taskId(blk->task_id);
1328
1329 PacketPtr pkt =
1330 new Packet(req, blk->isDirty() ?
1331 MemCmd::WritebackDirty : MemCmd::WritebackClean);
1332
1333 DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
1334 pkt->print(), blk->isWritable(), blk->isDirty());
1335
1336 if (blk->isWritable()) {
1337 // not asserting shared means we pass the block in modified
1338        // state, mark our own block non-writable
1339 blk->status &= ~BlkWritable;
1340 } else {
1341 // we are in the Owned state, tell the receiver
1342 pkt->setHasSharers();
1343 }
1344
1345 // make sure the block is not marked dirty
1346 blk->status &= ~BlkDirty;
1347
1348 pkt->allocate();
1349 pkt->setDataFromBlock(blk->data, blkSize);
1350
1351 return pkt;
1352}
1353
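// writecleanBlk mirrors writebackBlk but issues a WriteClean, which
// updates copies on the path towards memory up to the point of reference
// rather than performing an eviction. When a destination flag is supplied
// in 'dest' (for instance Request::DST_POC for the point of coherence),
// the packet is additionally marked write-through.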
1354PacketPtr
1355BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
1356{
1357 RequestPtr req = std::make_shared<Request>(
1358 regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
1359
1360 if (blk->isSecure()) {
1361 req->setFlags(Request::SECURE);
1362 }
1363 req->taskId(blk->task_id);
1364
1365 PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);
1366
1367 if (dest) {
1368 req->setFlags(dest);
1369 pkt->setWriteThrough();
1370 }
1371
1372 DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
1373 blk->isWritable(), blk->isDirty());
1374
1375 if (blk->isWritable()) {
1376 // not asserting shared means we pass the block in modified
1377        // state, mark our own block non-writable
1378 blk->status &= ~BlkWritable;
1379 } else {
1380 // we are in the Owned state, tell the receiver
1381 pkt->setHasSharers();
1382 }
1383
1384 // make sure the block is not marked dirty
1385 blk->status &= ~BlkDirty;
1386
1387 pkt->allocate();
1388 pkt->setDataFromBlock(blk->data, blkSize);
1389
1390 return pkt;
1391}
1392
1393
1394void
1395BaseCache::memWriteback()
1396{
1397 tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
1398}
1399
1400void
1401BaseCache::memInvalidate()
1402{
1403 tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
1404}
1405
1406bool
1407BaseCache::isDirty() const
1408{
1409 return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
1410}
1411
1412bool
1413BaseCache::coalesce() const
1414{
1415 return writeAllocator && writeAllocator->coalesce();
1416}
1417
1418void
1419BaseCache::writebackVisitor(CacheBlk &blk)
1420{
1421 if (blk.isDirty()) {
1422 assert(blk.isValid());
1423
1424 RequestPtr request = std::make_shared<Request>(
1425 regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);
1426
1427 request->taskId(blk.task_id);
1428 if (blk.isSecure()) {
1429 request->setFlags(Request::SECURE);
1430 }
1431
1432 Packet packet(request, MemCmd::WriteReq);
1433 packet.dataStatic(blk.data);
1434
1435 memSidePort.sendFunctional(&packet);
1436
1437 blk.status &= ~BlkDirty;
1438 }
1439}
1440
1441void
1442BaseCache::invalidateVisitor(CacheBlk &blk)
1443{
1444 if (blk.isDirty())
1445 warn_once("Invalidating dirty cache lines. " \
1446 "Expect things to break.\n");
1447
1448 if (blk.isValid()) {
1449 assert(!blk.isDirty());
1450 invalidateBlock(&blk);
1451 }
1452}
1453
1454Tick
1455BaseCache::nextQueueReadyTime() const
1456{
1457 Tick nextReady = std::min(mshrQueue.nextReadyTime(),
1458 writeBuffer.nextReadyTime());
1459
1460    // Don't signal the prefetch ready time if no MSHRs are available;
1461    // it will be signalled once enough MSHRs have been deallocated
1462 if (prefetcher && mshrQueue.canPrefetch()) {
1463 nextReady = std::min(nextReady,
1464 prefetcher->nextPrefetchReadyTime());
1465 }
1466
1467 return nextReady;
1468}
1469
1470
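// Return-value convention (a reader's summary, shared with
// sendWriteQueuePacket below): true means the memory-side port refused
// the packet, so we are now waiting for a retry and must not send
// anything else; false means the caller may move on to the next ready
// entry, either because the packet was sent and marked in service or, in
// the write-coalescing case, because the MSHR was merely delayed.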
1471bool
1472BaseCache::sendMSHRQueuePacket(MSHR* mshr)
1473{
1474 assert(mshr);
1475
1476 // use request from 1st target
1477 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1478
1479 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());
1480
1481 // if the cache is in write coalescing mode or (additionally) in
1482 // no allocation mode, and we have a write packet with an MSHR
1483 // that is not a whole-line write (due to incompatible flags etc),
1484 // then reset the write mode
1485 if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) {
1486 if (!mshr->isWholeLineWrite()) {
1487 // if we are currently write coalescing, hold on the
1488 // MSHR as many cycles extra as we need to completely
1489 // write a cache line
1490 if (writeAllocator->delay(mshr->blkAddr)) {
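                // Worked example (illustrative numbers): with a 64-byte
                // line filled by 8-byte write packets, eight writes are
                // needed to cover the line, so the MSHR is held for
                // 64 / 8 * clockPeriod() = 8 cycles.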
1491 Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
1492 DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
1493 "for write coalescing\n", tgt_pkt->print(), delay);
1494 mshrQueue.delay(mshr, delay);
1495 return false;
1496 } else {
1497 writeAllocator->reset();
1498 }
1499 } else {
1500 writeAllocator->resetDelay(mshr->blkAddr);
1501 }
1502 }
1503
1504 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
1505
1506 // either a prefetch that is not present upstream, or a normal
1507 // MSHR request, proceed to get the packet to send downstream
1508 PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(),
1509 mshr->isWholeLineWrite());
1510
1511 mshr->isForward = (pkt == nullptr);
1512
1513 if (mshr->isForward) {
1514 // not a cache block request, but a response is expected
1515 // make copy of current packet to forward, keep current
1516 // copy for response handling
1517 pkt = new Packet(tgt_pkt, false, true);
1518 assert(!pkt->isWrite());
1519 }
1520
1521 // play it safe and append (rather than set) the sender state,
1522 // as forwarded packets may already have existing state
1523 pkt->pushSenderState(mshr);
1524
1525 if (pkt->isClean() && blk && blk->isDirty()) {
1526        // A cache clean operation is looking for a dirty block. Mark
1527 // the packet so that the destination xbar can determine that
1528 // there will be a follow-up write packet as well.
1529 pkt->setSatisfied();
1530 }
1531
1532 if (!memSidePort.sendTimingReq(pkt)) {
1533        // we are awaiting a retry; delete the packet now and
1534        // create a new one when we next get the opportunity to
1535        // send
1536 delete pkt;
1537
1538 // note that we have now masked any requestBus and
1539 // schedSendEvent (we will wait for a retry before
1540 // doing anything), and this is so even if we do not
1541 // care about this packet and might override it before
1542 // it gets retried
1543 return true;
1544 } else {
1545 // As part of the call to sendTimingReq the packet is
1546 // forwarded to all neighbouring caches (and any caches
1547 // above them) as a snoop. Thus at this point we know if
1548 // any of the neighbouring caches are responding, and if
1549 // so, we know it is dirty, and we can determine if it is
1550 // being passed as Modified, making our MSHR the ordering
1551 // point
1552 bool pending_modified_resp = !pkt->hasSharers() &&
1553 pkt->cacheResponding();
1554 markInService(mshr, pending_modified_resp);
1555
1556 if (pkt->isClean() && blk && blk->isDirty()) {
1557            // A cache clean operation is looking for a dirty
1558            // block. If a dirty block is encountered, a WriteClean
1559            // will update any copies on the path to memory, up to
1560            // the point of reference.
1561 DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
1562 __func__, pkt->print(), blk->print());
1563 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
1564 pkt->id);
1565 PacketList writebacks;
1566 writebacks.push_back(wb_pkt);
1567 doWritebacks(writebacks, 0);
1568 }
1569
1570 return false;
1571 }
1572}
1573
1574bool
1575BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
1576{
1577 assert(wq_entry);
1578
1579 // always a single target for write queue entries
1580 PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;
1581
1582 DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());
1583
1584 // forward as is, both for evictions and uncacheable writes
1585 if (!memSidePort.sendTimingReq(tgt_pkt)) {
1586 // note that we have now masked any requestBus and
1587 // schedSendEvent (we will wait for a retry before
1588 // doing anything), and this is so even if we do not
1589 // care about this packet and might override it before
1590 // it gets retried
1591 return true;
1592 } else {
1593 markInService(wq_entry);
1594 return false;
1595 }
1596}
1597
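// Reader's note: checkpointing only records whether the cache held dirty
// data; the cached data itself is never serialized. The round trip is:
// serialize() stores bad_checkpoint via SERIALIZE_SCALAR, and
// unserialize() reads it back with UNSERIALIZE_SCALAR, aborting the
// restore with fatal() if it was set.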
1598void
1599BaseCache::serialize(CheckpointOut &cp) const
1600{
1601 bool dirty(isDirty());
1602
1603 if (dirty) {
1604 warn("*** The cache still contains dirty data. ***\n");
1605 warn(" Make sure to drain the system using the correct flags.\n");
1606 warn(" This checkpoint will not restore correctly " \
1607 "and dirty data in the cache will be lost!\n");
1608 }
1609
1610 // Since we don't checkpoint the data in the cache, any dirty data
1611 // will be lost when restoring from a checkpoint of a system that
1612 // wasn't drained properly. Flag the checkpoint as invalid if the
1613 // cache contains dirty data.
1614 bool bad_checkpoint(dirty);
1615 SERIALIZE_SCALAR(bad_checkpoint);
1616}
1617
1618void
1619BaseCache::unserialize(CheckpointIn &cp)
1620{
1621 bool bad_checkpoint;
1622 UNSERIALIZE_SCALAR(bad_checkpoint);
1623 if (bad_checkpoint) {
1624 fatal("Restoring from checkpoints with dirty caches is not "
1625 "supported in the classic memory system. Please remove any "
1626 "caches or drain them properly before taking checkpoints.\n");
1627 }
1628}
1629
1630void
1631BaseCache::regStats()
1632{
1633 MemObject::regStats();
1634
1635 using namespace Stats;
1636
1637 // Hit statistics
1638 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1639 MemCmd cmd(access_idx);
1640 const string &cstr = cmd.toString();
1641
1642 hits[access_idx]
1643 .init(system->maxMasters())
1644 .name(name() + "." + cstr + "_hits")
1645 .desc("number of " + cstr + " hits")
1646 .flags(total | nozero | nonan)
1647 ;
1648 for (int i = 0; i < system->maxMasters(); i++) {
1649 hits[access_idx].subname(i, system->getMasterName(i));
1650 }
1651 }
1652
1653// These macros make it easier to sum the right subset of commands and
1654// to change the subset of commands that are considered "demand" vs
1655// "non-demand"
1656#define SUM_DEMAND(s) \
1657 (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \
1658 s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq])
1659
1660// should writebacks be included here? prior code was inconsistent...
1661#define SUM_NON_DEMAND(s) \
1662 (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq])
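// As an example of the expansion, SUM_DEMAND(hits) becomes the formula
//     hits[MemCmd::ReadReq] + hits[MemCmd::WriteReq] +
//     hits[MemCmd::WriteLineReq] + hits[MemCmd::ReadExReq] +
//     hits[MemCmd::ReadCleanReq] + hits[MemCmd::ReadSharedReq]
// which the Stats framework evaluates when statistics are dumped.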
1663
1664 demandHits
1665 .name(name() + ".demand_hits")
1666 .desc("number of demand (read+write) hits")
1667 .flags(total | nozero | nonan)
1668 ;
1669 demandHits = SUM_DEMAND(hits);
1670 for (int i = 0; i < system->maxMasters(); i++) {
1671 demandHits.subname(i, system->getMasterName(i));
1672 }
1673
1674 overallHits
1675 .name(name() + ".overall_hits")
1676 .desc("number of overall hits")
1677 .flags(total | nozero | nonan)
1678 ;
1679 overallHits = demandHits + SUM_NON_DEMAND(hits);
1680 for (int i = 0; i < system->maxMasters(); i++) {
1681 overallHits.subname(i, system->getMasterName(i));
1682 }
1683
1684 // Miss statistics
1685 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1686 MemCmd cmd(access_idx);
1687 const string &cstr = cmd.toString();
1688
1689 misses[access_idx]
1690 .init(system->maxMasters())
1691 .name(name() + "." + cstr + "_misses")
1692 .desc("number of " + cstr + " misses")
1693 .flags(total | nozero | nonan)
1694 ;
1695 for (int i = 0; i < system->maxMasters(); i++) {
1696 misses[access_idx].subname(i, system->getMasterName(i));
1697 }
1698 }
1699
1700 demandMisses
1701 .name(name() + ".demand_misses")
1702 .desc("number of demand (read+write) misses")
1703 .flags(total | nozero | nonan)
1704 ;
1705 demandMisses = SUM_DEMAND(misses);
1706 for (int i = 0; i < system->maxMasters(); i++) {
1707 demandMisses.subname(i, system->getMasterName(i));
1708 }
1709
1710 overallMisses
1711 .name(name() + ".overall_misses")
1712 .desc("number of overall misses")
1713 .flags(total | nozero | nonan)
1714 ;
1715 overallMisses = demandMisses + SUM_NON_DEMAND(misses);
1716 for (int i = 0; i < system->maxMasters(); i++) {
1717 overallMisses.subname(i, system->getMasterName(i));
1718 }
1719
1720 // Miss latency statistics
1721 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1722 MemCmd cmd(access_idx);
1723 const string &cstr = cmd.toString();
1724
1725 missLatency[access_idx]
1726 .init(system->maxMasters())
1727 .name(name() + "." + cstr + "_miss_latency")
1728 .desc("number of " + cstr + " miss cycles")
1729 .flags(total | nozero | nonan)
1730 ;
1731 for (int i = 0; i < system->maxMasters(); i++) {
1732 missLatency[access_idx].subname(i, system->getMasterName(i));
1733 }
1734 }
1735
1736 demandMissLatency
1737 .name(name() + ".demand_miss_latency")
1738 .desc("number of demand (read+write) miss cycles")
1739 .flags(total | nozero | nonan)
1740 ;
1741 demandMissLatency = SUM_DEMAND(missLatency);
1742 for (int i = 0; i < system->maxMasters(); i++) {
1743 demandMissLatency.subname(i, system->getMasterName(i));
1744 }
1745
1746 overallMissLatency
1747 .name(name() + ".overall_miss_latency")
1748 .desc("number of overall miss cycles")
1749 .flags(total | nozero | nonan)
1750 ;
1751 overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
1752 for (int i = 0; i < system->maxMasters(); i++) {
1753 overallMissLatency.subname(i, system->getMasterName(i));
1754 }
1755
1756 // access formulas
1757 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1758 MemCmd cmd(access_idx);
1759 const string &cstr = cmd.toString();
1760
1761 accesses[access_idx]
1762 .name(name() + "." + cstr + "_accesses")
1763 .desc("number of " + cstr + " accesses(hits+misses)")
1764 .flags(total | nozero | nonan)
1765 ;
1766 accesses[access_idx] = hits[access_idx] + misses[access_idx];
1767
1768 for (int i = 0; i < system->maxMasters(); i++) {
1769 accesses[access_idx].subname(i, system->getMasterName(i));
1770 }
1771 }
1772
1773 demandAccesses
1774 .name(name() + ".demand_accesses")
1775 .desc("number of demand (read+write) accesses")
1776 .flags(total | nozero | nonan)
1777 ;
1778 demandAccesses = demandHits + demandMisses;
1779 for (int i = 0; i < system->maxMasters(); i++) {
1780 demandAccesses.subname(i, system->getMasterName(i));
1781 }
1782
1783 overallAccesses
1784 .name(name() + ".overall_accesses")
1785 .desc("number of overall (read+write) accesses")
1786 .flags(total | nozero | nonan)
1787 ;
1788 overallAccesses = overallHits + overallMisses;
1789 for (int i = 0; i < system->maxMasters(); i++) {
1790 overallAccesses.subname(i, system->getMasterName(i));
1791 }
1792
1793 // miss rate formulas
1794 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1795 MemCmd cmd(access_idx);
1796 const string &cstr = cmd.toString();
1797
1798 missRate[access_idx]
1799 .name(name() + "." + cstr + "_miss_rate")
1800 .desc("miss rate for " + cstr + " accesses")
1801 .flags(total | nozero | nonan)
1802 ;
1803 missRate[access_idx] = misses[access_idx] / accesses[access_idx];
1804
1805 for (int i = 0; i < system->maxMasters(); i++) {
1806 missRate[access_idx].subname(i, system->getMasterName(i));
1807 }
1808 }
1809
1810 demandMissRate
1811 .name(name() + ".demand_miss_rate")
1812 .desc("miss rate for demand accesses")
1813 .flags(total | nozero | nonan)
1814 ;
1815 demandMissRate = demandMisses / demandAccesses;
1816 for (int i = 0; i < system->maxMasters(); i++) {
1817 demandMissRate.subname(i, system->getMasterName(i));
1818 }
1819
1820 overallMissRate
1821 .name(name() + ".overall_miss_rate")
1822 .desc("miss rate for overall accesses")
1823 .flags(total | nozero | nonan)
1824 ;
1825 overallMissRate = overallMisses / overallAccesses;
1826 for (int i = 0; i < system->maxMasters(); i++) {
1827 overallMissRate.subname(i, system->getMasterName(i));
1828 }
1829
1830 // miss latency formulas
1831 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1832 MemCmd cmd(access_idx);
1833 const string &cstr = cmd.toString();
1834
1835 avgMissLatency[access_idx]
1836 .name(name() + "." + cstr + "_avg_miss_latency")
1837 .desc("average " + cstr + " miss latency")
1838 .flags(total | nozero | nonan)
1839 ;
1840 avgMissLatency[access_idx] =
1841 missLatency[access_idx] / misses[access_idx];
1842
1843 for (int i = 0; i < system->maxMasters(); i++) {
1844 avgMissLatency[access_idx].subname(i, system->getMasterName(i));
1845 }
1846 }
1847
1848 demandAvgMissLatency
1849 .name(name() + ".demand_avg_miss_latency")
1850        .desc("average demand miss latency")
1851 .flags(total | nozero | nonan)
1852 ;
1853 demandAvgMissLatency = demandMissLatency / demandMisses;
1854 for (int i = 0; i < system->maxMasters(); i++) {
1855 demandAvgMissLatency.subname(i, system->getMasterName(i));
1856 }
1857
1858 overallAvgMissLatency
1859 .name(name() + ".overall_avg_miss_latency")
1860 .desc("average overall miss latency")
1861 .flags(total | nozero | nonan)
1862 ;
1863 overallAvgMissLatency = overallMissLatency / overallMisses;
1864 for (int i = 0; i < system->maxMasters(); i++) {
1865 overallAvgMissLatency.subname(i, system->getMasterName(i));
1866 }
1867
1868 blocked_cycles.init(NUM_BLOCKED_CAUSES);
1869 blocked_cycles
1870 .name(name() + ".blocked_cycles")
1871 .desc("number of cycles access was blocked")
1872 .subname(Blocked_NoMSHRs, "no_mshrs")
1873 .subname(Blocked_NoTargets, "no_targets")
1874 ;
1875
1876
1877 blocked_causes.init(NUM_BLOCKED_CAUSES);
1878 blocked_causes
1879 .name(name() + ".blocked")
1880        .desc("number of times access was blocked")
1881 .subname(Blocked_NoMSHRs, "no_mshrs")
1882 .subname(Blocked_NoTargets, "no_targets")
1883 ;
1884
1885 avg_blocked
1886 .name(name() + ".avg_blocked_cycles")
1887 .desc("average number of cycles each access was blocked")
1888 .subname(Blocked_NoMSHRs, "no_mshrs")
1889 .subname(Blocked_NoTargets, "no_targets")
1890 ;
1891
1892 avg_blocked = blocked_cycles / blocked_causes;
1893
1894 unusedPrefetches
1895 .name(name() + ".unused_prefetches")
1896 .desc("number of HardPF blocks evicted w/o reference")
1897 .flags(nozero)
1898 ;
1899
1900 writebacks
1901 .init(system->maxMasters())
1902 .name(name() + ".writebacks")
1903 .desc("number of writebacks")
1904 .flags(total | nozero | nonan)
1905 ;
1906 for (int i = 0; i < system->maxMasters(); i++) {
1907 writebacks.subname(i, system->getMasterName(i));
1908 }
1909
1910 // MSHR statistics
1911 // MSHR hit statistics
1912 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1913 MemCmd cmd(access_idx);
1914 const string &cstr = cmd.toString();
1915
1916 mshr_hits[access_idx]
1917 .init(system->maxMasters())
1918 .name(name() + "." + cstr + "_mshr_hits")
1919 .desc("number of " + cstr + " MSHR hits")
1920 .flags(total | nozero | nonan)
1921 ;
1922 for (int i = 0; i < system->maxMasters(); i++) {
1923 mshr_hits[access_idx].subname(i, system->getMasterName(i));
1924 }
1925 }
1926
1927 demandMshrHits
1928 .name(name() + ".demand_mshr_hits")
1929 .desc("number of demand (read+write) MSHR hits")
1930 .flags(total | nozero | nonan)
1931 ;
1932 demandMshrHits = SUM_DEMAND(mshr_hits);
1933 for (int i = 0; i < system->maxMasters(); i++) {
1934 demandMshrHits.subname(i, system->getMasterName(i));
1935 }
1936
1937 overallMshrHits
1938 .name(name() + ".overall_mshr_hits")
1939 .desc("number of overall MSHR hits")
1940 .flags(total | nozero | nonan)
1941 ;
1942 overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
1943 for (int i = 0; i < system->maxMasters(); i++) {
1944 overallMshrHits.subname(i, system->getMasterName(i));
1945 }
1946
1947 // MSHR miss statistics
1948 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1949 MemCmd cmd(access_idx);
1950 const string &cstr = cmd.toString();
1951
1952 mshr_misses[access_idx]
1953 .init(system->maxMasters())
1954 .name(name() + "." + cstr + "_mshr_misses")
1955 .desc("number of " + cstr + " MSHR misses")
1956 .flags(total | nozero | nonan)
1957 ;
1958 for (int i = 0; i < system->maxMasters(); i++) {
1959 mshr_misses[access_idx].subname(i, system->getMasterName(i));
1960 }
1961 }
1962
1963 demandMshrMisses
1964 .name(name() + ".demand_mshr_misses")
1965 .desc("number of demand (read+write) MSHR misses")
1966 .flags(total | nozero | nonan)
1967 ;
1968 demandMshrMisses = SUM_DEMAND(mshr_misses);
1969 for (int i = 0; i < system->maxMasters(); i++) {
1970 demandMshrMisses.subname(i, system->getMasterName(i));
1971 }
1972
1973 overallMshrMisses
1974 .name(name() + ".overall_mshr_misses")
1975 .desc("number of overall MSHR misses")
1976 .flags(total | nozero | nonan)
1977 ;
1978 overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
1979 for (int i = 0; i < system->maxMasters(); i++) {
1980 overallMshrMisses.subname(i, system->getMasterName(i));
1981 }
1982
1983 // MSHR miss latency statistics
1984 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1985 MemCmd cmd(access_idx);
1986 const string &cstr = cmd.toString();
1987
1988 mshr_miss_latency[access_idx]
1989 .init(system->maxMasters())
1990 .name(name() + "." + cstr + "_mshr_miss_latency")
1991 .desc("number of " + cstr + " MSHR miss cycles")
1992 .flags(total | nozero | nonan)
1993 ;
1994 for (int i = 0; i < system->maxMasters(); i++) {
1995 mshr_miss_latency[access_idx].subname(i, system->getMasterName(i));
1996 }
1997 }
1998
1999 demandMshrMissLatency
2000 .name(name() + ".demand_mshr_miss_latency")
2001 .desc("number of demand (read+write) MSHR miss cycles")
2002 .flags(total | nozero | nonan)
2003 ;
2004 demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
2005 for (int i = 0; i < system->maxMasters(); i++) {
2006 demandMshrMissLatency.subname(i, system->getMasterName(i));
2007 }
2008
2009 overallMshrMissLatency
2010 .name(name() + ".overall_mshr_miss_latency")
2011 .desc("number of overall MSHR miss cycles")
2012 .flags(total | nozero | nonan)
2013 ;
2014 overallMshrMissLatency =
2015 demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
2016 for (int i = 0; i < system->maxMasters(); i++) {
2017 overallMshrMissLatency.subname(i, system->getMasterName(i));
2018 }
2019
2020 // MSHR uncacheable statistics
2021 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2022 MemCmd cmd(access_idx);
2023 const string &cstr = cmd.toString();
2024
2025 mshr_uncacheable[access_idx]
2026 .init(system->maxMasters())
2027 .name(name() + "." + cstr + "_mshr_uncacheable")
2028 .desc("number of " + cstr + " MSHR uncacheable")
2029 .flags(total | nozero | nonan)
2030 ;
2031 for (int i = 0; i < system->maxMasters(); i++) {
2032 mshr_uncacheable[access_idx].subname(i, system->getMasterName(i));
2033 }
2034 }
2035
2036 overallMshrUncacheable
2037 .name(name() + ".overall_mshr_uncacheable_misses")
2038 .desc("number of overall MSHR uncacheable misses")
2039 .flags(total | nozero | nonan)
2040 ;
2041 overallMshrUncacheable =
2042 SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
2043 for (int i = 0; i < system->maxMasters(); i++) {
2044 overallMshrUncacheable.subname(i, system->getMasterName(i));
2045 }
2046
2047    // MSHR uncacheable latency statistics
2048 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2049 MemCmd cmd(access_idx);
2050 const string &cstr = cmd.toString();
2051
2052 mshr_uncacheable_lat[access_idx]
2053 .init(system->maxMasters())
2054 .name(name() + "." + cstr + "_mshr_uncacheable_latency")
2055 .desc("number of " + cstr + " MSHR uncacheable cycles")
2056 .flags(total | nozero | nonan)
2057 ;
2058 for (int i = 0; i < system->maxMasters(); i++) {
2059 mshr_uncacheable_lat[access_idx].subname(
2060 i, system->getMasterName(i));
2061 }
2062 }
2063
2064 overallMshrUncacheableLatency
2065 .name(name() + ".overall_mshr_uncacheable_latency")
2066 .desc("number of overall MSHR uncacheable cycles")
2067 .flags(total | nozero | nonan)
2068 ;
2069 overallMshrUncacheableLatency =
2070 SUM_DEMAND(mshr_uncacheable_lat) +
2071 SUM_NON_DEMAND(mshr_uncacheable_lat);
2072 for (int i = 0; i < system->maxMasters(); i++) {
2073 overallMshrUncacheableLatency.subname(i, system->getMasterName(i));
2074 }
2075
2076#if 0
2077 // MSHR access formulas
2078 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2079 MemCmd cmd(access_idx);
2080 const string &cstr = cmd.toString();
2081
2082 mshrAccesses[access_idx]
2083 .name(name() + "." + cstr + "_mshr_accesses")
2084 .desc("number of " + cstr + " mshr accesses(hits+misses)")
2085 .flags(total | nozero | nonan)
2086 ;
2087 mshrAccesses[access_idx] =
2088 mshr_hits[access_idx] + mshr_misses[access_idx]
2089 + mshr_uncacheable[access_idx];
2090 }
2091
2092 demandMshrAccesses
2093 .name(name() + ".demand_mshr_accesses")
2094 .desc("number of demand (read+write) mshr accesses")
2095 .flags(total | nozero | nonan)
2096 ;
2097 demandMshrAccesses = demandMshrHits + demandMshrMisses;
2098
2099 overallMshrAccesses
2100 .name(name() + ".overall_mshr_accesses")
2101 .desc("number of overall (read+write) mshr accesses")
2102 .flags(total | nozero | nonan)
2103 ;
2104 overallMshrAccesses = overallMshrHits + overallMshrMisses
2105 + overallMshrUncacheable;
2106#endif
2107
2108 // MSHR miss rate formulas
2109 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2110 MemCmd cmd(access_idx);
2111 const string &cstr = cmd.toString();
2112
2113 mshrMissRate[access_idx]
2114 .name(name() + "." + cstr + "_mshr_miss_rate")
2115 .desc("mshr miss rate for " + cstr + " accesses")
2116 .flags(total | nozero | nonan)
2117 ;
2118 mshrMissRate[access_idx] =
2119 mshr_misses[access_idx] / accesses[access_idx];
2120
2121 for (int i = 0; i < system->maxMasters(); i++) {
2122 mshrMissRate[access_idx].subname(i, system->getMasterName(i));
2123 }
2124 }
2125
2126 demandMshrMissRate
2127 .name(name() + ".demand_mshr_miss_rate")
2128 .desc("mshr miss rate for demand accesses")
2129 .flags(total | nozero | nonan)
2130 ;
2131 demandMshrMissRate = demandMshrMisses / demandAccesses;
2132 for (int i = 0; i < system->maxMasters(); i++) {
2133 demandMshrMissRate.subname(i, system->getMasterName(i));
2134 }
2135
2136 overallMshrMissRate
2137 .name(name() + ".overall_mshr_miss_rate")
2138 .desc("mshr miss rate for overall accesses")
2139 .flags(total | nozero | nonan)
2140 ;
2141 overallMshrMissRate = overallMshrMisses / overallAccesses;
2142 for (int i = 0; i < system->maxMasters(); i++) {
2143 overallMshrMissRate.subname(i, system->getMasterName(i));
2144 }
2145
2146 // mshrMiss latency formulas
2147 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2148 MemCmd cmd(access_idx);
2149 const string &cstr = cmd.toString();
2150
2151 avgMshrMissLatency[access_idx]
2152 .name(name() + "." + cstr + "_avg_mshr_miss_latency")
2153 .desc("average " + cstr + " mshr miss latency")
2154 .flags(total | nozero | nonan)
2155 ;
2156 avgMshrMissLatency[access_idx] =
2157 mshr_miss_latency[access_idx] / mshr_misses[access_idx];
2158
2159 for (int i = 0; i < system->maxMasters(); i++) {
2160 avgMshrMissLatency[access_idx].subname(
2161 i, system->getMasterName(i));
2162 }
2163 }
2164
2165 demandAvgMshrMissLatency
2166 .name(name() + ".demand_avg_mshr_miss_latency")
2167        .desc("average demand mshr miss latency")
2168 .flags(total | nozero | nonan)
2169 ;
2170 demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
2171 for (int i = 0; i < system->maxMasters(); i++) {
2172 demandAvgMshrMissLatency.subname(i, system->getMasterName(i));
2173 }
2174
2175 overallAvgMshrMissLatency
2176 .name(name() + ".overall_avg_mshr_miss_latency")
2177 .desc("average overall mshr miss latency")
2178 .flags(total | nozero | nonan)
2179 ;
2180 overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
2181 for (int i = 0; i < system->maxMasters(); i++) {
2182 overallAvgMshrMissLatency.subname(i, system->getMasterName(i));
2183 }
2184
2185 // mshrUncacheable latency formulas
2186 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2187 MemCmd cmd(access_idx);
2188 const string &cstr = cmd.toString();
2189
2190 avgMshrUncacheableLatency[access_idx]
2191 .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency")
2192 .desc("average " + cstr + " mshr uncacheable latency")
2193 .flags(total | nozero | nonan)
2194 ;
2195 avgMshrUncacheableLatency[access_idx] =
2196 mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
2197
2198 for (int i = 0; i < system->maxMasters(); i++) {
2199 avgMshrUncacheableLatency[access_idx].subname(
2200 i, system->getMasterName(i));
2201 }
2202 }
2203
2204 overallAvgMshrUncacheableLatency
2205 .name(name() + ".overall_avg_mshr_uncacheable_latency")
2206 .desc("average overall mshr uncacheable latency")
2207 .flags(total | nozero | nonan)
2208 ;
2209 overallAvgMshrUncacheableLatency =
2210 overallMshrUncacheableLatency / overallMshrUncacheable;
2211 for (int i = 0; i < system->maxMasters(); i++) {
2212 overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i));
2213 }
2214
2215 replacements
2216 .name(name() + ".replacements")
2217 .desc("number of replacements")
2218 ;
2219}
2220
2221void
2222BaseCache::regProbePoints()
2223{
2224 ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit");
2225 ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss");
482 }
483
484 if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
485 // The block was marked not readable while there was a pending
486        // cache maintenance operation; restore its flag.
487 blk->status |= BlkReadable;
488
489 // This was a cache clean operation (without invalidate)
490 // and we have a copy of the block already. Since there
491 // is no invalidation, we can promote targets that don't
492 // require a writable copy
493 mshr->promoteReadable();
494 }
495
496 if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
497 // If at this point the referenced block is writable and the
498 // response is not a cache invalidate, we promote targets that
499        // were deferred as we couldn't guarantee a writable copy
500 mshr->promoteWritable();
501 }
502
503 serviceMSHRTargets(mshr, pkt, blk);
504
505 if (mshr->promoteDeferredTargets()) {
506 // avoid later read getting stale data while write miss is
507 // outstanding.. see comment in timingAccess()
508 if (blk) {
509 blk->status &= ~BlkReadable;
510 }
511 mshrQueue.markPending(mshr);
512 schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
513 } else {
514 // while we deallocate an mshr from the queue we still have to
515 // check the isFull condition before and after as we might
516 // have been using the reserved entries already
517 const bool was_full = mshrQueue.isFull();
518 mshrQueue.deallocate(mshr);
519 if (was_full && !mshrQueue.isFull()) {
520 clearBlocked(Blocked_NoMSHRs);
521 }
522
523 // Request the bus for a prefetch if this deallocation freed enough
524 // MSHRs for a prefetch to take place
525 if (prefetcher && mshrQueue.canPrefetch()) {
526 Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
527 clockEdge());
528 if (next_pf_time != MaxTick)
529 schedMemSideSendEvent(next_pf_time);
530 }
531 }
532
533    // if we used the temp block, check to see if it's valid and then clear it out
534 if (blk == tempBlock && tempBlock->isValid()) {
535 evictBlock(blk, writebacks);
536 }
537
538 const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
539 // copy writebacks to write buffer
540 doWritebacks(writebacks, forward_time);
541
542 DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
543 delete pkt;
544}
545
546
547Tick
548BaseCache::recvAtomic(PacketPtr pkt)
549{
550 // should assert here that there are no outstanding MSHRs or
551 // writebacks... that would mean that someone used an atomic
552 // access in timing mode
553
554 // We use lookupLatency here because it is used to specify the latency
555 // to access.
556 Cycles lat = lookupLatency;
557
558 CacheBlk *blk = nullptr;
559 PacketList writebacks;
560 bool satisfied = access(pkt, blk, lat, writebacks);
561
562 if (pkt->isClean() && blk && blk->isDirty()) {
563        // A cache clean operation is looking for a dirty
564        // block. If a dirty block is encountered, a WriteClean
565        // will update any copies on the path to memory, up to
566        // the point of reference.
567 DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
568 __func__, pkt->print(), blk->print());
569 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
570 writebacks.push_back(wb_pkt);
571 pkt->setSatisfied();
572 }
573
574 // handle writebacks resulting from the access here to ensure they
575 // logically precede anything happening below
576 doWritebacksAtomic(writebacks);
577 assert(writebacks.empty());
578
579 if (!satisfied) {
580 lat += handleAtomicReqMiss(pkt, blk, writebacks);
581 }
582
583 // Note that we don't invoke the prefetcher at all in atomic mode.
584 // It's not clear how to do it properly, particularly for
585 // prefetchers that aggressively generate prefetch candidates and
586 // rely on bandwidth contention to throttle them; these will tend
587 // to pollute the cache in atomic mode since there is no bandwidth
588 // contention. If we ever do want to enable prefetching in atomic
589 // mode, though, this is the place to do it... see timingAccess()
590 // for an example (though we'd want to issue the prefetch(es)
591 // immediately rather than calling requestMemSideBus() as we do
592 // there).
593
594 // do any writebacks resulting from the response handling
595 doWritebacksAtomic(writebacks);
596
597    // if we used the temp block, check to see if it's valid and if so
598 // clear it out, but only do so after the call to recvAtomic is
599 // finished so that any downstream observers (such as a snoop
600 // filter), first see the fill, and only then see the eviction
601 if (blk == tempBlock && tempBlock->isValid()) {
602 // the atomic CPU calls recvAtomic for fetch and load/store
603        // sequentially, and we may already have a tempBlock
604        // writeback from the fetch that we have not yet sent
605        if (tempBlockWriteback) {
606            // if that is the case, write the previous one back, and
607 // do not schedule any new event
608 writebackTempBlockAtomic();
609 } else {
610 // the writeback/clean eviction happens after the call to
611 // recvAtomic has finished (but before any successive
612 // calls), so that the response handling from the fill is
613 // allowed to happen first
614 schedule(writebackTempBlockAtomicEvent, curTick());
615 }
616
617 tempBlockWriteback = evictBlock(blk);
618 }
619
620 if (pkt->needsResponse()) {
621 pkt->makeAtomicResponse();
622 }
623
624 return lat * clockPeriod();
625}
626
627void
628BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
629{
630 Addr blk_addr = pkt->getBlockAddr(blkSize);
631 bool is_secure = pkt->isSecure();
632 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
633 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
634
635 pkt->pushLabel(name());
636
637 CacheBlkPrintWrapper cbpw(blk);
638
639 // Note that just because an L2/L3 has valid data doesn't mean an
640 // L1 doesn't have a more up-to-date modified copy that still
641 // needs to be found. As a result we always update the request if
642 // we have it, but only declare it satisfied if we are the owner.
643
644 // see if we have data at all (owned or otherwise)
645 bool have_data = blk && blk->isValid()
646 && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
647 blk->data);
648
649 // data we have is dirty if marked as such or if we have an
650 // in-service MSHR that is pending a modified line
651 bool have_dirty =
652 have_data && (blk->isDirty() ||
653 (mshr && mshr->inService && mshr->isPendingModified()));
654
655 bool done = have_dirty ||
656 cpuSidePort.trySatisfyFunctional(pkt) ||
657 mshrQueue.trySatisfyFunctional(pkt, blk_addr) ||
658 writeBuffer.trySatisfyFunctional(pkt, blk_addr) ||
659 memSidePort.trySatisfyFunctional(pkt);
660
661 DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
662 (blk && blk->isValid()) ? "valid " : "",
663 have_data ? "data " : "", done ? "done " : "");
664
665 // We're leaving the cache, so pop cache->name() label
666 pkt->popLabel();
667
668 if (done) {
669 pkt->makeResponse();
670 } else {
671 // if it came as a request from the CPU side then make sure it
672 // continues towards the memory side
673 if (from_cpu_side) {
674 memSidePort.sendFunctional(pkt);
675 } else if (cpuSidePort.isSnooping()) {
676 // if it came from the memory side, it must be a snoop request
677 // and we should only forward it if we are forwarding snoops
678 cpuSidePort.sendFunctionalSnoop(pkt);
679 }
680 }
681}
682
683
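// Reader's sketch of the compare-and-swap semantics implemented below:
// the packet carries the candidate value, the current memory contents are
// always copied back into the packet (the read half), and for a
// conditional swap the write only happens when the block's current bytes
// match the condition value from req->getExtraData(). Roughly:
//
//     old = *blk_data;                  // returned to the requester
//     if (!isCondSwap || old == cond)
//         *blk_data = overwrite_val;    // block becomes dirty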
684void
685BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
686{
687 assert(pkt->isRequest());
688
689 uint64_t overwrite_val;
690 bool overwrite_mem;
691 uint64_t condition_val64;
692 uint32_t condition_val32;
693
694 int offset = pkt->getOffset(blkSize);
695 uint8_t *blk_data = blk->data + offset;
696
697 assert(sizeof(uint64_t) >= pkt->getSize());
698
699 overwrite_mem = true;
700 // keep a copy of our possible write value, and copy what is at the
701 // memory address into the packet
702 pkt->writeData((uint8_t *)&overwrite_val);
703 pkt->setData(blk_data);
704
705 if (pkt->req->isCondSwap()) {
706 if (pkt->getSize() == sizeof(uint64_t)) {
707 condition_val64 = pkt->req->getExtraData();
708 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
709 sizeof(uint64_t));
710 } else if (pkt->getSize() == sizeof(uint32_t)) {
711 condition_val32 = (uint32_t)pkt->req->getExtraData();
712 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
713 sizeof(uint32_t));
714 } else
715 panic("Invalid size for conditional read/write\n");
716 }
717
718 if (overwrite_mem) {
719 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
720 blk->status |= BlkDirty;
721 }
722}
723
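// Scheduling order implemented below, highest priority first (summary):
//   1. a ready write-buffer entry when the write buffer is full or no
//      MSHR is ready, unless an older conflicting MSHR exists;
//   2. a ready MSHR, unless a conflicting writeback is pending (its
//      order is deliberately not checked, see the comment below);
//   3. a prefetch, but only when an MSHR slot is free and the target
//      line is absent from the tags, MSHR queue and write buffer.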
724QueueEntry*
725BaseCache::getNextQueueEntry()
726{
727 // Check both MSHR queue and write buffer for potential requests,
728 // note that null does not mean there is no request, it could
729 // simply be that it is not ready
730 MSHR *miss_mshr = mshrQueue.getNext();
731 WriteQueueEntry *wq_entry = writeBuffer.getNext();
732
733 // If we got a write buffer request ready, first priority is a
734 // full write buffer, otherwise we favour the miss requests
735 if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
736 // need to search MSHR queue for conflicting earlier miss.
737 MSHR *conflict_mshr =
738 mshrQueue.findPending(wq_entry->blkAddr,
739 wq_entry->isSecure);
740
741 if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
742 // Service misses in order until conflict is cleared.
743 return conflict_mshr;
744
745 // @todo Note that we ignore the ready time of the conflict here
746 }
747
748 // No conflicts; issue write
749 return wq_entry;
750 } else if (miss_mshr) {
751 // need to check for conflicting earlier writeback
752 WriteQueueEntry *conflict_mshr =
753 writeBuffer.findPending(miss_mshr->blkAddr,
754 miss_mshr->isSecure);
755 if (conflict_mshr) {
756 // not sure why we don't check order here... it was in the
757 // original code but commented out.
758
759 // The only way this happens is if we are
760 // doing a write and we didn't have permissions
761 // then subsequently saw a writeback (owned got evicted)
762 // We need to make sure to perform the writeback first
763 // To preserve the dirty data, then we can issue the write
764
765 // should we return wq_entry here instead? I.e. do we
766 // have to flush writes in order? I don't think so... not
767 // for Alpha anyway. Maybe for x86?
768 return conflict_mshr;
769
770 // @todo Note that we ignore the ready time of the conflict here
771 }
772
773 // No conflicts; issue read
774 return miss_mshr;
775 }
776
777 // fall through... no pending requests. Try a prefetch.
778 assert(!miss_mshr && !wq_entry);
779 if (prefetcher && mshrQueue.canPrefetch()) {
780 // If we have a miss queue slot, we can try a prefetch
781 PacketPtr pkt = prefetcher->getPacket();
782 if (pkt) {
783 Addr pf_addr = pkt->getBlockAddr(blkSize);
784 if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
785 !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
786 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
787 // Update statistic on number of prefetches issued
788 // (hwpf_mshr_misses)
789 assert(pkt->req->masterId() < system->maxMasters());
790 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
791
792 // allocate an MSHR and return it, note
793 // that we send the packet straight away, so do not
794 // schedule the send
795 return allocateMissBuffer(pkt, curTick(), false);
796 } else {
797 // free the request and packet
798 delete pkt;
799 }
800 }
801 }
802
803 return nullptr;
804}
805
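// Dispatch-order note for satisfyRequest below: SwapReq must be tested
// before the generic isWrite()/isRead() checks because a swap is both a
// read and a write. For example, an atomic fetch-and-add arrives as a
// SwapReq carrying an AtomicOpFunctor: the old bytes are returned in the
// packet, the functor updates the block in place, and the block is marked
// dirty.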
806void
807BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
808{
809 assert(pkt->isRequest());
810
811 assert(blk && blk->isValid());
812 // Occasionally this is not true... if we are a lower-level cache
813 // satisfying a string of Read and ReadEx requests from
814 // upper-level caches, a Read will mark the block as shared but we
815 // can satisfy a following ReadEx anyway since we can rely on the
816 // Read requester(s) to have buffered the ReadEx snoop and to
817 // invalidate their blocks after receiving them.
818 // assert(!pkt->needsWritable() || blk->isWritable());
819 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
820
821 // Check RMW operations first since both isRead() and
822 // isWrite() will be true for them
823 if (pkt->cmd == MemCmd::SwapReq) {
824 if (pkt->isAtomicOp()) {
825 // extract data from cache and save it into the data field in
826 // the packet as a return value from this atomic op
827 int offset = tags->extractBlkOffset(pkt->getAddr());
828 uint8_t *blk_data = blk->data + offset;
829 pkt->setData(blk_data);
830
831 // execute AMO operation
832 (*(pkt->getAtomicOp()))(blk_data);
833
834 // set block status to dirty
835 blk->status |= BlkDirty;
836 } else {
837 cmpAndSwap(blk, pkt);
838 }
839 } else if (pkt->isWrite()) {
840 // we have the block in a writable state and can go ahead,
841        // note that the line may also be considered writable in
842 // downstream caches along the path to memory, but always
843 // Exclusive, and never Modified
844 assert(blk->isWritable());
845 // Write or WriteLine at the first cache with block in writable state
846 if (blk->checkWrite(pkt)) {
847 pkt->writeDataToBlock(blk->data, blkSize);
848 }
849 // Always mark the line as dirty (and thus transition to the
850 // Modified state) even if we are a failed StoreCond so we
851 // supply data to any snoops that have appended themselves to
852 // this cache before knowing the store will fail.
853 blk->status |= BlkDirty;
854 DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
855 } else if (pkt->isRead()) {
856 if (pkt->isLLSC()) {
857 blk->trackLoadLocked(pkt);
858 }
859
860 // all read responses have a data payload
861 assert(pkt->hasRespData());
862 pkt->setDataFromBlock(blk->data, blkSize);
863 } else if (pkt->isUpgrade()) {
864 // sanity check
865 assert(!pkt->hasSharers());
866
867 if (blk->isDirty()) {
868 // we were in the Owned state, and a cache above us that
869 // has the line in Shared state needs to be made aware
870 // that the data it already has is in fact dirty
871 pkt->setCacheResponding();
872 blk->status &= ~BlkDirty;
873 }
874 } else if (pkt->isClean()) {
875 blk->status &= ~BlkDirty;
876 } else {
877 assert(pkt->isInvalidate());
878 invalidateBlock(blk);
879 DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
880 pkt->print());
881 }
882}
883
884/////////////////////////////////////////////////////
885//
886// Access path: requests coming in from the CPU side
887//
888/////////////////////////////////////////////////////
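// Worked example for the latency calculation below (illustrative
// numbers): with lookup_lat = 2 cycles and dataLatency = 4 cycles, a hit
// costs 2 + 4 = 6 cycles with sequential tag/data access, or
// max(2, 4) = 4 cycles with parallel access. If the block will not be
// ready for another W cycles and W exceeds that latency, W is added on
// top, as coded in the when_ready check.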
889Cycles
890BaseCache::calculateAccessLatency(const CacheBlk* blk,
891 const Cycles lookup_lat) const
892{
893 Cycles lat(lookup_lat);
894
895 if (blk != nullptr) {
896 // First access tags, then data
897 if (sequentialAccess) {
898 lat += dataLatency;
899 // Latency is dictated by the slowest of tag and data latencies
900 } else {
901 lat = std::max(lookup_lat, dataLatency);
902 }
903
904 // Check if the block to be accessed is available. If not, apply the
905 // access latency on top of when the block is ready to be accessed.
906 const Tick when_ready = blk->getWhenReady();
907 if (when_ready > curTick() &&
908 ticksToCycles(when_ready - curTick()) > lat) {
909 lat += ticksToCycles(when_ready - curTick());
910 }
911 }
912
913 return lat;
914}
915
916bool
917BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
918 PacketList &writebacks)
919{
920 // sanity check
921 assert(pkt->isRequest());
922
923 chatty_assert(!(isReadOnly && pkt->isWrite()),
924 "Should never see a write in a read-only cache %s\n",
925 name());
926
927 // Access block in the tags
928 Cycles tag_latency(0);
929 blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), tag_latency);
930
931 // Calculate access latency
932 lat = calculateAccessLatency(blk, tag_latency);
933
934 DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
935 blk ? "hit " + blk->print() : "miss");
936
937 if (pkt->req->isCacheMaintenance()) {
938 // A cache maintenance operation is always forwarded to the
939 // memory below even if the block is found in dirty state.
940
941 // We defer any changes to the state of the block until we
942 // create and mark as in service the mshr for the downstream
943 // packet.
944 return false;
945 }
946
947 if (pkt->isEviction()) {
948 // We check for presence of block in above caches before issuing
949        // Writeback or CleanEvict to write buffer. Therefore the only
950        // possible case is a CleanEvict packet coming from above and
951        // encountering a Writeback generated in this cache that is
952        // waiting in the write buffer. Cases of upper level peer caches
953 // generating CleanEvict and Writeback or simply CleanEvict and
954 // CleanEvict almost simultaneously will be caught by snoops sent out
955 // by crossbar.
956 WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
957 pkt->isSecure());
958 if (wb_entry) {
959 assert(wb_entry->getNumTargets() == 1);
960 PacketPtr wbPkt = wb_entry->getTarget()->pkt;
961 assert(wbPkt->isWriteback());
962
963 if (pkt->isCleanEviction()) {
964 // The CleanEvict and WritebackClean snoops into other
965 // peer caches of the same level while traversing the
966 // crossbar. If a copy of the block is found, the
967 // packet is deleted in the crossbar. Hence, none of
968 // the other upper level caches connected to this
969 // cache have the block, so we can clear the
970 // BLOCK_CACHED flag in the Writeback if set and
971 // discard the CleanEvict by returning true.
972 wbPkt->clearBlockCached();
973 return true;
974 } else {
975 assert(pkt->cmd == MemCmd::WritebackDirty);
976 // Dirty writeback from above trumps our clean
977 // writeback... discard here
978 // Note: markInService will remove entry from writeback buffer.
979 markInService(wb_entry);
980 delete wbPkt;
981 }
982 }
983 }
984
985 // Writeback handling is special case. We can write the block into
986 // the cache without having a writeable copy (or any copy at all).
987 if (pkt->isWriteback()) {
988 assert(blkSize == pkt->getSize());
989
990 // we could get a clean writeback while we are having
991 // outstanding accesses to a block, do the simple thing for
992 // now and drop the clean writeback so that we do not upset
993 // any ordering/decisions about ownership already taken
994 if (pkt->cmd == MemCmd::WritebackClean &&
995 mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
996 DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
997 "dropping\n", pkt->getAddr());
998 return true;
999 }
1000
1001 if (!blk) {
1002 // need to do a replacement
1003 blk = allocateBlock(pkt, writebacks);
1004 if (!blk) {
1005 // no replaceable block available: give up, fwd to next level.
1006 incMissCount(pkt);
1007 return false;
1008 }
1009
1010 blk->status |= BlkReadable;
1011 }
1012 // only mark the block dirty if we got a writeback command,
1013 // and leave it as is for a clean writeback
1014 if (pkt->cmd == MemCmd::WritebackDirty) {
1015 // TODO: the coherent cache can assert(!blk->isDirty());
1016 blk->status |= BlkDirty;
1017 }
1018 // if the packet does not have sharers, it is passing
1019 // writable, and we got the writeback in Modified or Exclusive
1020        // state; if not, we are in the Owned or Shared state
1021 if (!pkt->hasSharers()) {
1022 blk->status |= BlkWritable;
1023 }
1024 // nothing else to do; writeback doesn't expect response
1025 assert(!pkt->needsResponse());
1026 pkt->writeDataToBlock(blk->data, blkSize);
1027 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
1028 incHitCount(pkt);
1029 // populate the time when the block will be ready to access.
1030 blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
1031 pkt->payloadDelay);
1032 return true;
1033 } else if (pkt->cmd == MemCmd::CleanEvict) {
1034 if (blk) {
1035 // Found the block in the tags, need to stop CleanEvict from
1036 // propagating further down the hierarchy. Returning true will
1037 // treat the CleanEvict like a satisfied write request and delete
1038 // it.
1039 return true;
1040 }
1041 // We didn't find the block here, propagate the CleanEvict further
1042 // down the memory hierarchy. Returning false will treat the CleanEvict
1043 // like a Writeback which could not find a replaceable block so has to
1044 // go to next level.
1045 return false;
1046 } else if (pkt->cmd == MemCmd::WriteClean) {
1047 // WriteClean handling is a special case. We can allocate a
1048 // block directly if it doesn't exist and we can update the
1049 // block immediately. The WriteClean transfers the ownership
1050 // of the block as well.
1051 assert(blkSize == pkt->getSize());
1052
1053 if (!blk) {
1054 if (pkt->writeThrough()) {
1055 // if this is a write through packet, we don't try to
1056 // allocate if the block is not present
1057 return false;
1058 } else {
1059 // a writeback that misses needs to allocate a new block
1060 blk = allocateBlock(pkt, writebacks);
1061 if (!blk) {
1062 // no replaceable block available: give up, fwd to
1063 // next level.
1064 incMissCount(pkt);
1065 return false;
1066 }
1067
1068 blk->status |= BlkReadable;
1069 }
1070 }
1071
1072 // at this point either this is a writeback or a write-through
1073 // write clean operation and the block is already in this
1074    // cache; we need to update the data and the block flags
1075 assert(blk);
1076 // TODO: the coherent cache can assert(!blk->isDirty());
1077 if (!pkt->writeThrough()) {
1078 blk->status |= BlkDirty;
1079 }
1080 // nothing else to do; writeback doesn't expect response
1081 assert(!pkt->needsResponse());
1082 pkt->writeDataToBlock(blk->data, blkSize);
1083 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
1084
1085 incHitCount(pkt);
1086 // populate the time when the block will be ready to access.
1087 blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
1088 pkt->payloadDelay);
1089    // if this is a write-through packet it will be sent to the
1090    // cache below
1091 return !pkt->writeThrough();
1092 } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
1093 blk->isReadable())) {
1094 // OK to satisfy access
1095 incHitCount(pkt);
1096 satisfyRequest(pkt, blk);
1097 maintainClusivity(pkt->fromCache(), blk);
1098
1099 return true;
1100 }
1101
1102 // Can't satisfy access normally... either no block (blk == nullptr)
1103 // or have block but need writable
1104
1105 incMissCount(pkt);
1106
1107 if (!blk && pkt->isLLSC() && pkt->isWrite()) {
1108 // complete miss on store conditional... just give up now
1109 pkt->req->setExtraData(0);
1110 return true;
1111 }
1112
1113 return false;
1114}
1115
1116void
1117BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
1118{
1119 if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
1120 clusivity == Enums::mostly_excl) {
1121 // if we have responded to a cache, and our block is still
1122 // valid, but not dirty, and this cache is mostly exclusive
1123 // with respect to the cache above, drop the block
1124 invalidateBlock(blk);
1125 }
1126}
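
// A minimal illustration of the mostly-exclusive policy above (a
// sketch only, not called anywhere; the scenario is hypothetical):
//
//   // this cache has responded to a cache above; the local copy is
//   // still valid and clean
//   maintainClusivity(true /* from_cache */, blk);
//   // with clusivity == Enums::mostly_excl the local copy is now
//   // invalidated, leaving the only copy in the cache above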
1127
1128CacheBlk*
1129BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
1130 bool allocate)
1131{
1132 assert(pkt->isResponse());
1133 Addr addr = pkt->getAddr();
1134 bool is_secure = pkt->isSecure();
1135#if TRACING_ON
1136 CacheBlk::State old_state = blk ? blk->status : 0;
1137#endif
1138
1139 // When handling a fill, we should have no writes to this line.
1140 assert(addr == pkt->getBlockAddr(blkSize));
1141 assert(!writeBuffer.findMatch(addr, is_secure));
1142
1143 if (!blk) {
1144 // better have read new data...
1145 assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);
1146
1147 // need to do a replacement if allocating, otherwise we stick
1148 // with the temporary storage
1149 blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;
1150
1151 if (!blk) {
1152 // No replaceable block or a mostly exclusive
1153 // cache... just use temporary storage to complete the
1154 // current request and then get rid of it
1155 blk = tempBlock;
1156 tempBlock->insert(addr, is_secure);
1157 DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
1158 is_secure ? "s" : "ns");
1159 }
1160 } else {
1161 // existing block... probably an upgrade
1162 // don't clear block status... if block is already dirty we
1163 // don't want to lose that
1164 }
1165
1166 // Block is guaranteed to be valid at this point
1167 assert(blk->isValid());
1168 assert(blk->isSecure() == is_secure);
1169 assert(regenerateBlkAddr(blk) == addr);
1170
1171 blk->status |= BlkReadable;
1172
1173 // sanity check for whole-line writes, which should always be
1174 // marked as writable as part of the fill, and then later marked
1175 // dirty as part of satisfyRequest
1176 if (pkt->cmd == MemCmd::InvalidateResp) {
1177 assert(!pkt->hasSharers());
1178 }
1179
1180 // here we deal with setting the appropriate state of the line.
1181 // We start by looking at the hasSharers flag, and ignore the
1182 // cacheResponding flag (normally signalling dirty data) if the
1183 // packet has sharers. Thus the line is never allocated as Owned
1184 // (dirty but not writable), and always ends up being either
1185 // Shared, Exclusive or Modified. See Packet::setCacheResponding
1186 // for more details.
1187 if (!pkt->hasSharers()) {
1188 // we could get a writable line from memory (rather than a
1189 // cache) even in a read-only cache; we set this bit in that
1190 // case too, so possibly revisit this decision
1191 blk->status |= BlkWritable;
1192
1193 // check if we got this via cache-to-cache transfer (i.e., from a
1194 // cache that had the block in Modified or Owned state)
1195 if (pkt->cacheResponding()) {
1196 // we got the block in Modified state, and invalidated the
1197 // owners copy
1198 blk->status |= BlkDirty;
1199
1200 chatty_assert(!isReadOnly, "Should never see dirty snoop response "
1201 "in read-only cache %s\n", name());
1202 }
1203 }
1204
1205 DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
1206 addr, is_secure ? "s" : "ns", old_state, blk->print());
1207
1208 // if we got new data, copy it in (checking for a read response
1209 // and a response that has data is the same in the end)
1210 if (pkt->isRead()) {
1211 // sanity checks
1212 assert(pkt->hasData());
1213 assert(pkt->getSize() == blkSize);
1214
1215 pkt->writeDataToBlock(blk->data, blkSize);
1216 }
1217 // We pay for fillLatency here.
1218 blk->setWhenReady(clockEdge(fillLatency) + pkt->payloadDelay);
1219
1220 return blk;
1221}
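
// A condensed sketch of how the response path uses handleFill (the
// real recvTimingResp also services MSHR targets; this is
// illustrative only):
//
//   PacketList writebacks;
//   CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
//   blk = handleFill(pkt, blk, writebacks, allocOnFill(pkt->cmd));
//   // evictions triggered by the fill are queued in writebacks and
//   // are sent downstream afterwards (e.g. via doWritebacks)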
1222
1223CacheBlk*
1224BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
1225{
1226 // Get address
1227 const Addr addr = pkt->getAddr();
1228
1229 // Get secure bit
1230 const bool is_secure = pkt->isSecure();
1231
1232 // Find replacement victim
1233 std::vector<CacheBlk*> evict_blks;
1234 CacheBlk *victim = tags->findVictim(addr, is_secure, evict_blks);
1235
1236 // It is valid to return nullptr if there is no victim
1237 if (!victim)
1238 return nullptr;
1239
1240 // Print victim block's information
1241 DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print());
1242
1243 // Check for transient state allocations. If any of the entries listed
1244 // for eviction has a transient state, the allocation fails
1245 for (const auto& blk : evict_blks) {
1246 if (blk->isValid()) {
1247 Addr repl_addr = regenerateBlkAddr(blk);
1248 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
1249 if (repl_mshr) {
1250 // must be an outstanding upgrade or clean request
1251 // on a block we're about to replace...
1252 assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
1253 repl_mshr->isCleaning());
1254
1255 // too hard to replace block with transient state
1256 // allocation failed, block not inserted
1257 return nullptr;
1258 }
1259 }
1260 }
1261
1262 // The victim will be replaced by a new entry, so increase the replacement
1263 // counter if a valid block is being replaced
1264 if (victim->isValid()) {
1265 DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
1266 "(%s): %s\n", regenerateBlkAddr(victim),
1267 victim->isSecure() ? "s" : "ns",
1268 addr, is_secure ? "s" : "ns",
1269 victim->isDirty() ? "writeback" : "clean");
1270
1271 replacements++;
1272 }
1273
1274 // Evict valid blocks associated with this victim block
1275 for (const auto& blk : evict_blks) {
1276 if (blk->isValid()) {
1277 if (blk->wasPrefetched()) {
1278 unusedPrefetches++;
1279 }
1280
1281 evictBlock(blk, writebacks);
1282 }
1283 }
1284
1285 // Insert new block at victimized entry
1286 tags->insertBlock(addr, is_secure, pkt->req->masterId(),
1287 pkt->req->taskId(), victim);
1288
1289 return victim;
1290}
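
// Callers must handle allocation failure, which occurs either when
// the replacement policy yields no victim or when one of the blocks
// to be evicted is in transient state. A minimal sketch:
//
//   CacheBlk *blk = allocateBlock(pkt, writebacks);
//   if (!blk) {
//       // writeback path: forward the request to the next level
//       // fill path: fall back to tempBlock (see handleFill above)
//   }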
1291
1292void
1293BaseCache::invalidateBlock(CacheBlk *blk)
1294{
1295 // If the block is present in the tags, let the tags handle the
1296 // invalidation, which updates stats and invalidates the block itself
1297 if (blk != tempBlock) {
1298 tags->invalidate(blk);
1299 } else {
1300 tempBlock->invalidate();
1301 }
1302}
1303
1304void
1305BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
1306{
1307 PacketPtr pkt = evictBlock(blk);
1308 if (pkt) {
1309 writebacks.push_back(pkt);
1310 }
1311}
1312
1313PacketPtr
1314BaseCache::writebackBlk(CacheBlk *blk)
1315{
1316 chatty_assert(!isReadOnly || writebackClean,
1317 "Writeback from read-only cache");
1318 assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));
1319
1320 writebacks[Request::wbMasterId]++;
1321
1322 RequestPtr req = std::make_shared<Request>(
1323 regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
1324
1325 if (blk->isSecure())
1326 req->setFlags(Request::SECURE);
1327
1328 req->taskId(blk->task_id);
1329
1330 PacketPtr pkt =
1331 new Packet(req, blk->isDirty() ?
1332 MemCmd::WritebackDirty : MemCmd::WritebackClean);
1333
1334 DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
1335 pkt->print(), blk->isWritable(), blk->isDirty());
1336
1337 if (blk->isWritable()) {
1338 // not asserting shared means we pass the block in modified
1339 // state, mark our own block non-writeable
1340 blk->status &= ~BlkWritable;
1341 } else {
1342 // we are in the Owned state, tell the receiver
1343 pkt->setHasSharers();
1344 }
1345
1346 // make sure the block is not marked dirty
1347 blk->status &= ~BlkDirty;
1348
1349 pkt->allocate();
1350 pkt->setDataFromBlock(blk->data, blkSize);
1351
1352 return pkt;
1353}
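
// The coherence encoding produced above, in summary (illustrative):
//
//   dirty block  -> MemCmd::WritebackDirty
//   clean block  -> MemCmd::WritebackClean (writebackClean caches)
//   writable     -> passed as Modified/Exclusive (no sharers set)
//   non-writable -> passed as Owned/Shared (setHasSharers())
//
// In all cases the local copy is downgraded: BlkWritable and
// BlkDirty are cleared before the packet leaves.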
1354
1355PacketPtr
1356BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
1357{
1358 RequestPtr req = std::make_shared<Request>(
1359 regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
1360
1361 if (blk->isSecure()) {
1362 req->setFlags(Request::SECURE);
1363 }
1364 req->taskId(blk->task_id);
1365
1366 PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);
1367
1368 if (dest) {
1369 req->setFlags(dest);
1370 pkt->setWriteThrough();
1371 }
1372
1373 DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
1374 blk->isWritable(), blk->isDirty());
1375
1376 if (blk->isWritable()) {
1377 // not asserting shared means we pass the block in modified
1378 // state, mark our own block non-writeable
1379 blk->status &= ~BlkWritable;
1380 } else {
1381 // we are in the Owned state, tell the receiver
1382 pkt->setHasSharers();
1383 }
1384
1385 // make sure the block is not marked dirty
1386 blk->status &= ~BlkDirty;
1387
1388 pkt->allocate();
1389 pkt->setDataFromBlock(blk->data, blkSize);
1390
1391 return pkt;
1392}
1393
1394
1395void
1396BaseCache::memWriteback()
1397{
1398 tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
1399}
1400
1401void
1402BaseCache::memInvalidate()
1403{
1404 tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
1405}
1406
1407bool
1408BaseCache::isDirty() const
1409{
1410 return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
1411}
1412
1413bool
1414BaseCache::coalesce() const
1415{
1416 return writeAllocator && writeAllocator->coalesce();
1417}
1418
1419void
1420BaseCache::writebackVisitor(CacheBlk &blk)
1421{
1422 if (blk.isDirty()) {
1423 assert(blk.isValid());
1424
1425 RequestPtr request = std::make_shared<Request>(
1426 regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);
1427
1428 request->taskId(blk.task_id);
1429 if (blk.isSecure()) {
1430 request->setFlags(Request::SECURE);
1431 }
1432
1433 Packet packet(request, MemCmd::WriteReq);
1434 packet.dataStatic(blk.data);
1435
1436 memSidePort.sendFunctional(&packet);
1437
1438 blk.status &= ~BlkDirty;
1439 }
1440}
1441
1442void
1443BaseCache::invalidateVisitor(CacheBlk &blk)
1444{
1445 if (blk.isDirty())
1446 warn_once("Invalidating dirty cache lines. " \
1447 "Expect things to break.\n");
1448
1449 if (blk.isValid()) {
1450 assert(!blk.isDirty());
1451 invalidateBlock(&blk);
1452 }
1453}
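
// Usage sketch for the two visitors above (this is exactly how
// memWriteback() and memInvalidate() drive them earlier in this
// file):
//
//   tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
//   tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
//
// A full flush is the combination: write back dirty data first, then
// invalidate, so invalidateVisitor never encounters a dirty block.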
1454
1455Tick
1456BaseCache::nextQueueReadyTime() const
1457{
1458 Tick nextReady = std::min(mshrQueue.nextReadyTime(),
1459 writeBuffer.nextReadyTime());
1460
1461 // Don't signal prefetch ready time if no MSHRs available
1462 // Will signal once enough MSHRs are deallocated
1463 if (prefetcher && mshrQueue.canPrefetch()) {
1464 nextReady = std::min(nextReady,
1465 prefetcher->nextPrefetchReadyTime());
1466 }
1467
1468 return nextReady;
1469}
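
// Worked example (illustrative ticks): with the MSHR queue ready at
// tick 1000, the write buffer at 1500, and the prefetcher at 800,
// nextQueueReadyTime() returns 800 if an MSHR can currently be
// allocated for a prefetch (mshrQueue.canPrefetch()), and 1000
// otherwise.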
1470
1471
1472bool
1473BaseCache::sendMSHRQueuePacket(MSHR* mshr)
1474{
1475 assert(mshr);
1476
1477 // use request from 1st target
1478 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1479
1480 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());
1481
1482 // if the cache is in write-coalescing mode (or, further, in
1483 // no-allocation mode) and we have a write packet with an MSHR
1484 // that is not a whole-line write (due to incompatible flags,
1485 // etc.), then reset the write mode
1486 if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) {
1487 if (!mshr->isWholeLineWrite()) {
1488 // if we are currently write coalescing, hold on the
1489 // MSHR as many cycles extra as we need to completely
1490 // write a cache line
1491 if (writeAllocator->delay(mshr->blkAddr)) {
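// (illustrative arithmetic: with a 64-byte block and 8-byte
// writes, the delay below is 64/8 = 8 clock periods, i.e. long
// enough for the remaining writes of the line to arrive)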
1492 Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
1493 DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
1494 "for write coalescing\n", tgt_pkt->print(), delay);
1495 mshrQueue.delay(mshr, delay);
1496 return false;
1497 } else {
1498 writeAllocator->reset();
1499 }
1500 } else {
1501 writeAllocator->resetDelay(mshr->blkAddr);
1502 }
1503 }
1504
1505 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
1506
1507 // either a prefetch that is not present upstream, or a normal
1508 // MSHR request, proceed to get the packet to send downstream
1509 PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(),
1510 mshr->isWholeLineWrite());
1511
1512 mshr->isForward = (pkt == nullptr);
1513
1514 if (mshr->isForward) {
1515 // not a cache block request, but a response is expected
1516 // make copy of current packet to forward, keep current
1517 // copy for response handling
1518 pkt = new Packet(tgt_pkt, false, true);
1519 assert(!pkt->isWrite());
1520 }
1521
1522 // play it safe and append (rather than set) the sender state,
1523 // as forwarded packets may already have existing state
1524 pkt->pushSenderState(mshr);
1525
1526 if (pkt->isClean() && blk && blk->isDirty()) {
1527 // A cache clean operation is looking for a dirty block. Mark
1528 // the packet so that the destination xbar can determine that
1529 // there will be a follow-up write packet as well.
1530 pkt->setSatisfied();
1531 }
1532
1533 if (!memSidePort.sendTimingReq(pkt)) {
1534 // we are awaiting a retry, but we
1535 // delete the packet and will be creating a new packet
1536 // when we get the opportunity
1537 delete pkt;
1538
1539 // note that we have now masked any requestBus and
1540 // schedSendEvent (we will wait for a retry before
1541 // doing anything), and this is so even if we do not
1542 // care about this packet and might override it before
1543 // it gets retried
1544 return true;
1545 } else {
1546 // As part of the call to sendTimingReq the packet is
1547 // forwarded to all neighbouring caches (and any caches
1548 // above them) as a snoop. Thus at this point we know if
1549 // any of the neighbouring caches are responding, and if
1550 // so, we know it is dirty, and we can determine if it is
1551 // being passed as Modified, making our MSHR the ordering
1552 // point
1553 bool pending_modified_resp = !pkt->hasSharers() &&
1554 pkt->cacheResponding();
1555 markInService(mshr, pending_modified_resp);
1556
1557 if (pkt->isClean() && blk && blk->isDirty()) {
1558 // A cache clean operation is looking for a dirty
1559 // block. If a dirty block is encountered, a WriteClean
1560 // will update any copies on the path to memory up to
1561 // the point of reference.
1562 DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
1563 __func__, pkt->print(), blk->print());
1564 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
1565 pkt->id);
1566 PacketList writebacks;
1567 writebacks.push_back(wb_pkt);
1568 doWritebacks(writebacks, 0);
1569 }
1570
1571 return false;
1572 }
1573}
1574
1575bool
1576BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
1577{
1578 assert(wq_entry);
1579
1580 // always a single target for write queue entries
1581 PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;
1582
1583 DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());
1584
1585 // forward as is, both for evictions and uncacheable writes
1586 if (!memSidePort.sendTimingReq(tgt_pkt)) {
1587 // note that we have now masked any requestBus and
1588 // schedSendEvent (we will wait for a retry before
1589 // doing anything), and this is so even if we do not
1590 // care about this packet and might override it before
1591 // it gets retried
1592 return true;
1593 } else {
1594 markInService(wq_entry);
1595 return false;
1596 }
1597}
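
// Both send*Packet methods above share a return convention: true
// means the downstream port refused the packet and we are now
// waiting for a retry; false means the packet was sent and the
// entry marked in service. A caller sketch (illustrative; cf.
// sendDeferredPacket below):
//
//   if (entry->sendPacket(cache)) {
//       // blocked: wait for recvReqRetry before draining further
//   } else {
//       // sent: schedule the next event via nextQueueReadyTime()
//   }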
1598
1599void
1600BaseCache::serialize(CheckpointOut &cp) const
1601{
1602 bool dirty(isDirty());
1603
1604 if (dirty) {
1605 warn("*** The cache still contains dirty data. ***\n");
1606 warn(" Make sure to drain the system using the correct flags.\n");
1607 warn(" This checkpoint will not restore correctly " \
1608 "and dirty data in the cache will be lost!\n");
1609 }
1610
1611 // Since we don't checkpoint the data in the cache, any dirty data
1612 // will be lost when restoring from a checkpoint of a system that
1613 // wasn't drained properly. Flag the checkpoint as invalid if the
1614 // cache contains dirty data.
1615 bool bad_checkpoint(dirty);
1616 SERIALIZE_SCALAR(bad_checkpoint);
1617}
1618
1619void
1620BaseCache::unserialize(CheckpointIn &cp)
1621{
1622 bool bad_checkpoint;
1623 UNSERIALIZE_SCALAR(bad_checkpoint);
1624 if (bad_checkpoint) {
1625 fatal("Restoring from checkpoints with dirty caches is not "
1626 "supported in the classic memory system. Please remove any "
1627 "caches or drain them properly before taking checkpoints.\n");
1628 }
1629}
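
// Taken together, the two methods above enforce a simple invariant
// (illustrative trace for a system checkpointed with dirty caches):
//
//   serialize():   bad_checkpoint = true   // isDirty()
//   unserialize(): fatal(...)              // restore refused
//
// Draining writebacks first (cf. memWriteback() above) clears all
// dirty blocks, so bad_checkpoint is false and the restore succeeds.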
1630
1631void
1632BaseCache::regStats()
1633{
1634 MemObject::regStats();
1635
1636 using namespace Stats;
1637
1638 // Hit statistics
1639 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1640 MemCmd cmd(access_idx);
1641 const string &cstr = cmd.toString();
1642
1643 hits[access_idx]
1644 .init(system->maxMasters())
1645 .name(name() + "." + cstr + "_hits")
1646 .desc("number of " + cstr + " hits")
1647 .flags(total | nozero | nonan)
1648 ;
1649 for (int i = 0; i < system->maxMasters(); i++) {
1650 hits[access_idx].subname(i, system->getMasterName(i));
1651 }
1652 }
1653
1654// These macros make it easier to sum the right subset of commands and
1655// to change the subset of commands that are considered "demand" vs
1656// "non-demand"
1657#define SUM_DEMAND(s) \
1658 (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \
1659 s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq])
1660
1661// should writebacks be included here? prior code was inconsistent...
1662#define SUM_NON_DEMAND(s) \
1663 (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq])
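
// For instance, the demand/overall split for the hit statistics
// below expands to (illustrative, directly from the macros):
//
//   demandHits  = hits[ReadReq] + hits[WriteReq] + hits[WriteLineReq]
//               + hits[ReadExReq] + hits[ReadCleanReq]
//               + hits[ReadSharedReq]
//   overallHits = demandHits + hits[SoftPFReq] + hits[HardPFReq]
//               + hits[SoftPFExReq]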
1664
1665 demandHits
1666 .name(name() + ".demand_hits")
1667 .desc("number of demand (read+write) hits")
1668 .flags(total | nozero | nonan)
1669 ;
1670 demandHits = SUM_DEMAND(hits);
1671 for (int i = 0; i < system->maxMasters(); i++) {
1672 demandHits.subname(i, system->getMasterName(i));
1673 }
1674
1675 overallHits
1676 .name(name() + ".overall_hits")
1677 .desc("number of overall hits")
1678 .flags(total | nozero | nonan)
1679 ;
1680 overallHits = demandHits + SUM_NON_DEMAND(hits);
1681 for (int i = 0; i < system->maxMasters(); i++) {
1682 overallHits.subname(i, system->getMasterName(i));
1683 }
1684
1685 // Miss statistics
1686 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1687 MemCmd cmd(access_idx);
1688 const string &cstr = cmd.toString();
1689
1690 misses[access_idx]
1691 .init(system->maxMasters())
1692 .name(name() + "." + cstr + "_misses")
1693 .desc("number of " + cstr + " misses")
1694 .flags(total | nozero | nonan)
1695 ;
1696 for (int i = 0; i < system->maxMasters(); i++) {
1697 misses[access_idx].subname(i, system->getMasterName(i));
1698 }
1699 }
1700
1701 demandMisses
1702 .name(name() + ".demand_misses")
1703 .desc("number of demand (read+write) misses")
1704 .flags(total | nozero | nonan)
1705 ;
1706 demandMisses = SUM_DEMAND(misses);
1707 for (int i = 0; i < system->maxMasters(); i++) {
1708 demandMisses.subname(i, system->getMasterName(i));
1709 }
1710
1711 overallMisses
1712 .name(name() + ".overall_misses")
1713 .desc("number of overall misses")
1714 .flags(total | nozero | nonan)
1715 ;
1716 overallMisses = demandMisses + SUM_NON_DEMAND(misses);
1717 for (int i = 0; i < system->maxMasters(); i++) {
1718 overallMisses.subname(i, system->getMasterName(i));
1719 }
1720
1721 // Miss latency statistics
1722 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1723 MemCmd cmd(access_idx);
1724 const string &cstr = cmd.toString();
1725
1726 missLatency[access_idx]
1727 .init(system->maxMasters())
1728 .name(name() + "." + cstr + "_miss_latency")
1729 .desc("number of " + cstr + " miss cycles")
1730 .flags(total | nozero | nonan)
1731 ;
1732 for (int i = 0; i < system->maxMasters(); i++) {
1733 missLatency[access_idx].subname(i, system->getMasterName(i));
1734 }
1735 }
1736
1737 demandMissLatency
1738 .name(name() + ".demand_miss_latency")
1739 .desc("number of demand (read+write) miss cycles")
1740 .flags(total | nozero | nonan)
1741 ;
1742 demandMissLatency = SUM_DEMAND(missLatency);
1743 for (int i = 0; i < system->maxMasters(); i++) {
1744 demandMissLatency.subname(i, system->getMasterName(i));
1745 }
1746
1747 overallMissLatency
1748 .name(name() + ".overall_miss_latency")
1749 .desc("number of overall miss cycles")
1750 .flags(total | nozero | nonan)
1751 ;
1752 overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
1753 for (int i = 0; i < system->maxMasters(); i++) {
1754 overallMissLatency.subname(i, system->getMasterName(i));
1755 }
1756
1757 // access formulas
1758 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1759 MemCmd cmd(access_idx);
1760 const string &cstr = cmd.toString();
1761
1762 accesses[access_idx]
1763 .name(name() + "." + cstr + "_accesses")
1764 .desc("number of " + cstr + " accesses (hits+misses)")
1765 .flags(total | nozero | nonan)
1766 ;
1767 accesses[access_idx] = hits[access_idx] + misses[access_idx];
1768
1769 for (int i = 0; i < system->maxMasters(); i++) {
1770 accesses[access_idx].subname(i, system->getMasterName(i));
1771 }
1772 }
1773
1774 demandAccesses
1775 .name(name() + ".demand_accesses")
1776 .desc("number of demand (read+write) accesses")
1777 .flags(total | nozero | nonan)
1778 ;
1779 demandAccesses = demandHits + demandMisses;
1780 for (int i = 0; i < system->maxMasters(); i++) {
1781 demandAccesses.subname(i, system->getMasterName(i));
1782 }
1783
1784 overallAccesses
1785 .name(name() + ".overall_accesses")
1786 .desc("number of overall (read+write) accesses")
1787 .flags(total | nozero | nonan)
1788 ;
1789 overallAccesses = overallHits + overallMisses;
1790 for (int i = 0; i < system->maxMasters(); i++) {
1791 overallAccesses.subname(i, system->getMasterName(i));
1792 }
1793
1794 // miss rate formulas
1795 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1796 MemCmd cmd(access_idx);
1797 const string &cstr = cmd.toString();
1798
1799 missRate[access_idx]
1800 .name(name() + "." + cstr + "_miss_rate")
1801 .desc("miss rate for " + cstr + " accesses")
1802 .flags(total | nozero | nonan)
1803 ;
1804 missRate[access_idx] = misses[access_idx] / accesses[access_idx];
1805
1806 for (int i = 0; i < system->maxMasters(); i++) {
1807 missRate[access_idx].subname(i, system->getMasterName(i));
1808 }
1809 }
1810
1811 demandMissRate
1812 .name(name() + ".demand_miss_rate")
1813 .desc("miss rate for demand accesses")
1814 .flags(total | nozero | nonan)
1815 ;
1816 demandMissRate = demandMisses / demandAccesses;
1817 for (int i = 0; i < system->maxMasters(); i++) {
1818 demandMissRate.subname(i, system->getMasterName(i));
1819 }
1820
1821 overallMissRate
1822 .name(name() + ".overall_miss_rate")
1823 .desc("miss rate for overall accesses")
1824 .flags(total | nozero | nonan)
1825 ;
1826 overallMissRate = overallMisses / overallAccesses;
1827 for (int i = 0; i < system->maxMasters(); i++) {
1828 overallMissRate.subname(i, system->getMasterName(i));
1829 }
1830
1831 // miss latency formulas
1832 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1833 MemCmd cmd(access_idx);
1834 const string &cstr = cmd.toString();
1835
1836 avgMissLatency[access_idx]
1837 .name(name() + "." + cstr + "_avg_miss_latency")
1838 .desc("average " + cstr + " miss latency")
1839 .flags(total | nozero | nonan)
1840 ;
1841 avgMissLatency[access_idx] =
1842 missLatency[access_idx] / misses[access_idx];
1843
1844 for (int i = 0; i < system->maxMasters(); i++) {
1845 avgMissLatency[access_idx].subname(i, system->getMasterName(i));
1846 }
1847 }
1848
1849 demandAvgMissLatency
1850 .name(name() + ".demand_avg_miss_latency")
1851 .desc("average demand miss latency")
1852 .flags(total | nozero | nonan)
1853 ;
1854 demandAvgMissLatency = demandMissLatency / demandMisses;
1855 for (int i = 0; i < system->maxMasters(); i++) {
1856 demandAvgMissLatency.subname(i, system->getMasterName(i));
1857 }
1858
1859 overallAvgMissLatency
1860 .name(name() + ".overall_avg_miss_latency")
1861 .desc("average overall miss latency")
1862 .flags(total | nozero | nonan)
1863 ;
1864 overallAvgMissLatency = overallMissLatency / overallMisses;
1865 for (int i = 0; i < system->maxMasters(); i++) {
1866 overallAvgMissLatency.subname(i, system->getMasterName(i));
1867 }
1868
1869 blocked_cycles.init(NUM_BLOCKED_CAUSES);
1870 blocked_cycles
1871 .name(name() + ".blocked_cycles")
1872 .desc("number of cycles access was blocked")
1873 .subname(Blocked_NoMSHRs, "no_mshrs")
1874 .subname(Blocked_NoTargets, "no_targets")
1875 ;
1876
1877
1878 blocked_causes.init(NUM_BLOCKED_CAUSES);
1879 blocked_causes
1880 .name(name() + ".blocked")
1881 .desc("number of times access was blocked")
1882 .subname(Blocked_NoMSHRs, "no_mshrs")
1883 .subname(Blocked_NoTargets, "no_targets")
1884 ;
1885
1886 avg_blocked
1887 .name(name() + ".avg_blocked_cycles")
1888 .desc("average number of cycles each access was blocked")
1889 .subname(Blocked_NoMSHRs, "no_mshrs")
1890 .subname(Blocked_NoTargets, "no_targets")
1891 ;
1892
1893 avg_blocked = blocked_cycles / blocked_causes;
1894
1895 unusedPrefetches
1896 .name(name() + ".unused_prefetches")
1897 .desc("number of HardPF blocks evicted w/o reference")
1898 .flags(nozero)
1899 ;
1900
1901 writebacks
1902 .init(system->maxMasters())
1903 .name(name() + ".writebacks")
1904 .desc("number of writebacks")
1905 .flags(total | nozero | nonan)
1906 ;
1907 for (int i = 0; i < system->maxMasters(); i++) {
1908 writebacks.subname(i, system->getMasterName(i));
1909 }
1910
1911 // MSHR statistics
1912 // MSHR hit statistics
1913 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1914 MemCmd cmd(access_idx);
1915 const string &cstr = cmd.toString();
1916
1917 mshr_hits[access_idx]
1918 .init(system->maxMasters())
1919 .name(name() + "." + cstr + "_mshr_hits")
1920 .desc("number of " + cstr + " MSHR hits")
1921 .flags(total | nozero | nonan)
1922 ;
1923 for (int i = 0; i < system->maxMasters(); i++) {
1924 mshr_hits[access_idx].subname(i, system->getMasterName(i));
1925 }
1926 }
1927
1928 demandMshrHits
1929 .name(name() + ".demand_mshr_hits")
1930 .desc("number of demand (read+write) MSHR hits")
1931 .flags(total | nozero | nonan)
1932 ;
1933 demandMshrHits = SUM_DEMAND(mshr_hits);
1934 for (int i = 0; i < system->maxMasters(); i++) {
1935 demandMshrHits.subname(i, system->getMasterName(i));
1936 }
1937
1938 overallMshrHits
1939 .name(name() + ".overall_mshr_hits")
1940 .desc("number of overall MSHR hits")
1941 .flags(total | nozero | nonan)
1942 ;
1943 overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
1944 for (int i = 0; i < system->maxMasters(); i++) {
1945 overallMshrHits.subname(i, system->getMasterName(i));
1946 }
1947
1948 // MSHR miss statistics
1949 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1950 MemCmd cmd(access_idx);
1951 const string &cstr = cmd.toString();
1952
1953 mshr_misses[access_idx]
1954 .init(system->maxMasters())
1955 .name(name() + "." + cstr + "_mshr_misses")
1956 .desc("number of " + cstr + " MSHR misses")
1957 .flags(total | nozero | nonan)
1958 ;
1959 for (int i = 0; i < system->maxMasters(); i++) {
1960 mshr_misses[access_idx].subname(i, system->getMasterName(i));
1961 }
1962 }
1963
1964 demandMshrMisses
1965 .name(name() + ".demand_mshr_misses")
1966 .desc("number of demand (read+write) MSHR misses")
1967 .flags(total | nozero | nonan)
1968 ;
1969 demandMshrMisses = SUM_DEMAND(mshr_misses);
1970 for (int i = 0; i < system->maxMasters(); i++) {
1971 demandMshrMisses.subname(i, system->getMasterName(i));
1972 }
1973
1974 overallMshrMisses
1975 .name(name() + ".overall_mshr_misses")
1976 .desc("number of overall MSHR misses")
1977 .flags(total | nozero | nonan)
1978 ;
1979 overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
1980 for (int i = 0; i < system->maxMasters(); i++) {
1981 overallMshrMisses.subname(i, system->getMasterName(i));
1982 }
1983
1984 // MSHR miss latency statistics
1985 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1986 MemCmd cmd(access_idx);
1987 const string &cstr = cmd.toString();
1988
1989 mshr_miss_latency[access_idx]
1990 .init(system->maxMasters())
1991 .name(name() + "." + cstr + "_mshr_miss_latency")
1992 .desc("number of " + cstr + " MSHR miss cycles")
1993 .flags(total | nozero | nonan)
1994 ;
1995 for (int i = 0; i < system->maxMasters(); i++) {
1996 mshr_miss_latency[access_idx].subname(i, system->getMasterName(i));
1997 }
1998 }
1999
2000 demandMshrMissLatency
2001 .name(name() + ".demand_mshr_miss_latency")
2002 .desc("number of demand (read+write) MSHR miss cycles")
2003 .flags(total | nozero | nonan)
2004 ;
2005 demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
2006 for (int i = 0; i < system->maxMasters(); i++) {
2007 demandMshrMissLatency.subname(i, system->getMasterName(i));
2008 }
2009
2010 overallMshrMissLatency
2011 .name(name() + ".overall_mshr_miss_latency")
2012 .desc("number of overall MSHR miss cycles")
2013 .flags(total | nozero | nonan)
2014 ;
2015 overallMshrMissLatency =
2016 demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
2017 for (int i = 0; i < system->maxMasters(); i++) {
2018 overallMshrMissLatency.subname(i, system->getMasterName(i));
2019 }
2020
2021 // MSHR uncacheable statistics
2022 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2023 MemCmd cmd(access_idx);
2024 const string &cstr = cmd.toString();
2025
2026 mshr_uncacheable[access_idx]
2027 .init(system->maxMasters())
2028 .name(name() + "." + cstr + "_mshr_uncacheable")
2029 .desc("number of " + cstr + " MSHR uncacheable")
2030 .flags(total | nozero | nonan)
2031 ;
2032 for (int i = 0; i < system->maxMasters(); i++) {
2033 mshr_uncacheable[access_idx].subname(i, system->getMasterName(i));
2034 }
2035 }
2036
2037 overallMshrUncacheable
2038 .name(name() + ".overall_mshr_uncacheable_misses")
2039 .desc("number of overall MSHR uncacheable misses")
2040 .flags(total | nozero | nonan)
2041 ;
2042 overallMshrUncacheable =
2043 SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
2044 for (int i = 0; i < system->maxMasters(); i++) {
2045 overallMshrUncacheable.subname(i, system->getMasterName(i));
2046 }
2047
2048 // MSHR uncacheable latency statistics
2049 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2050 MemCmd cmd(access_idx);
2051 const string &cstr = cmd.toString();
2052
2053 mshr_uncacheable_lat[access_idx]
2054 .init(system->maxMasters())
2055 .name(name() + "." + cstr + "_mshr_uncacheable_latency")
2056 .desc("number of " + cstr + " MSHR uncacheable cycles")
2057 .flags(total | nozero | nonan)
2058 ;
2059 for (int i = 0; i < system->maxMasters(); i++) {
2060 mshr_uncacheable_lat[access_idx].subname(
2061 i, system->getMasterName(i));
2062 }
2063 }
2064
2065 overallMshrUncacheableLatency
2066 .name(name() + ".overall_mshr_uncacheable_latency")
2067 .desc("number of overall MSHR uncacheable cycles")
2068 .flags(total | nozero | nonan)
2069 ;
2070 overallMshrUncacheableLatency =
2071 SUM_DEMAND(mshr_uncacheable_lat) +
2072 SUM_NON_DEMAND(mshr_uncacheable_lat);
2073 for (int i = 0; i < system->maxMasters(); i++) {
2074 overallMshrUncacheableLatency.subname(i, system->getMasterName(i));
2075 }
2076
2077#if 0
2078 // MSHR access formulas
2079 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2080 MemCmd cmd(access_idx);
2081 const string &cstr = cmd.toString();
2082
2083 mshrAccesses[access_idx]
2084 .name(name() + "." + cstr + "_mshr_accesses")
2085 .desc("number of " + cstr + " mshr accesses(hits+misses)")
2086 .flags(total | nozero | nonan)
2087 ;
2088 mshrAccesses[access_idx] =
2089 mshr_hits[access_idx] + mshr_misses[access_idx]
2090 + mshr_uncacheable[access_idx];
2091 }
2092
2093 demandMshrAccesses
2094 .name(name() + ".demand_mshr_accesses")
2095 .desc("number of demand (read+write) mshr accesses")
2096 .flags(total | nozero | nonan)
2097 ;
2098 demandMshrAccesses = demandMshrHits + demandMshrMisses;
2099
2100 overallMshrAccesses
2101 .name(name() + ".overall_mshr_accesses")
2102 .desc("number of overall (read+write) mshr accesses")
2103 .flags(total | nozero | nonan)
2104 ;
2105 overallMshrAccesses = overallMshrHits + overallMshrMisses
2106 + overallMshrUncacheable;
2107#endif
2108
2109 // MSHR miss rate formulas
2110 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2111 MemCmd cmd(access_idx);
2112 const string &cstr = cmd.toString();
2113
2114 mshrMissRate[access_idx]
2115 .name(name() + "." + cstr + "_mshr_miss_rate")
2116 .desc("mshr miss rate for " + cstr + " accesses")
2117 .flags(total | nozero | nonan)
2118 ;
2119 mshrMissRate[access_idx] =
2120 mshr_misses[access_idx] / accesses[access_idx];
2121
2122 for (int i = 0; i < system->maxMasters(); i++) {
2123 mshrMissRate[access_idx].subname(i, system->getMasterName(i));
2124 }
2125 }
2126
2127 demandMshrMissRate
2128 .name(name() + ".demand_mshr_miss_rate")
2129 .desc("mshr miss rate for demand accesses")
2130 .flags(total | nozero | nonan)
2131 ;
2132 demandMshrMissRate = demandMshrMisses / demandAccesses;
2133 for (int i = 0; i < system->maxMasters(); i++) {
2134 demandMshrMissRate.subname(i, system->getMasterName(i));
2135 }
2136
2137 overallMshrMissRate
2138 .name(name() + ".overall_mshr_miss_rate")
2139 .desc("mshr miss rate for overall accesses")
2140 .flags(total | nozero | nonan)
2141 ;
2142 overallMshrMissRate = overallMshrMisses / overallAccesses;
2143 for (int i = 0; i < system->maxMasters(); i++) {
2144 overallMshrMissRate.subname(i, system->getMasterName(i));
2145 }
2146
2147 // mshrMiss latency formulas
2148 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2149 MemCmd cmd(access_idx);
2150 const string &cstr = cmd.toString();
2151
2152 avgMshrMissLatency[access_idx]
2153 .name(name() + "." + cstr + "_avg_mshr_miss_latency")
2154 .desc("average " + cstr + " mshr miss latency")
2155 .flags(total | nozero | nonan)
2156 ;
2157 avgMshrMissLatency[access_idx] =
2158 mshr_miss_latency[access_idx] / mshr_misses[access_idx];
2159
2160 for (int i = 0; i < system->maxMasters(); i++) {
2161 avgMshrMissLatency[access_idx].subname(
2162 i, system->getMasterName(i));
2163 }
2164 }
2165
2166 demandAvgMshrMissLatency
2167 .name(name() + ".demand_avg_mshr_miss_latency")
2168 .desc("average demand mshr miss latency")
2169 .flags(total | nozero | nonan)
2170 ;
2171 demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
2172 for (int i = 0; i < system->maxMasters(); i++) {
2173 demandAvgMshrMissLatency.subname(i, system->getMasterName(i));
2174 }
2175
2176 overallAvgMshrMissLatency
2177 .name(name() + ".overall_avg_mshr_miss_latency")
2178 .desc("average overall mshr miss latency")
2179 .flags(total | nozero | nonan)
2180 ;
2181 overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
2182 for (int i = 0; i < system->maxMasters(); i++) {
2183 overallAvgMshrMissLatency.subname(i, system->getMasterName(i));
2184 }
2185
2186 // mshrUncacheable latency formulas
2187 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2188 MemCmd cmd(access_idx);
2189 const string &cstr = cmd.toString();
2190
2191 avgMshrUncacheableLatency[access_idx]
2192 .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency")
2193 .desc("average " + cstr + " mshr uncacheable latency")
2194 .flags(total | nozero | nonan)
2195 ;
2196 avgMshrUncacheableLatency[access_idx] =
2197 mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
2198
2199 for (int i = 0; i < system->maxMasters(); i++) {
2200 avgMshrUncacheableLatency[access_idx].subname(
2201 i, system->getMasterName(i));
2202 }
2203 }
2204
2205 overallAvgMshrUncacheableLatency
2206 .name(name() + ".overall_avg_mshr_uncacheable_latency")
2207 .desc("average overall mshr uncacheable latency")
2208 .flags(total | nozero | nonan)
2209 ;
2210 overallAvgMshrUncacheableLatency =
2211 overallMshrUncacheableLatency / overallMshrUncacheable;
2212 for (int i = 0; i < system->maxMasters(); i++) {
2213 overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i));
2214 }
2215
2216 replacements
2217 .name(name() + ".replacements")
2218 .desc("number of replacements")
2219 ;
2220}
2221
2222void
2223BaseCache::regProbePoints()
2224{
2225 ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit");
2226 ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss");
2227 ppFill = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Fill");
2228}
2229
2230///////////////
2231//
2232// CpuSidePort
2233//
2234///////////////
2235bool
2236BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
2237{
2238 // Snoops shouldn't happen when bypassing caches
2239 assert(!cache->system->bypassCaches());
2240
2241 assert(pkt->isResponse());
2242
2243 // Express snoop responses from master to slave, e.g., from L1 to L2
2244 cache->recvTimingSnoopResp(pkt);
2245 return true;
2246}
2247
2248
2249bool
2250BaseCache::CpuSidePort::tryTiming(PacketPtr pkt)
2251{
2252 if (cache->system->bypassCaches() || pkt->isExpressSnoop()) {
2253 // always let express snoop packets through even if blocked
2254 return true;
2255 } else if (blocked || mustSendRetry) {
2256 // either already committed to send a retry, or blocked
2257 mustSendRetry = true;
2258 return false;
2259 }
2260 mustSendRetry = false;
2261 return true;
2262}
2263
2264bool
2265BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
2266{
2267 assert(pkt->isRequest());
2268
2269 if (cache->system->bypassCaches()) {
2270 // Just forward the packet if caches are disabled.
2271 // @todo This should really enqueue the packet rather than
2272 bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
2273 assert(success);
2274 return true;
2275 } else if (tryTiming(pkt)) {
2276 cache->recvTimingReq(pkt);
2277 return true;
2278 }
2279 return false;
2280}
2281
2282Tick
2283BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
2284{
2285 if (cache->system->bypassCaches()) {
2286 // Forward the request if the system is in cache bypass mode.
2287 return cache->memSidePort.sendAtomic(pkt);
2288 } else {
2289 return cache->recvAtomic(pkt);
2290 }
2291}
2292
2293void
2294BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
2295{
2296 if (cache->system->bypassCaches()) {
2297 // The cache should be flushed if we are in cache bypass mode,
2298 // so we don't need to check if we need to update anything.
2299 cache->memSidePort.sendFunctional(pkt);
2300 return;
2301 }
2302
2303 // functional request
2304 cache->functionalAccess(pkt, true);
2305}
2306
2307AddrRangeList
2308BaseCache::CpuSidePort::getAddrRanges() const
2309{
2310 return cache->getAddrRanges();
2311}
2312
2313
2314BaseCache::
2315CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
2316 const std::string &_label)
2317 : CacheSlavePort(_name, _cache, _label), cache(_cache)
2318{
2319}
2320
2321///////////////
2322//
2323// MemSidePort
2324//
2325///////////////
2326bool
2327BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
2328{
2329 cache->recvTimingResp(pkt);
2330 return true;
2331}
2332
2333// Express snooping requests to memside port
2334void
2335BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2336{
2337 // Snoops shouldn't happen when bypassing caches
2338 assert(!cache->system->bypassCaches());
2339
2340 // handle snooping requests
2341 cache->recvTimingSnoopReq(pkt);
2342}
2343
2344Tick
2345BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
2346{
2347 // Snoops shouldn't happen when bypassing caches
2348 assert(!cache->system->bypassCaches());
2349
2350 return cache->recvAtomicSnoop(pkt);
2351}
2352
2353void
2354BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
2355{
2356 // Snoops shouldn't happen when bypassing caches
2357 assert(!cache->system->bypassCaches());
2358
2359 // functional snoop (note that in contrast to atomic we don't have
2360 // a specific functionalSnoop method, as they have the same
2361 // behaviour regardless)
2362 cache->functionalAccess(pkt, false);
2363}
2364
2365void
2366BaseCache::CacheReqPacketQueue::sendDeferredPacket()
2367{
2368 // sanity check
2369 assert(!waitingOnRetry);
2370
2371 // there should never be any deferred request packets in the
2372 // queue; instead we rely on the cache to provide the packets
2373 // from the MSHR queue or write queue
2374 assert(deferredPacketReadyTime() == MaxTick);
2375
2376 // check for request packets (requests & writebacks)
2377 QueueEntry* entry = cache.getNextQueueEntry();
2378
2379 if (!entry) {
2380 // can happen if e.g. we attempt a writeback and fail, but
2381 // before the retry, the writeback is eliminated because
2382 // we snoop another cache's ReadEx.
2383 } else {
2384 // let our snoop responses go first if there are responses to
2385 // the same addresses
2386 if (checkConflictingSnoop(entry->blkAddr)) {
2387 return;
2388 }
2389 waitingOnRetry = entry->sendPacket(cache);
2390 }
2391
2392 // if we succeeded and are not waiting for a retry, schedule the
2393 // next send considering when the next queue is ready, note that
2394 // snoop responses have their own packet queue and thus schedule
2395 // their own events
2396 if (!waitingOnRetry) {
2397 schedSendEvent(cache.nextQueueReadyTime());
2398 }
2399}
2400
2401BaseCache::MemSidePort::MemSidePort(const std::string &_name,
2402 BaseCache *_cache,
2403 const std::string &_label)
2404 : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
2405 _reqQueue(*_cache, *this, _snoopRespQueue, _label),
2406 _snoopRespQueue(*_cache, *this, true, _label), cache(_cache)
2407{
2408}
2409
2410void
2411WriteAllocator::updateMode(Addr write_addr, unsigned write_size,
2412 Addr blk_addr)
2413{
2414 // check if we are continuing where the last write ended
2415 if (nextAddr == write_addr) {
2416 delayCtr[blk_addr] = delayThreshold;
2417 // stop if we have already saturated
2418 if (mode != WriteMode::NO_ALLOCATE) {
2419 byteCount += write_size;
2420 // switch to streaming mode if we have passed the lower
2421 // threshold
2422 if (mode == WriteMode::ALLOCATE &&
2423 byteCount > coalesceLimit) {
2424 mode = WriteMode::COALESCE;
2425 DPRINTF(Cache, "Switched to write coalescing\n");
2426 } else if (mode == WriteMode::COALESCE &&
2427 byteCount > noAllocateLimit) {
2428 // and continue and switch to non-allocating mode if we
2429 // pass the upper threshold
2430 mode = WriteMode::NO_ALLOCATE;
2431 DPRINTF(Cache, "Switched to write-no-allocate\n");
2432 }
2433 }
2434 } else {
2435 // we did not see a write matching the previous one, start
2436 // over again
2437 byteCount = write_size;
2438 mode = WriteMode::ALLOCATE;
2439 resetDelay(blk_addr);
2440 }
2441 nextAddr = write_addr + write_size;
2442}
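
// A worked example of the transitions above (illustrative values:
// coalesceLimit = 128 bytes, noAllocateLimit = 256 bytes, a stream
// of sequential 64-byte writes, and nextAddr initially matching
// none of them):
//
//   updateMode(0x000, 64, 0x000); // new stream: byteCount=64, ALLOCATE
//   updateMode(0x040, 64, 0x040); // byteCount=128, still ALLOCATE
//   updateMode(0x080, 64, 0x080); // byteCount=192 > 128 -> COALESCE
//   updateMode(0x0c0, 64, 0x0c0); // byteCount=256, still COALESCE
//   updateMode(0x100, 64, 0x100); // byteCount=320 > 256 -> NO_ALLOCATE
//
// Any non-sequential write resets byteCount and returns the
// allocator to ALLOCATE mode.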
2443
2444WriteAllocator*
2445WriteAllocatorParams::create()
2446{
2447 return new WriteAllocator(this);
2448}