base.cc: comparison of revision 13745:1cf82fb6c4ab (old, -) with revision 13746:723109f11d56 (new, +)
/*
 * Copyright (c) 2012-2013, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "mem/cache/base.hh"

#include "base/compiler.hh"
#include "base/logging.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheRepl.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "mem/cache/queue_entry.hh"
#include "params/BaseCache.hh"
#include "params/WriteAllocator.hh"
#include "sim/core.hh"

class BaseMasterPort;
class BaseSlavePort;

using namespace std;

BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
                                          BaseCache *_cache,
                                          const std::string &_label)
    : QueuedSlavePort(_name, _cache, queue),
      queue(*_cache, *this, true, _label),
      blocked(false), mustSendRetry(false),
      sendRetryEvent([this]{ processSendRetry(); }, _name)
{
}

BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
    : MemObject(p),
      cpuSidePort (p->name + ".cpu_side", this, "CpuSidePort"),
      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
      tags(p->tags),
      prefetcher(p->prefetcher),
      writeAllocator(p->write_allocator),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri),
      blkSize(blk_size),
      lookupLatency(p->tag_latency),
      dataLatency(p->data_latency),
      forwardLatency(p->tag_latency),
      fillLatency(p->data_latency),
      responseLatency(p->response_latency),
      sequentialAccess(p->sequential_access),
      numTarget(p->tgts_per_mshr),
      forwardSnoops(true),
      clusivity(p->clusivity),
      isReadOnly(p->is_read_only),
      blocked(0),
      order(0),
      noTargetMSHR(nullptr),
      missCount(p->max_miss_count),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
      system(p->system)
{
    // the MSHR queue has no reserve entries as we check the MSHR
    // queue on every single allocation, whereas the write queue has
    // as many reserve entries as we have MSHRs, since every MSHR may
    // eventually require a writeback, and we do not check the write
    // buffer before committing to an MSHR

    // forward snoops is overridden in init() once we can query
    // whether the connected master is actually snooping or not

    tempBlock = new TempCacheBlk(blkSize);

    tags->tagsInit();
    if (prefetcher)
        prefetcher->setCache(this);
}

BaseCache::~BaseCache()
{
    delete tempBlock;
}

void
BaseCache::CacheSlavePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(CachePort, "Port is blocking new requests\n");
    blocked = true;
    // if we already scheduled a retry in this cycle, but it has not yet
    // happened, cancel it
    if (sendRetryEvent.scheduled()) {
        owner.deschedule(sendRetryEvent);
        DPRINTF(CachePort, "Port descheduled retry\n");
        mustSendRetry = true;
    }
}

void
BaseCache::CacheSlavePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(CachePort, "Port is accepting new requests\n");
    blocked = false;
    if (mustSendRetry) {
        // @TODO: need to find a better time (next cycle?)
        owner.schedule(sendRetryEvent, curTick() + 1);
    }
}

void
BaseCache::CacheSlavePort::processSendRetry()
{
    DPRINTF(CachePort, "Port is sending retry\n");

    // reset the flag and call retry
    mustSendRetry = false;
    sendRetryReq();
}

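// Reconstruct the full address of a block. The tempBlock is not backed
// by the tag arrays, so it stores its address explicitly and is handled
// as a special case.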
Addr
BaseCache::regenerateBlkAddr(CacheBlk* blk)
{
    if (blk != tempBlock) {
        return tags->regenerateBlkAddr(blk);
    } else {
        return tempBlock->getAddr();
    }
}

void
BaseCache::init()
{
    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
        fatal("Cache ports on %s are not connected\n", name());
    cpuSidePort.sendRangeChange();
    forwardSnoops = cpuSidePort.isSnooping();
}

BaseMasterPort &
BaseCache::getMasterPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_side") {
        return memSidePort;
    } else {
        return MemObject::getMasterPort(if_name, idx);
    }
}

BaseSlavePort &
BaseCache::getSlavePort(const std::string &if_name, PortID idx)
{
    if (if_name == "cpu_side") {
        return cpuSidePort;
    } else {
        return MemObject::getSlavePort(if_name, idx);
    }
}

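// Check whether an address falls within any of the address ranges this
// cache is responsible for.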
bool
BaseCache::inRange(Addr addr) const
{
    for (const auto& r : addrRanges) {
        if (r.contains(addr)) {
            return true;
        }
    }
    return false;
}

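// Handle a timing request that hit: schedule the response back to the
// CPU side at request_time or, if no response is needed (e.g. for an
// eviction), queue the packet for deletion.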
void
BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    if (pkt->needsResponse()) {
        // These delays should have been consumed by now
        assert(pkt->headerDelay == 0);
        assert(pkt->payloadDelay == 0);

        pkt->makeTimingResponse();

        // In this case we are considering request_time that takes
        // into account the delay of the xbar, if any, and just
        // lat, neglecting responseLatency, modelling hit latency
        // just as the value of lat overridden by access(), which calls
        // the calculateAccessLatency() function.
        cpuSidePort.schedTimingResp(pkt, request_time);
    } else {
        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                pkt->print());

        // queue the packet for deletion, as the sending cache is
        // still relying on it; if the block is found in access(),
        // CleanEvict and Writeback messages will be deleted
        // here as well
        pendingDelete.reset(pkt);
    }
}

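// Handle a timing request that missed: coalesce it into an existing
// MSHR for the same block if possible, send evictions and clean writes
// to the write buffer, or allocate a new miss buffer entry.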
void
BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                               Tick forward_time, Tick request_time)
{
    if (writeAllocator &&
        pkt && pkt->isWrite() && !pkt->req->isUncacheable()) {
        writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
                                   pkt->getBlockAddr(blkSize));
    }

    if (mshr) {
        /// MSHR hit
        /// @note writebacks will be checked in getNextMSHR()
        /// for any conflicting requests to the same block

        //@todo remove hw_pf here

        // Coalesce unless it was a software prefetch (see above).
        if (pkt) {
            assert(!pkt->isWriteback());
            // CleanEvicts corresponding to blocks which have
            // outstanding requests in MSHRs are simply sunk here
            if (pkt->cmd == MemCmd::CleanEvict) {
                pendingDelete.reset(pkt);
            } else if (pkt->cmd == MemCmd::WriteClean) {
                // A WriteClean should never coalesce with any
                // outstanding cache maintenance requests.

                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                        pkt->print());

                assert(pkt->req->masterId() < system->maxMasters());
                mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // We use forward_time here because it is the same for
                // any new target: it covers the latency to allocate an
                // internal buffer and to schedule an event to the
                // queued port, and also takes into account the
                // additional delay of the xbar.
                mshr->allocateTarget(pkt, forward_time, order++,
                                     allocOnFill(pkt->cmd));
                if (mshr->getNumTargets() == numTarget) {
                    noTargetMSHR = mshr;
                    setBlocked(Blocked_NoTargets);
                    // need to be careful with this... if this mshr isn't
                    // ready yet (i.e. time > curTick()), we don't want to
                    // move it ahead of mshrs that are ready
                    // mshrQueue.moveToFront(mshr);
                }
            }
        }
    } else {
        // no MSHR
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
            // We use forward_time here because there is a
            // writeback or writeclean, forwarded to WriteBuffer.
            allocateWriteBuffer(pkt, forward_time);
        } else {
            if (blk && blk->isValid()) {
                // If we have a write miss to a valid block, we
                // need to mark the block non-readable. Otherwise
                // if we allow reads while there's an outstanding
                // write miss, the read could return stale data
                // out of the cache block... a more aggressive
                // system could detect the overlap (if any) and
                // forward data out of the MSHRs, but we don't do
                // that yet. Note that we do need to leave the
                // block valid so that it stays in the cache, in
                // case we get an upgrade response (and hence no
                // new data) when the write miss completes.
                // As long as CPUs do proper store/load forwarding
                // internally, and have a sufficiently weak memory
                // model, this is probably unnecessary, but at some
                // point it must have seemed like we needed it...
                assert((pkt->needsWritable() && !blk->isWritable()) ||
                       pkt->req->isCacheMaintenance());
                blk->status &= ~BlkReadable;
            }
            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }
    }
}

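// Entry point for timing-mode requests arriving on the CPU side. As an
// illustration of the latency bookkeeping below (values are made up):
// with a 1000-tick clock, forwardLatency = 2 and a packet carrying 500
// ticks of headerDelay, forward_time = clockEdge(2) + 500, i.e. a
// forwarded packet pays both the cache's forward latency and the
// crossbar delay it accumulated on the way in.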
void
BaseCache::recvTimingReq(PacketPtr pkt)
{
    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    Cycles lat;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() will set the lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // copy writebacks to write buffer here to ensure they logically
        // precede anything happening below
        doWritebacks(writebacks, forward_time);
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just the value set by the access() function.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
-    Tick request_time = clockEdge(lat) + pkt->headerDelay;
+    Tick request_time = clockEdge(lat);
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    if (satisfied) {
        // notify before anything else as later handleTimingReqHit might
        // turn the packet into a response
        ppHit->notify(pkt);

        if (prefetcher && blk && blk->wasPrefetched()) {
            blk->status &= ~BlkHWPrefetched;
        }

        handleTimingReqHit(pkt, blk, request_time);
    } else {
        handleTimingReqMiss(pkt, blk, forward_time, request_time);

        ppMiss->notify(pkt);
    }

    if (prefetcher) {
        // track time of availability of next prefetch, if any
        Tick next_pf_time = prefetcher->nextPrefetchReadyTime();
        if (next_pf_time != MaxTick) {
            schedMemSideSendEvent(next_pf_time);
        }
    }
}

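// Respond to an uncacheable write: the response goes back to the CPU
// side after responseLatency plus the header and payload delay the
// packet accumulated on its way here.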
void
BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
{
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort.schedTimingResp(pkt, completion_time);
}

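// Handle a response arriving from the memory side. Other than
// uncacheable writes, every response corresponds to an outstanding
// MSHR (a miss or a prefetch) whose targets are serviced below.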
void
BaseCache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    const bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    MSHR::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    PacketList writebacks;

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
         mshr->wasWholeLineWrite);

    // make sure that if the mshr was due to a whole line write then
    // the response is an invalidation
    assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
            writeAllocator->allocate() : mshr->allocOnFill();
        blk = handleFill(pkt, blk, writebacks, allocate);
        assert(blk != nullptr);
        ppFill->notify(pkt);
    }

    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
        // The block was marked not readable while there was a pending
        // cache maintenance operation, restore its flag.
        blk->status |= BlkReadable;

        // This was a cache clean operation (without invalidate)
        // and we have a copy of the block already. Since there
        // is no invalidation, we can promote targets that don't
        // require a writable copy
        mshr->promoteReadable();
    }

    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
        // If at this point the referenced block is writable and the
        // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
        mshr->promoteWritable();
    }

    serviceMSHRTargets(mshr, pkt, blk);

    if (mshr->promoteDeferredTargets()) {
        // avoid a later read getting stale data while the write miss is
        // outstanding... see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        // while we deallocate an mshr from the queue we still have to
        // check the isFull condition before and after as we might
        // have been using the reserved entries already
        const bool was_full = mshrQueue.isFull();
        mshrQueue.deallocate(mshr);
        if (was_full && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }

    // if we used the temp block, check to see if it's valid and then
    // clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        evictBlock(blk, writebacks);
    }

    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}


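// Atomic-mode access: the request is satisfied (or forwarded and
// satisfied) within this single call, and the access latency is
// returned in ticks instead of being modelled with scheduled events.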
Tick
BaseCache::recvAtomic(PacketPtr pkt)
{
    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty
        // block. If a dirty block is encountered a WriteClean
        // will update any copies on the path to memory
        // up to the point of reference.
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);
    assert(writebacks.empty());

    if (!satisfied) {
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention. If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used the temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter), first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = evictBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}

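// Functional (debug) access: read or update cached state without any
// notion of timing, forwarding the packet onwards if it cannot be
// completely satisfied here.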
void
BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found. As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                     blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty ||
        cpuSidePort.trySatisfyFunctional(pkt) ||
        mshrQueue.trySatisfyFunctional(pkt, blk_addr) ||
        writeBuffer.trySatisfyFunctional(pkt, blk_addr) ||
        memSidePort.trySatisfyFunctional(pkt);

    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (from_cpu_side) {
            memSidePort.sendFunctional(pkt);
        } else if (cpuSidePort.isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort.sendFunctionalSnoop(pkt);
        }
    }
}


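// Implement SwapReq semantics: swap the packet's write value with the
// current block contents; for a conditional swap, memory is only
// overwritten if the current contents match the 32- or 64-bit
// condition value carried by the request.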
void
BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = pkt->getOffset(blkSize);
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}

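// Arbitrate between the MSHR queue and the write buffer for the next
// packet to send on the memory side: a full write buffer takes
// priority, otherwise misses are favoured, and an ordering conflict on
// the same block forces the older entry to be issued first.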
QueueEntry*
BaseCache::getNextQueueEntry()
{
    // Check both MSHR queue and write buffer for potential requests,
    // note that null does not mean there is no request, it could
    // simply be that it is not ready
    MSHR *miss_mshr = mshrQueue.getNext();
    WriteQueueEntry *wq_entry = writeBuffer.getNext();

    // If we got a write buffer request ready, first priority is a
    // full write buffer, otherwise we favour the miss requests
    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
        // need to search MSHR queue for conflicting earlier miss.
        MSHR *conflict_mshr =
            mshrQueue.findPending(wq_entry->blkAddr,
                                  wq_entry->isSecure);

        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
            // Service misses in order until conflict is cleared.
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue write
        return wq_entry;
    } else if (miss_mshr) {
        // need to check for conflicting earlier writeback
        WriteQueueEntry *conflict_mshr =
            writeBuffer.findPending(miss_mshr->blkAddr,
                                    miss_mshr->isSecure);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are
            // doing a write and we didn't have permissions
            // then subsequently saw a writeback (owned got evicted).
            // We need to make sure to perform the writeback first
            // to preserve the dirty data, then we can issue the write

            // should we return wq_entry here instead? I.e. do we
            // have to flush writes in order? I don't think so... not
            // for Alpha anyway. Maybe for x86?
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests. Try a prefetch.
    assert(!miss_mshr && !wq_entry);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = pkt->getBlockAddr(blkSize);
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt;
            }
        }
    }

    return nullptr;
}

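// Satisfy a request from a valid block held in this cache: perform the
// actual data read or write (including RMW, LLSC and upgrade handling)
// and update the block's status bits accordingly.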
void
BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            // extract data from cache and save it into the data field in
            // the packet as a return value from this atomic op
            int offset = tags->extractBlkOffset(pkt->getAddr());
            uint8_t *blk_data = blk->data + offset;
            pkt->setData(blk_data);

            // execute AMO operation
            (*(pkt->getAtomicOp()))(blk_data);

            // set block status to dirty
            blk->status |= BlkDirty;
        } else {
            cmpAndSwap(blk, pkt);
        }
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);
    } else if (pkt->isUpgrade()) {
        // sanity check
        assert(!pkt->hasSharers());

        if (blk->isDirty()) {
            // we were in the Owned state, and a cache above us that
            // has the line in Shared state needs to be made aware
            // that the data it already has is in fact dirty
            pkt->setCacheResponding();
            blk->status &= ~BlkDirty;
        }
    } else if (pkt->isClean()) {
        blk->status &= ~BlkDirty;
    } else {
        assert(pkt->isInvalidate());
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
                pkt->print());
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////
Cycles
369 // Here we reset the timing of the packet.
370 pkt->headerDelay = pkt->payloadDelay = 0;
371
372 if (satisfied) {
373 // notify before anything else as later handleTimingReqHit might turn
374 // the packet in a response
375 ppHit->notify(pkt);
376
377 if (prefetcher && blk && blk->wasPrefetched()) {
378 blk->status &= ~BlkHWPrefetched;
379 }
380
381 handleTimingReqHit(pkt, blk, request_time);
382 } else {
383 handleTimingReqMiss(pkt, blk, forward_time, request_time);
384
385 ppMiss->notify(pkt);
386 }
387
388 if (prefetcher) {
389 // track time of availability of next prefetch, if any
390 Tick next_pf_time = prefetcher->nextPrefetchReadyTime();
391 if (next_pf_time != MaxTick) {
392 schedMemSideSendEvent(next_pf_time);
393 }
394 }
395}
396
397void
398BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
399{
400 Tick completion_time = clockEdge(responseLatency) +
401 pkt->headerDelay + pkt->payloadDelay;
402
403 // Reset the bus additional time as it is now accounted for
404 pkt->headerDelay = pkt->payloadDelay = 0;
405
406 cpuSidePort.schedTimingResp(pkt, completion_time);
407}
408
409void
410BaseCache::recvTimingResp(PacketPtr pkt)
411{
412 assert(pkt->isResponse());
413
414 // all header delay should be paid for by the crossbar, unless
415 // this is a prefetch response from above
416 panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
417 "%s saw a non-zero packet delay\n", name());
418
419 const bool is_error = pkt->isError();
420
421 if (is_error) {
422 DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
423 pkt->print());
424 }
425
426 DPRINTF(Cache, "%s: Handling response %s\n", __func__,
427 pkt->print());
428
429 // if this is a write, we should be looking at an uncacheable
430 // write
431 if (pkt->isWrite()) {
432 assert(pkt->req->isUncacheable());
433 handleUncacheableWriteResp(pkt);
434 return;
435 }
436
437 // we have dealt with any (uncacheable) writes above, from here on
438 // we know we are dealing with an MSHR due to a miss or a prefetch
439 MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
440 assert(mshr);
441
442 if (mshr == noTargetMSHR) {
443 // we always clear at least one target
444 clearBlocked(Blocked_NoTargets);
445 noTargetMSHR = nullptr;
446 }
447
448 // Initial target is used just for stats
449 MSHR::Target *initial_tgt = mshr->getTarget();
450 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
451 Tick miss_latency = curTick() - initial_tgt->recvTime;
452
453 if (pkt->req->isUncacheable()) {
454 assert(pkt->req->masterId() < system->maxMasters());
455 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
456 miss_latency;
457 } else {
458 assert(pkt->req->masterId() < system->maxMasters());
459 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
460 miss_latency;
461 }
462
463 PacketList writebacks;
464
465 bool is_fill = !mshr->isForward &&
466 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
467 mshr->wasWholeLineWrite);
468
469 // make sure that if the mshr was due to a whole line write then
470 // the response is an invalidation
471 assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());
472
473 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
474
475 if (is_fill && !is_error) {
476 DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
477 pkt->getAddr());
478
479 const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
480 writeAllocator->allocate() : mshr->allocOnFill();
481 blk = handleFill(pkt, blk, writebacks, allocate);
482 assert(blk != nullptr);
483 ppFill->notify(pkt);
484 }
485
486 if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
487 // The block was marked not readable while there was a pending
488 // cache maintenance operation, restore its flag.
489 blk->status |= BlkReadable;
490
491 // This was a cache clean operation (without invalidate)
492 // and we have a copy of the block already. Since there
493 // is no invalidation, we can promote targets that don't
494 // require a writable copy
495 mshr->promoteReadable();
496 }
497
498 if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
499 // If at this point the referenced block is writable and the
500 // response is not a cache invalidate, we promote targets that
501 // were deferred as we couldn't guarrantee a writable copy
502 mshr->promoteWritable();
503 }
504
505 serviceMSHRTargets(mshr, pkt, blk);
506
507 if (mshr->promoteDeferredTargets()) {
508 // avoid later read getting stale data while write miss is
509 // outstanding.. see comment in timingAccess()
510 if (blk) {
511 blk->status &= ~BlkReadable;
512 }
513 mshrQueue.markPending(mshr);
514 schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
515 } else {
516 // while we deallocate an mshr from the queue we still have to
517 // check the isFull condition before and after as we might
518 // have been using the reserved entries already
519 const bool was_full = mshrQueue.isFull();
520 mshrQueue.deallocate(mshr);
521 if (was_full && !mshrQueue.isFull()) {
522 clearBlocked(Blocked_NoMSHRs);
523 }
524
525 // Request the bus for a prefetch if this deallocation freed enough
526 // MSHRs for a prefetch to take place
527 if (prefetcher && mshrQueue.canPrefetch()) {
528 Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
529 clockEdge());
530 if (next_pf_time != MaxTick)
531 schedMemSideSendEvent(next_pf_time);
532 }
533 }
534
535 // if we used temp block, check to see if its valid and then clear it out
536 if (blk == tempBlock && tempBlock->isValid()) {
537 evictBlock(blk, writebacks);
538 }
539
540 const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
541 // copy writebacks to write buffer
542 doWritebacks(writebacks, forward_time);
543
544 DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
545 delete pkt;
546}
547
548
549Tick
550BaseCache::recvAtomic(PacketPtr pkt)
551{
552 // should assert here that there are no outstanding MSHRs or
553 // writebacks... that would mean that someone used an atomic
554 // access in timing mode
555
556 // We use lookupLatency here because it is used to specify the latency
557 // to access.
558 Cycles lat = lookupLatency;
559
560 CacheBlk *blk = nullptr;
561 PacketList writebacks;
562 bool satisfied = access(pkt, blk, lat, writebacks);
563
564 if (pkt->isClean() && blk && blk->isDirty()) {
565 // A cache clean opearation is looking for a dirty
566 // block. If a dirty block is encountered a WriteClean
567 // will update any copies to the path to the memory
568 // until the point of reference.
569 DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
570 __func__, pkt->print(), blk->print());
571 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
572 writebacks.push_back(wb_pkt);
573 pkt->setSatisfied();
574 }
575
576 // handle writebacks resulting from the access here to ensure they
577 // logically precede anything happening below
578 doWritebacksAtomic(writebacks);
579 assert(writebacks.empty());
580
581 if (!satisfied) {
582 lat += handleAtomicReqMiss(pkt, blk, writebacks);
583 }
584
585 // Note that we don't invoke the prefetcher at all in atomic mode.
586 // It's not clear how to do it properly, particularly for
587 // prefetchers that aggressively generate prefetch candidates and
588 // rely on bandwidth contention to throttle them; these will tend
589 // to pollute the cache in atomic mode since there is no bandwidth
590 // contention. If we ever do want to enable prefetching in atomic
591 // mode, though, this is the place to do it... see timingAccess()
592 // for an example (though we'd want to issue the prefetch(es)
593 // immediately rather than calling requestMemSideBus() as we do
594 // there).
595
596 // do any writebacks resulting from the response handling
597 doWritebacksAtomic(writebacks);
598
599 // if we used temp block, check to see if its valid and if so
600 // clear it out, but only do so after the call to recvAtomic is
601 // finished so that any downstream observers (such as a snoop
602 // filter), first see the fill, and only then see the eviction
603 if (blk == tempBlock && tempBlock->isValid()) {
604 // the atomic CPU calls recvAtomic for fetch and load/store
605 // sequentuially, and we may already have a tempBlock
606 // writeback from the fetch that we have not yet sent
607 if (tempBlockWriteback) {
608 // if that is the case, write the prevoius one back, and
609 // do not schedule any new event
610 writebackTempBlockAtomic();
611 } else {
612 // the writeback/clean eviction happens after the call to
613 // recvAtomic has finished (but before any successive
614 // calls), so that the response handling from the fill is
615 // allowed to happen first
616 schedule(writebackTempBlockAtomicEvent, curTick());
617 }
618
619 tempBlockWriteback = evictBlock(blk);
620 }
621
622 if (pkt->needsResponse()) {
623 pkt->makeAtomicResponse();
624 }
625
626 return lat * clockPeriod();
627}
628
629void
630BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
631{
632 Addr blk_addr = pkt->getBlockAddr(blkSize);
633 bool is_secure = pkt->isSecure();
634 CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
635 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
636
637 pkt->pushLabel(name());
638
639 CacheBlkPrintWrapper cbpw(blk);
640
641 // Note that just because an L2/L3 has valid data doesn't mean an
642 // L1 doesn't have a more up-to-date modified copy that still
643 // needs to be found. As a result we always update the request if
644 // we have it, but only declare it satisfied if we are the owner.
645
646 // see if we have data at all (owned or otherwise)
647 bool have_data = blk && blk->isValid()
648 && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
649 blk->data);
650
651 // data we have is dirty if marked as such or if we have an
652 // in-service MSHR that is pending a modified line
653 bool have_dirty =
654 have_data && (blk->isDirty() ||
655 (mshr && mshr->inService && mshr->isPendingModified()));
656
657 bool done = have_dirty ||
658 cpuSidePort.trySatisfyFunctional(pkt) ||
659 mshrQueue.trySatisfyFunctional(pkt, blk_addr) ||
660 writeBuffer.trySatisfyFunctional(pkt, blk_addr) ||
661 memSidePort.trySatisfyFunctional(pkt);
662
663 DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
664 (blk && blk->isValid()) ? "valid " : "",
665 have_data ? "data " : "", done ? "done " : "");
666
667 // We're leaving the cache, so pop cache->name() label
668 pkt->popLabel();
669
670 if (done) {
671 pkt->makeResponse();
672 } else {
673 // if it came as a request from the CPU side then make sure it
674 // continues towards the memory side
675 if (from_cpu_side) {
676 memSidePort.sendFunctional(pkt);
677 } else if (cpuSidePort.isSnooping()) {
678 // if it came from the memory side, it must be a snoop request
679 // and we should only forward it if we are forwarding snoops
680 cpuSidePort.sendFunctionalSnoop(pkt);
681 }
682 }
683}
684
685
686void
687BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
688{
689 assert(pkt->isRequest());
690
691 uint64_t overwrite_val;
692 bool overwrite_mem;
693 uint64_t condition_val64;
694 uint32_t condition_val32;
695
696 int offset = pkt->getOffset(blkSize);
697 uint8_t *blk_data = blk->data + offset;
698
699 assert(sizeof(uint64_t) >= pkt->getSize());
700
701 overwrite_mem = true;
702 // keep a copy of our possible write value, and copy what is at the
703 // memory address into the packet
704 pkt->writeData((uint8_t *)&overwrite_val);
705 pkt->setData(blk_data);
706
707 if (pkt->req->isCondSwap()) {
708 if (pkt->getSize() == sizeof(uint64_t)) {
709 condition_val64 = pkt->req->getExtraData();
710 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
711 sizeof(uint64_t));
712 } else if (pkt->getSize() == sizeof(uint32_t)) {
713 condition_val32 = (uint32_t)pkt->req->getExtraData();
714 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
715 sizeof(uint32_t));
716 } else
717 panic("Invalid size for conditional read/write\n");
718 }
719
720 if (overwrite_mem) {
721 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
722 blk->status |= BlkDirty;
723 }
724}
725
726QueueEntry*
727BaseCache::getNextQueueEntry()
728{
729 // Check both MSHR queue and write buffer for potential requests,
730 // note that null does not mean there is no request, it could
731 // simply be that it is not ready
732 MSHR *miss_mshr = mshrQueue.getNext();
733 WriteQueueEntry *wq_entry = writeBuffer.getNext();
734
735 // If we got a write buffer request ready, first priority is a
736 // full write buffer, otherwise we favour the miss requests
737 if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
738 // need to search MSHR queue for conflicting earlier miss.
739 MSHR *conflict_mshr =
740 mshrQueue.findPending(wq_entry->blkAddr,
741 wq_entry->isSecure);
742
743 if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
744 // Service misses in order until conflict is cleared.
745 return conflict_mshr;
746
747 // @todo Note that we ignore the ready time of the conflict here
748 }
749
750 // No conflicts; issue write
751 return wq_entry;
752 } else if (miss_mshr) {
753 // need to check for conflicting earlier writeback
754 WriteQueueEntry *conflict_mshr =
755 writeBuffer.findPending(miss_mshr->blkAddr,
756 miss_mshr->isSecure);
757 if (conflict_mshr) {
758 // not sure why we don't check order here... it was in the
759 // original code but commented out.
760
 761 // The only way this happens is if we are
 762 // doing a write and we didn't have permissions,
 763 // then subsequently saw a writeback (owned got evicted).
 764 // We need to make sure to perform the writeback first
 765 // to preserve the dirty data, then we can issue the write.
766
767 // should we return wq_entry here instead? I.e. do we
768 // have to flush writes in order? I don't think so... not
769 // for Alpha anyway. Maybe for x86?
770 return conflict_mshr;
771
772 // @todo Note that we ignore the ready time of the conflict here
773 }
774
775 // No conflicts; issue read
776 return miss_mshr;
777 }
778
779 // fall through... no pending requests. Try a prefetch.
780 assert(!miss_mshr && !wq_entry);
781 if (prefetcher && mshrQueue.canPrefetch()) {
782 // If we have a miss queue slot, we can try a prefetch
783 PacketPtr pkt = prefetcher->getPacket();
784 if (pkt) {
785 Addr pf_addr = pkt->getBlockAddr(blkSize);
786 if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
787 !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
788 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
789 // Update statistic on number of prefetches issued
790 // (hwpf_mshr_misses)
791 assert(pkt->req->masterId() < system->maxMasters());
792 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
793
794 // allocate an MSHR and return it, note
795 // that we send the packet straight away, so do not
796 // schedule the send
797 return allocateMissBuffer(pkt, curTick(), false);
798 } else {
799 // free the request and packet
800 delete pkt;
801 }
802 }
803 }
804
805 return nullptr;
806}
807
808void
809BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
810{
811 assert(pkt->isRequest());
812
813 assert(blk && blk->isValid());
814 // Occasionally this is not true... if we are a lower-level cache
815 // satisfying a string of Read and ReadEx requests from
816 // upper-level caches, a Read will mark the block as shared but we
817 // can satisfy a following ReadEx anyway since we can rely on the
818 // Read requester(s) to have buffered the ReadEx snoop and to
819 // invalidate their blocks after receiving them.
820 // assert(!pkt->needsWritable() || blk->isWritable());
821 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
822
823 // Check RMW operations first since both isRead() and
824 // isWrite() will be true for them
825 if (pkt->cmd == MemCmd::SwapReq) {
826 if (pkt->isAtomicOp()) {
827 // extract data from cache and save it into the data field in
828 // the packet as a return value from this atomic op
829 int offset = tags->extractBlkOffset(pkt->getAddr());
830 uint8_t *blk_data = blk->data + offset;
831 pkt->setData(blk_data);
832
833 // execute AMO operation
834 (*(pkt->getAtomicOp()))(blk_data);
835
836 // set block status to dirty
837 blk->status |= BlkDirty;
838 } else {
839 cmpAndSwap(blk, pkt);
840 }
841 } else if (pkt->isWrite()) {
 843 // we have the block in a writable state and can go ahead,
 844 // note that the line may also be considered writable in
 845 // downstream caches along the path to memory, but always
 846 // Exclusive, and never Modified
846 assert(blk->isWritable());
847 // Write or WriteLine at the first cache with block in writable state
848 if (blk->checkWrite(pkt)) {
849 pkt->writeDataToBlock(blk->data, blkSize);
850 }
 851 // Always mark the line as dirty (and thus transition to the
 852 // Modified state) even if this is a failed StoreCond, so we
 853 // supply data to any snoops that have appended themselves to
 854 // this cache before knowing the store will fail.
855 blk->status |= BlkDirty;
856 DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
857 } else if (pkt->isRead()) {
858 if (pkt->isLLSC()) {
859 blk->trackLoadLocked(pkt);
860 }
861
862 // all read responses have a data payload
863 assert(pkt->hasRespData());
864 pkt->setDataFromBlock(blk->data, blkSize);
865 } else if (pkt->isUpgrade()) {
866 // sanity check
867 assert(!pkt->hasSharers());
868
869 if (blk->isDirty()) {
870 // we were in the Owned state, and a cache above us that
871 // has the line in Shared state needs to be made aware
872 // that the data it already has is in fact dirty
873 pkt->setCacheResponding();
874 blk->status &= ~BlkDirty;
875 }
876 } else if (pkt->isClean()) {
877 blk->status &= ~BlkDirty;
878 } else {
879 assert(pkt->isInvalidate());
880 invalidateBlock(blk);
881 DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
882 pkt->print());
883 }
884}
885
886/////////////////////////////////////////////////////
887//
888// Access path: requests coming in from the CPU side
889//
890/////////////////////////////////////////////////////
891Cycles
892BaseCache::calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
893 const Cycles lookup_lat) const
894{
895 Cycles lat(0);
896
897 if (blk != nullptr) {
898 // As soon as the access arrives, for sequential accesses first access
899 // tags, then the data entry. In the case of parallel accesses the
900 // latency is dictated by the slowest of tag and data latencies.
901 if (sequentialAccess) {
902 lat = ticksToCycles(delay) + lookup_lat + dataLatency;
903 } else {
904 lat = ticksToCycles(delay) + std::max(lookup_lat, dataLatency);
905 }
906
907 // Check if the block to be accessed is available. If not, apply the
908 // access latency on top of when the block is ready to be accessed.
909 const Tick tick = curTick() + delay;
910 const Tick when_ready = blk->getWhenReady();
911 if (when_ready > tick &&
912 ticksToCycles(when_ready - tick) > lat) {
913 lat += ticksToCycles(when_ready - tick);
914 }
915 } else {
916 // In case of a miss, apply lookup latency on top of the metadata
917 // delay, as the access can only start when it arrives.
918 lat = ticksToCycles(delay) + lookup_lat;
919 }
920
921 return lat;
922}
923
924bool
925BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
926 PacketList &writebacks)
927{
928 // sanity check
929 assert(pkt->isRequest());
930
931 chatty_assert(!(isReadOnly && pkt->isWrite()),
932 "Should never see a write in a read-only cache %s\n",
933 name());
934
935 // Access block in the tags
936 Cycles tag_latency(0);
937 blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), tag_latency);
938
939 // Calculate access latency on top of when the packet arrives. This
940 // takes into account the bus delay.
941 lat = calculateAccessLatency(blk, pkt->headerDelay,
942 tag_latency);
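 // Illustrative numbers (assumed, not from any particular config):
 // with a 2-cycle tag lookup, a 2-cycle data array and no header
 // delay, a parallel-access hit costs max(2, 2) = 2 cycles, a
 // sequential-access hit costs 2 + 2 = 4 cycles, and a miss pays
 // only the 2-cycle lookup.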
935
936 DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
937 blk ? "hit " + blk->print() : "miss");
938
939 if (pkt->req->isCacheMaintenance()) {
940 // A cache maintenance operation is always forwarded to the
941 // memory below even if the block is found in dirty state.
942
943 // We defer any changes to the state of the block until we
944 // create and mark as in service the mshr for the downstream
945 // packet.
946 return false;
947 }
948
949 if (pkt->isEviction()) {
 950 // We check for the presence of the block in caches above before
 951 // issuing a Writeback or CleanEvict to the write buffer. Therefore
 952 // the only possible case is a CleanEvict packet coming from above
 953 // and encountering a Writeback generated in this cache that is
 954 // waiting in the write buffer. Cases of upper-level peer caches
 955 // generating CleanEvict and Writeback, or simply CleanEvict and
 956 // CleanEvict, almost simultaneously will be caught by snoops sent
 957 // out by the crossbar.
958 WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
959 pkt->isSecure());
960 if (wb_entry) {
961 assert(wb_entry->getNumTargets() == 1);
962 PacketPtr wbPkt = wb_entry->getTarget()->pkt;
963 assert(wbPkt->isWriteback());
964
965 if (pkt->isCleanEviction()) {
966 // The CleanEvict and WritebackClean snoops into other
967 // peer caches of the same level while traversing the
968 // crossbar. If a copy of the block is found, the
969 // packet is deleted in the crossbar. Hence, none of
970 // the other upper level caches connected to this
971 // cache have the block, so we can clear the
972 // BLOCK_CACHED flag in the Writeback if set and
973 // discard the CleanEvict by returning true.
974 wbPkt->clearBlockCached();
975 return true;
976 } else {
977 assert(pkt->cmd == MemCmd::WritebackDirty);
978 // Dirty writeback from above trumps our clean
979 // writeback... discard here
980 // Note: markInService will remove entry from writeback buffer.
981 markInService(wb_entry);
982 delete wbPkt;
983 }
984 }
985 }
986
 987 // Writeback handling is a special case. We can write the block into
 988 // the cache without having a writeable copy (or any copy at all).
989 if (pkt->isWriteback()) {
990 assert(blkSize == pkt->getSize());
991
 992 // we could get a clean writeback while we have
993 // outstanding accesses to a block, do the simple thing for
994 // now and drop the clean writeback so that we do not upset
995 // any ordering/decisions about ownership already taken
996 if (pkt->cmd == MemCmd::WritebackClean &&
997 mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
998 DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
999 "dropping\n", pkt->getAddr());
1000 return true;
1001 }
1002
1003 if (!blk) {
1004 // need to do a replacement
1005 blk = allocateBlock(pkt, writebacks);
1006 if (!blk) {
1007 // no replaceable block available: give up, fwd to next level.
1008 incMissCount(pkt);
1009 return false;
1010 }
1011
1012 blk->status |= BlkReadable;
1013 }
1014 // only mark the block dirty if we got a writeback command,
1015 // and leave it as is for a clean writeback
1016 if (pkt->cmd == MemCmd::WritebackDirty) {
1017 // TODO: the coherent cache can assert(!blk->isDirty());
1018 blk->status |= BlkDirty;
1019 }
1020 // if the packet does not have sharers, it is passing
1021 // writable, and we got the writeback in Modified or Exclusive
1022 // state, if not we are in the Owned or Shared state
1023 if (!pkt->hasSharers()) {
1024 blk->status |= BlkWritable;
1025 }
1026 // nothing else to do; writeback doesn't expect response
1027 assert(!pkt->needsResponse());
1028 pkt->writeDataToBlock(blk->data, blkSize);
1029 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
1030 incHitCount(pkt);
1031 // populate the time when the block will be ready to access.
1032 blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
1033 pkt->payloadDelay);
1034 return true;
1035 } else if (pkt->cmd == MemCmd::CleanEvict) {
1036 if (blk) {
1037 // Found the block in the tags, need to stop CleanEvict from
1038 // propagating further down the hierarchy. Returning true will
1039 // treat the CleanEvict like a satisfied write request and delete
1040 // it.
1041 return true;
1042 }
1043 // We didn't find the block here, propagate the CleanEvict further
1044 // down the memory hierarchy. Returning false will treat the CleanEvict
1045 // like a Writeback which could not find a replaceable block so has to
1046 // go to next level.
1047 return false;
1048 } else if (pkt->cmd == MemCmd::WriteClean) {
1049 // WriteClean handling is a special case. We can allocate a
1050 // block directly if it doesn't exist and we can update the
1051 // block immediately. The WriteClean transfers the ownership
1052 // of the block as well.
1053 assert(blkSize == pkt->getSize());
1054
1055 if (!blk) {
1056 if (pkt->writeThrough()) {
1057 // if this is a write through packet, we don't try to
1058 // allocate if the block is not present
1059 return false;
1060 } else {
1061 // a writeback that misses needs to allocate a new block
1062 blk = allocateBlock(pkt, writebacks);
1063 if (!blk) {
1064 // no replaceable block available: give up, fwd to
1065 // next level.
1066 incMissCount(pkt);
1067 return false;
1068 }
1069
1070 blk->status |= BlkReadable;
1071 }
1072 }
1073
 1074 // at this point this is either a writeback or a write-through
 1075 // write clean operation; the block is already in this
 1076 // cache, so we need to update the data and the block flags
1077 assert(blk);
1078 // TODO: the coherent cache can assert(!blk->isDirty());
1079 if (!pkt->writeThrough()) {
1080 blk->status |= BlkDirty;
1081 }
1082 // nothing else to do; writeback doesn't expect response
1083 assert(!pkt->needsResponse());
1084 pkt->writeDataToBlock(blk->data, blkSize);
1085 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
1086
1087 incHitCount(pkt);
1088 // populate the time when the block will be ready to access.
1089 blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
1090 pkt->payloadDelay);
 1091 // if this is a write-through packet it will be sent to the
 1092 // cache below
1093 return !pkt->writeThrough();
1094 } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
1095 blk->isReadable())) {
1096 // OK to satisfy access
1097 incHitCount(pkt);
1098 satisfyRequest(pkt, blk);
1099 maintainClusivity(pkt->fromCache(), blk);
1100
1101 return true;
1102 }
1103
1104 // Can't satisfy access normally... either no block (blk == nullptr)
1105 // or have block but need writable
1106
1107 incMissCount(pkt);
1108
1109 if (!blk && pkt->isLLSC() && pkt->isWrite()) {
1110 // complete miss on store conditional... just give up now
1111 pkt->req->setExtraData(0);
1112 return true;
1113 }
1114
1115 return false;
1116}
1117
1118void
1119BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
1120{
1121 if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
1122 clusivity == Enums::mostly_excl) {
1123 // if we have responded to a cache, and our block is still
1124 // valid, but not dirty, and this cache is mostly exclusive
1125 // with respect to the cache above, drop the block
1126 invalidateBlock(blk);
1127 }
1128}
1129
1130CacheBlk*
1131BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
1132 bool allocate)
1133{
1134 assert(pkt->isResponse());
1135 Addr addr = pkt->getAddr();
1136 bool is_secure = pkt->isSecure();
1137#if TRACING_ON
1138 CacheBlk::State old_state = blk ? blk->status : 0;
1139#endif
1140
1141 // When handling a fill, we should have no writes to this line.
1142 assert(addr == pkt->getBlockAddr(blkSize));
1143 assert(!writeBuffer.findMatch(addr, is_secure));
1144
1145 if (!blk) {
1146 // better have read new data...
1147 assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);
1148
1149 // need to do a replacement if allocating, otherwise we stick
1150 // with the temporary storage
1151 blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;
1152
1153 if (!blk) {
1154 // No replaceable block or a mostly exclusive
1155 // cache... just use temporary storage to complete the
1156 // current request and then get rid of it
1157 blk = tempBlock;
1158 tempBlock->insert(addr, is_secure);
1159 DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
1160 is_secure ? "s" : "ns");
1161 }
1162 } else {
1163 // existing block... probably an upgrade
1164 // don't clear block status... if block is already dirty we
1165 // don't want to lose that
1166 }
1167
1168 // Block is guaranteed to be valid at this point
1169 assert(blk->isValid());
1170 assert(blk->isSecure() == is_secure);
1171 assert(regenerateBlkAddr(blk) == addr);
1172
1173 blk->status |= BlkReadable;
1174
1175 // sanity check for whole-line writes, which should always be
1176 // marked as writable as part of the fill, and then later marked
1177 // dirty as part of satisfyRequest
1178 if (pkt->cmd == MemCmd::InvalidateResp) {
1179 assert(!pkt->hasSharers());
1180 }
1181
1182 // here we deal with setting the appropriate state of the line,
1183 // and we start by looking at the hasSharers flag, and ignore the
1184 // cacheResponding flag (normally signalling dirty data) if the
1185 // packet has sharers, thus the line is never allocated as Owned
1186 // (dirty but not writable), and always ends up being either
1187 // Shared, Exclusive or Modified, see Packet::setCacheResponding
1188 // for more details
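 // Resulting state at a glance (editorial summary):
 //   hasSharers  cacheResponding  ->  filled block ends up as
 //   set         (ignored)            Shared (readable only)
 //   clear       clear                Exclusive (writable, clean)
 //   clear       set                  Modified (writable, dirty)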
1189 if (!pkt->hasSharers()) {
 1190 // we could get a writable line from memory (rather than a
 1191 // cache); note that we set this bit even for a read-only
 1192 // cache, possibly revisit this decision
1193 blk->status |= BlkWritable;
1194
1195 // check if we got this via cache-to-cache transfer (i.e., from a
1196 // cache that had the block in Modified or Owned state)
1197 if (pkt->cacheResponding()) {
1198 // we got the block in Modified state, and invalidated the
 1199 // owner's copy
1200 blk->status |= BlkDirty;
1201
1202 chatty_assert(!isReadOnly, "Should never see dirty snoop response "
1203 "in read-only cache %s\n", name());
1204 }
1205 }
1206
1207 DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
1208 addr, is_secure ? "s" : "ns", old_state, blk->print());
1209
1210 // if we got new data, copy it in (checking for a read response
1211 // and a response that has data is the same in the end)
1212 if (pkt->isRead()) {
1213 // sanity checks
1214 assert(pkt->hasData());
1215 assert(pkt->getSize() == blkSize);
1216
1217 pkt->writeDataToBlock(blk->data, blkSize);
1218 }
1219 // We pay for fillLatency here.
1220 blk->setWhenReady(clockEdge(fillLatency) + pkt->payloadDelay);
1221
1222 return blk;
1223}
1224
1225CacheBlk*
1226BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
1227{
1228 // Get address
1229 const Addr addr = pkt->getAddr();
1230
1231 // Get secure bit
1232 const bool is_secure = pkt->isSecure();
1233
1234 // Find replacement victim
1235 std::vector<CacheBlk*> evict_blks;
1236 CacheBlk *victim = tags->findVictim(addr, is_secure, evict_blks);
1237
1238 // It is valid to return nullptr if there is no victim
1239 if (!victim)
1240 return nullptr;
1241
1242 // Print victim block's information
1243 DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print());
1244
1245 // Check for transient state allocations. If any of the entries listed
1246 // for eviction has a transient state, the allocation fails
1247 for (const auto& blk : evict_blks) {
1248 if (blk->isValid()) {
1249 Addr repl_addr = regenerateBlkAddr(blk);
1250 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
1251 if (repl_mshr) {
1252 // must be an outstanding upgrade or clean request
1253 // on a block we're about to replace...
1254 assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
1255 repl_mshr->isCleaning());
1256
1257 // too hard to replace block with transient state
1258 // allocation failed, block not inserted
1259 return nullptr;
1260 }
1261 }
1262 }
1263
1264 // The victim will be replaced by a new entry, so increase the replacement
1265 // counter if a valid block is being replaced
1266 if (victim->isValid()) {
1267 DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
1268 "(%s): %s\n", regenerateBlkAddr(victim),
1269 victim->isSecure() ? "s" : "ns",
1270 addr, is_secure ? "s" : "ns",
1271 victim->isDirty() ? "writeback" : "clean");
1272
1273 replacements++;
1274 }
1275
 1276 // Evict valid blocks associated with this victim block
1277 for (const auto& blk : evict_blks) {
1278 if (blk->isValid()) {
1279 if (blk->wasPrefetched()) {
1280 unusedPrefetches++;
1281 }
1282
1283 evictBlock(blk, writebacks);
1284 }
1285 }
1286
1287 // Insert new block at victimized entry
1288 tags->insertBlock(addr, is_secure, pkt->req->masterId(),
1289 pkt->req->taskId(), victim);
1290
1291 return victim;
1292}
1293
1294void
1295BaseCache::invalidateBlock(CacheBlk *blk)
1296{
1297 // If handling a block present in the Tags, let it do its invalidation
1298 // process, which will update stats and invalidate the block itself
1299 if (blk != tempBlock) {
1300 tags->invalidate(blk);
1301 } else {
1302 tempBlock->invalidate();
1303 }
1304}
1305
1306void
1307BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
1308{
1309 PacketPtr pkt = evictBlock(blk);
1310 if (pkt) {
1311 writebacks.push_back(pkt);
1312 }
1313}
1314
1315PacketPtr
1316BaseCache::writebackBlk(CacheBlk *blk)
1317{
1318 chatty_assert(!isReadOnly || writebackClean,
1319 "Writeback from read-only cache");
1320 assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));
1321
1322 writebacks[Request::wbMasterId]++;
1323
1324 RequestPtr req = std::make_shared<Request>(
1325 regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
1326
1327 if (blk->isSecure())
1328 req->setFlags(Request::SECURE);
1329
1330 req->taskId(blk->task_id);
1331
1332 PacketPtr pkt =
1333 new Packet(req, blk->isDirty() ?
1334 MemCmd::WritebackDirty : MemCmd::WritebackClean);
1335
1336 DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
1337 pkt->print(), blk->isWritable(), blk->isDirty());
1338
1339 if (blk->isWritable()) {
1340 // not asserting shared means we pass the block in modified
1341 // state, mark our own block non-writeable
1342 blk->status &= ~BlkWritable;
1343 } else {
1344 // we are in the Owned state, tell the receiver
1345 pkt->setHasSharers();
1346 }
1347
1348 // make sure the block is not marked dirty
1349 blk->status &= ~BlkDirty;
1350
1351 pkt->allocate();
1352 pkt->setDataFromBlock(blk->data, blkSize);
1353
1354 return pkt;
1355}
1356
1357PacketPtr
1358BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
1359{
1360 RequestPtr req = std::make_shared<Request>(
1361 regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
1362
1363 if (blk->isSecure()) {
1364 req->setFlags(Request::SECURE);
1365 }
1366 req->taskId(blk->task_id);
1367
1368 PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);
1369
1370 if (dest) {
1371 req->setFlags(dest);
1372 pkt->setWriteThrough();
1373 }
1374
1375 DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
1376 blk->isWritable(), blk->isDirty());
1377
1378 if (blk->isWritable()) {
1379 // not asserting shared means we pass the block in modified
1380 // state, mark our own block non-writeable
1381 blk->status &= ~BlkWritable;
1382 } else {
1383 // we are in the Owned state, tell the receiver
1384 pkt->setHasSharers();
1385 }
1386
1387 // make sure the block is not marked dirty
1388 blk->status &= ~BlkDirty;
1389
1390 pkt->allocate();
1391 pkt->setDataFromBlock(blk->data, blkSize);
1392
1393 return pkt;
1394}
1395
1396
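// Editorial note: the two helpers below implement the cache-wide
// writeback and invalidate hooks, used e.g. when draining before a
// checkpoint or switching CPU models; memWriteback functionally
// flushes every dirty line, after which memInvalidate can drop the
// (now clean) contents.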
1397void
1398BaseCache::memWriteback()
1399{
1400 tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
1401}
1402
1403void
1404BaseCache::memInvalidate()
1405{
1406 tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
1407}
1408
1409bool
1410BaseCache::isDirty() const
1411{
1412 return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
1413}
1414
1415bool
1416BaseCache::coalesce() const
1417{
1418 return writeAllocator && writeAllocator->coalesce();
1419}
1420
1421void
1422BaseCache::writebackVisitor(CacheBlk &blk)
1423{
1424 if (blk.isDirty()) {
1425 assert(blk.isValid());
1426
1427 RequestPtr request = std::make_shared<Request>(
1428 regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);
1429
1430 request->taskId(blk.task_id);
1431 if (blk.isSecure()) {
1432 request->setFlags(Request::SECURE);
1433 }
1434
1435 Packet packet(request, MemCmd::WriteReq);
1436 packet.dataStatic(blk.data);
1437
1438 memSidePort.sendFunctional(&packet);
1439
1440 blk.status &= ~BlkDirty;
1441 }
1442}
1443
1444void
1445BaseCache::invalidateVisitor(CacheBlk &blk)
1446{
1447 if (blk.isDirty())
1448 warn_once("Invalidating dirty cache lines. " \
1449 "Expect things to break.\n");
1450
1451 if (blk.isValid()) {
1452 assert(!blk.isDirty());
1453 invalidateBlock(&blk);
1454 }
1455}
1456
1457Tick
1458BaseCache::nextQueueReadyTime() const
1459{
1460 Tick nextReady = std::min(mshrQueue.nextReadyTime(),
1461 writeBuffer.nextReadyTime());
1462
1463 // Don't signal prefetch ready time if no MSHRs available
 1464 // Will signal once enough MSHRs are deallocated
1465 if (prefetcher && mshrQueue.canPrefetch()) {
1466 nextReady = std::min(nextReady,
1467 prefetcher->nextPrefetchReadyTime());
1468 }
1469
1470 return nextReady;
1471}
1472
1473
1474bool
1475BaseCache::sendMSHRQueuePacket(MSHR* mshr)
1476{
1477 assert(mshr);
1478
1479 // use request from 1st target
1480 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1481
1482 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());
1483
1484 // if the cache is in write coalescing mode or (additionally) in
1485 // no allocation mode, and we have a write packet with an MSHR
1486 // that is not a whole-line write (due to incompatible flags etc),
1487 // then reset the write mode
1488 if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) {
1489 if (!mshr->isWholeLineWrite()) {
 1490 // if we are currently write coalescing, hold on to the
 1491 // MSHR for as many extra cycles as we need to completely
 1492 // write a cache line
1493 if (writeAllocator->delay(mshr->blkAddr)) {
1494 Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
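 // illustrative sizing (assumed): a 64-byte line being filled
 // by 8-byte writes leaves up to 8 cycles for the remaining
 // writes to coalesce before the MSHR is sent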
1495 DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
1496 "for write coalescing\n", tgt_pkt->print(), delay);
1497 mshrQueue.delay(mshr, delay);
1498 return false;
1499 } else {
1500 writeAllocator->reset();
1501 }
1502 } else {
1503 writeAllocator->resetDelay(mshr->blkAddr);
1504 }
1505 }
1506
1507 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
1508
 1509 // either a prefetch that is not present upstream, or a normal
 1510 // MSHR request; proceed to get the packet to send downstream
1511 PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(),
1512 mshr->isWholeLineWrite());
1513
1514 mshr->isForward = (pkt == nullptr);
1515
1516 if (mshr->isForward) {
 1517 // not a cache block request, but a response is expected;
 1518 // make a copy of the current packet to forward, and keep
 1519 // the current copy for response handling
1520 pkt = new Packet(tgt_pkt, false, true);
1521 assert(!pkt->isWrite());
1522 }
1523
1524 // play it safe and append (rather than set) the sender state,
1525 // as forwarded packets may already have existing state
1526 pkt->pushSenderState(mshr);
1527
1528 if (pkt->isClean() && blk && blk->isDirty()) {
 1529 // A cache clean operation is looking for a dirty block. Mark
1530 // the packet so that the destination xbar can determine that
1531 // there will be a follow-up write packet as well.
1532 pkt->setSatisfied();
1533 }
1534
1535 if (!memSidePort.sendTimingReq(pkt)) {
1536 // we are awaiting a retry, but we
1537 // delete the packet and will be creating a new packet
1538 // when we get the opportunity
1539 delete pkt;
1540
1541 // note that we have now masked any requestBus and
1542 // schedSendEvent (we will wait for a retry before
1543 // doing anything), and this is so even if we do not
1544 // care about this packet and might override it before
1545 // it gets retried
1546 return true;
1547 } else {
1548 // As part of the call to sendTimingReq the packet is
1549 // forwarded to all neighbouring caches (and any caches
1550 // above them) as a snoop. Thus at this point we know if
1551 // any of the neighbouring caches are responding, and if
1552 // so, we know it is dirty, and we can determine if it is
1553 // being passed as Modified, making our MSHR the ordering
1554 // point
1555 bool pending_modified_resp = !pkt->hasSharers() &&
1556 pkt->cacheResponding();
1557 markInService(mshr, pending_modified_resp);
1558
1559 if (pkt->isClean() && blk && blk->isDirty()) {
 1560 // A cache clean operation is looking for a dirty
 1561 // block. If a dirty block is encountered, a WriteClean
 1562 // will update any copies on the path to memory up to
 1563 // the point of reference.
1564 DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
1565 __func__, pkt->print(), blk->print());
1566 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
1567 pkt->id);
1568 PacketList writebacks;
1569 writebacks.push_back(wb_pkt);
1570 doWritebacks(writebacks, 0);
1571 }
1572
1573 return false;
1574 }
1575}
1576
1577bool
1578BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
1579{
1580 assert(wq_entry);
1581
1582 // always a single target for write queue entries
1583 PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;
1584
1585 DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());
1586
1587 // forward as is, both for evictions and uncacheable writes
1588 if (!memSidePort.sendTimingReq(tgt_pkt)) {
1589 // note that we have now masked any requestBus and
1590 // schedSendEvent (we will wait for a retry before
1591 // doing anything), and this is so even if we do not
1592 // care about this packet and might override it before
1593 // it gets retried
1594 return true;
1595 } else {
1596 markInService(wq_entry);
1597 return false;
1598 }
1599}
1600
1601void
1602BaseCache::serialize(CheckpointOut &cp) const
1603{
1604 bool dirty(isDirty());
1605
1606 if (dirty) {
1607 warn("*** The cache still contains dirty data. ***\n");
1608 warn(" Make sure to drain the system using the correct flags.\n");
1609 warn(" This checkpoint will not restore correctly " \
1610 "and dirty data in the cache will be lost!\n");
1611 }
1612
1613 // Since we don't checkpoint the data in the cache, any dirty data
1614 // will be lost when restoring from a checkpoint of a system that
1615 // wasn't drained properly. Flag the checkpoint as invalid if the
1616 // cache contains dirty data.
1617 bool bad_checkpoint(dirty);
1618 SERIALIZE_SCALAR(bad_checkpoint);
1619}
1620
1621void
1622BaseCache::unserialize(CheckpointIn &cp)
1623{
1624 bool bad_checkpoint;
1625 UNSERIALIZE_SCALAR(bad_checkpoint);
1626 if (bad_checkpoint) {
1627 fatal("Restoring from checkpoints with dirty caches is not "
1628 "supported in the classic memory system. Please remove any "
1629 "caches or drain them properly before taking checkpoints.\n");
1630 }
1631}
1632
1633void
1634BaseCache::regStats()
1635{
1636 MemObject::regStats();
1637
1638 using namespace Stats;
1639
1640 // Hit statistics
1641 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1642 MemCmd cmd(access_idx);
1643 const string &cstr = cmd.toString();
1644
1645 hits[access_idx]
1646 .init(system->maxMasters())
1647 .name(name() + "." + cstr + "_hits")
1648 .desc("number of " + cstr + " hits")
1649 .flags(total | nozero | nonan)
1650 ;
1651 for (int i = 0; i < system->maxMasters(); i++) {
1652 hits[access_idx].subname(i, system->getMasterName(i));
1653 }
1654 }
1655
1656// These macros make it easier to sum the right subset of commands and
1657// to change the subset of commands that are considered "demand" vs
1658// "non-demand"
1659#define SUM_DEMAND(s) \
1660 (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \
1661 s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq])
1662
1663// should writebacks be included here? prior code was inconsistent...
1664#define SUM_NON_DEMAND(s) \
1665 (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq])
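// For instance, SUM_DEMAND(hits) expands to the per-command sum
// hits[MemCmd::ReadReq] + hits[MemCmd::WriteReq] + ..., so the demand
// and overall formulas below stay in sync with this single definition.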
1666
1667 demandHits
1668 .name(name() + ".demand_hits")
1669 .desc("number of demand (read+write) hits")
1670 .flags(total | nozero | nonan)
1671 ;
1672 demandHits = SUM_DEMAND(hits);
1673 for (int i = 0; i < system->maxMasters(); i++) {
1674 demandHits.subname(i, system->getMasterName(i));
1675 }
1676
1677 overallHits
1678 .name(name() + ".overall_hits")
1679 .desc("number of overall hits")
1680 .flags(total | nozero | nonan)
1681 ;
1682 overallHits = demandHits + SUM_NON_DEMAND(hits);
1683 for (int i = 0; i < system->maxMasters(); i++) {
1684 overallHits.subname(i, system->getMasterName(i));
1685 }
1686
1687 // Miss statistics
1688 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1689 MemCmd cmd(access_idx);
1690 const string &cstr = cmd.toString();
1691
1692 misses[access_idx]
1693 .init(system->maxMasters())
1694 .name(name() + "." + cstr + "_misses")
1695 .desc("number of " + cstr + " misses")
1696 .flags(total | nozero | nonan)
1697 ;
1698 for (int i = 0; i < system->maxMasters(); i++) {
1699 misses[access_idx].subname(i, system->getMasterName(i));
1700 }
1701 }
1702
1703 demandMisses
1704 .name(name() + ".demand_misses")
1705 .desc("number of demand (read+write) misses")
1706 .flags(total | nozero | nonan)
1707 ;
1708 demandMisses = SUM_DEMAND(misses);
1709 for (int i = 0; i < system->maxMasters(); i++) {
1710 demandMisses.subname(i, system->getMasterName(i));
1711 }
1712
1713 overallMisses
1714 .name(name() + ".overall_misses")
1715 .desc("number of overall misses")
1716 .flags(total | nozero | nonan)
1717 ;
1718 overallMisses = demandMisses + SUM_NON_DEMAND(misses);
1719 for (int i = 0; i < system->maxMasters(); i++) {
1720 overallMisses.subname(i, system->getMasterName(i));
1721 }
1722
1723 // Miss latency statistics
1724 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1725 MemCmd cmd(access_idx);
1726 const string &cstr = cmd.toString();
1727
1728 missLatency[access_idx]
1729 .init(system->maxMasters())
1730 .name(name() + "." + cstr + "_miss_latency")
1731 .desc("number of " + cstr + " miss cycles")
1732 .flags(total | nozero | nonan)
1733 ;
1734 for (int i = 0; i < system->maxMasters(); i++) {
1735 missLatency[access_idx].subname(i, system->getMasterName(i));
1736 }
1737 }
1738
1739 demandMissLatency
1740 .name(name() + ".demand_miss_latency")
1741 .desc("number of demand (read+write) miss cycles")
1742 .flags(total | nozero | nonan)
1743 ;
1744 demandMissLatency = SUM_DEMAND(missLatency);
1745 for (int i = 0; i < system->maxMasters(); i++) {
1746 demandMissLatency.subname(i, system->getMasterName(i));
1747 }
1748
1749 overallMissLatency
1750 .name(name() + ".overall_miss_latency")
1751 .desc("number of overall miss cycles")
1752 .flags(total | nozero | nonan)
1753 ;
1754 overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
1755 for (int i = 0; i < system->maxMasters(); i++) {
1756 overallMissLatency.subname(i, system->getMasterName(i));
1757 }
1758
1759 // access formulas
1760 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1761 MemCmd cmd(access_idx);
1762 const string &cstr = cmd.toString();
1763
1764 accesses[access_idx]
1765 .name(name() + "." + cstr + "_accesses")
 1766 .desc("number of " + cstr + " accesses (hits+misses)")
1767 .flags(total | nozero | nonan)
1768 ;
1769 accesses[access_idx] = hits[access_idx] + misses[access_idx];
1770
1771 for (int i = 0; i < system->maxMasters(); i++) {
1772 accesses[access_idx].subname(i, system->getMasterName(i));
1773 }
1774 }
1775
1776 demandAccesses
1777 .name(name() + ".demand_accesses")
1778 .desc("number of demand (read+write) accesses")
1779 .flags(total | nozero | nonan)
1780 ;
1781 demandAccesses = demandHits + demandMisses;
1782 for (int i = 0; i < system->maxMasters(); i++) {
1783 demandAccesses.subname(i, system->getMasterName(i));
1784 }
1785
1786 overallAccesses
1787 .name(name() + ".overall_accesses")
1788 .desc("number of overall (read+write) accesses")
1789 .flags(total | nozero | nonan)
1790 ;
1791 overallAccesses = overallHits + overallMisses;
1792 for (int i = 0; i < system->maxMasters(); i++) {
1793 overallAccesses.subname(i, system->getMasterName(i));
1794 }
1795
1796 // miss rate formulas
1797 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1798 MemCmd cmd(access_idx);
1799 const string &cstr = cmd.toString();
1800
1801 missRate[access_idx]
1802 .name(name() + "." + cstr + "_miss_rate")
1803 .desc("miss rate for " + cstr + " accesses")
1804 .flags(total | nozero | nonan)
1805 ;
1806 missRate[access_idx] = misses[access_idx] / accesses[access_idx];
1807
1808 for (int i = 0; i < system->maxMasters(); i++) {
1809 missRate[access_idx].subname(i, system->getMasterName(i));
1810 }
1811 }
1812
1813 demandMissRate
1814 .name(name() + ".demand_miss_rate")
1815 .desc("miss rate for demand accesses")
1816 .flags(total | nozero | nonan)
1817 ;
1818 demandMissRate = demandMisses / demandAccesses;
1819 for (int i = 0; i < system->maxMasters(); i++) {
1820 demandMissRate.subname(i, system->getMasterName(i));
1821 }
1822
1823 overallMissRate
1824 .name(name() + ".overall_miss_rate")
1825 .desc("miss rate for overall accesses")
1826 .flags(total | nozero | nonan)
1827 ;
1828 overallMissRate = overallMisses / overallAccesses;
1829 for (int i = 0; i < system->maxMasters(); i++) {
1830 overallMissRate.subname(i, system->getMasterName(i));
1831 }
1832
1833 // miss latency formulas
1834 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1835 MemCmd cmd(access_idx);
1836 const string &cstr = cmd.toString();
1837
1838 avgMissLatency[access_idx]
1839 .name(name() + "." + cstr + "_avg_miss_latency")
1840 .desc("average " + cstr + " miss latency")
1841 .flags(total | nozero | nonan)
1842 ;
1843 avgMissLatency[access_idx] =
1844 missLatency[access_idx] / misses[access_idx];
1845
1846 for (int i = 0; i < system->maxMasters(); i++) {
1847 avgMissLatency[access_idx].subname(i, system->getMasterName(i));
1848 }
1849 }
1850
1851 demandAvgMissLatency
1852 .name(name() + ".demand_avg_miss_latency")
 1853 .desc("average demand miss latency")
1854 .flags(total | nozero | nonan)
1855 ;
1856 demandAvgMissLatency = demandMissLatency / demandMisses;
1857 for (int i = 0; i < system->maxMasters(); i++) {
1858 demandAvgMissLatency.subname(i, system->getMasterName(i));
1859 }
1860
1861 overallAvgMissLatency
1862 .name(name() + ".overall_avg_miss_latency")
1863 .desc("average overall miss latency")
1864 .flags(total | nozero | nonan)
1865 ;
1866 overallAvgMissLatency = overallMissLatency / overallMisses;
1867 for (int i = 0; i < system->maxMasters(); i++) {
1868 overallAvgMissLatency.subname(i, system->getMasterName(i));
1869 }
1870
1871 blocked_cycles.init(NUM_BLOCKED_CAUSES);
1872 blocked_cycles
1873 .name(name() + ".blocked_cycles")
1874 .desc("number of cycles access was blocked")
1875 .subname(Blocked_NoMSHRs, "no_mshrs")
1876 .subname(Blocked_NoTargets, "no_targets")
1877 ;
1878
1879
1880 blocked_causes.init(NUM_BLOCKED_CAUSES);
1881 blocked_causes
1882 .name(name() + ".blocked")
 1883 .desc("number of times access was blocked")
1884 .subname(Blocked_NoMSHRs, "no_mshrs")
1885 .subname(Blocked_NoTargets, "no_targets")
1886 ;
1887
1888 avg_blocked
1889 .name(name() + ".avg_blocked_cycles")
1890 .desc("average number of cycles each access was blocked")
1891 .subname(Blocked_NoMSHRs, "no_mshrs")
1892 .subname(Blocked_NoTargets, "no_targets")
1893 ;
1894
1895 avg_blocked = blocked_cycles / blocked_causes;
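 // i.e. total blocked cycles divided by the number of blocking
 // events, reported separately per cause (no MSHRs vs. no targets)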
1896
1897 unusedPrefetches
1898 .name(name() + ".unused_prefetches")
1899 .desc("number of HardPF blocks evicted w/o reference")
1900 .flags(nozero)
1901 ;
1902
1903 writebacks
1904 .init(system->maxMasters())
1905 .name(name() + ".writebacks")
1906 .desc("number of writebacks")
1907 .flags(total | nozero | nonan)
1908 ;
1909 for (int i = 0; i < system->maxMasters(); i++) {
1910 writebacks.subname(i, system->getMasterName(i));
1911 }
1912
1913 // MSHR statistics
1914 // MSHR hit statistics
1915 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1916 MemCmd cmd(access_idx);
1917 const string &cstr = cmd.toString();
1918
1919 mshr_hits[access_idx]
1920 .init(system->maxMasters())
1921 .name(name() + "." + cstr + "_mshr_hits")
1922 .desc("number of " + cstr + " MSHR hits")
1923 .flags(total | nozero | nonan)
1924 ;
1925 for (int i = 0; i < system->maxMasters(); i++) {
1926 mshr_hits[access_idx].subname(i, system->getMasterName(i));
1927 }
1928 }
1929
1930 demandMshrHits
1931 .name(name() + ".demand_mshr_hits")
1932 .desc("number of demand (read+write) MSHR hits")
1933 .flags(total | nozero | nonan)
1934 ;
1935 demandMshrHits = SUM_DEMAND(mshr_hits);
1936 for (int i = 0; i < system->maxMasters(); i++) {
1937 demandMshrHits.subname(i, system->getMasterName(i));
1938 }
1939
1940 overallMshrHits
1941 .name(name() + ".overall_mshr_hits")
1942 .desc("number of overall MSHR hits")
1943 .flags(total | nozero | nonan)
1944 ;
1945 overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
1946 for (int i = 0; i < system->maxMasters(); i++) {
1947 overallMshrHits.subname(i, system->getMasterName(i));
1948 }
1949
1950 // MSHR miss statistics
1951 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1952 MemCmd cmd(access_idx);
1953 const string &cstr = cmd.toString();
1954
1955 mshr_misses[access_idx]
1956 .init(system->maxMasters())
1957 .name(name() + "." + cstr + "_mshr_misses")
1958 .desc("number of " + cstr + " MSHR misses")
1959 .flags(total | nozero | nonan)
1960 ;
1961 for (int i = 0; i < system->maxMasters(); i++) {
1962 mshr_misses[access_idx].subname(i, system->getMasterName(i));
1963 }
1964 }
1965
1966 demandMshrMisses
1967 .name(name() + ".demand_mshr_misses")
1968 .desc("number of demand (read+write) MSHR misses")
1969 .flags(total | nozero | nonan)
1970 ;
1971 demandMshrMisses = SUM_DEMAND(mshr_misses);
1972 for (int i = 0; i < system->maxMasters(); i++) {
1973 demandMshrMisses.subname(i, system->getMasterName(i));
1974 }
1975
1976 overallMshrMisses
1977 .name(name() + ".overall_mshr_misses")
1978 .desc("number of overall MSHR misses")
1979 .flags(total | nozero | nonan)
1980 ;
1981 overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
1982 for (int i = 0; i < system->maxMasters(); i++) {
1983 overallMshrMisses.subname(i, system->getMasterName(i));
1984 }
1985
1986 // MSHR miss latency statistics
1987 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1988 MemCmd cmd(access_idx);
1989 const string &cstr = cmd.toString();
1990
1991 mshr_miss_latency[access_idx]
1992 .init(system->maxMasters())
1993 .name(name() + "." + cstr + "_mshr_miss_latency")
1994 .desc("number of " + cstr + " MSHR miss cycles")
1995 .flags(total | nozero | nonan)
1996 ;
1997 for (int i = 0; i < system->maxMasters(); i++) {
1998 mshr_miss_latency[access_idx].subname(i, system->getMasterName(i));
1999 }
2000 }
2001
2002 demandMshrMissLatency
2003 .name(name() + ".demand_mshr_miss_latency")
2004 .desc("number of demand (read+write) MSHR miss cycles")
2005 .flags(total | nozero | nonan)
2006 ;
2007 demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
2008 for (int i = 0; i < system->maxMasters(); i++) {
2009 demandMshrMissLatency.subname(i, system->getMasterName(i));
2010 }
2011
2012 overallMshrMissLatency
2013 .name(name() + ".overall_mshr_miss_latency")
2014 .desc("number of overall MSHR miss cycles")
2015 .flags(total | nozero | nonan)
2016 ;
2017 overallMshrMissLatency =
2018 demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
2019 for (int i = 0; i < system->maxMasters(); i++) {
2020 overallMshrMissLatency.subname(i, system->getMasterName(i));
2021 }
2022
2023 // MSHR uncacheable statistics
2024 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2025 MemCmd cmd(access_idx);
2026 const string &cstr = cmd.toString();
2027
2028 mshr_uncacheable[access_idx]
2029 .init(system->maxMasters())
2030 .name(name() + "." + cstr + "_mshr_uncacheable")
2031 .desc("number of " + cstr + " MSHR uncacheable")
2032 .flags(total | nozero | nonan)
2033 ;
2034 for (int i = 0; i < system->maxMasters(); i++) {
2035 mshr_uncacheable[access_idx].subname(i, system->getMasterName(i));
2036 }
2037 }
2038
2039 overallMshrUncacheable
2040 .name(name() + ".overall_mshr_uncacheable_misses")
2041 .desc("number of overall MSHR uncacheable misses")
2042 .flags(total | nozero | nonan)
2043 ;
2044 overallMshrUncacheable =
2045 SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
2046 for (int i = 0; i < system->maxMasters(); i++) {
2047 overallMshrUncacheable.subname(i, system->getMasterName(i));
2048 }
2049
 2050 // MSHR uncacheable latency statistics
2051 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2052 MemCmd cmd(access_idx);
2053 const string &cstr = cmd.toString();
2054
2055 mshr_uncacheable_lat[access_idx]
2056 .init(system->maxMasters())
2057 .name(name() + "." + cstr + "_mshr_uncacheable_latency")
2058 .desc("number of " + cstr + " MSHR uncacheable cycles")
2059 .flags(total | nozero | nonan)
2060 ;
2061 for (int i = 0; i < system->maxMasters(); i++) {
2062 mshr_uncacheable_lat[access_idx].subname(
2063 i, system->getMasterName(i));
2064 }
2065 }
2066
2067 overallMshrUncacheableLatency
2068 .name(name() + ".overall_mshr_uncacheable_latency")
2069 .desc("number of overall MSHR uncacheable cycles")
2070 .flags(total | nozero | nonan)
2071 ;
2072 overallMshrUncacheableLatency =
2073 SUM_DEMAND(mshr_uncacheable_lat) +
2074 SUM_NON_DEMAND(mshr_uncacheable_lat);
2075 for (int i = 0; i < system->maxMasters(); i++) {
2076 overallMshrUncacheableLatency.subname(i, system->getMasterName(i));
2077 }
2078
2079#if 0
2080 // MSHR access formulas
2081 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2082 MemCmd cmd(access_idx);
2083 const string &cstr = cmd.toString();
2084
2085 mshrAccesses[access_idx]
2086 .name(name() + "." + cstr + "_mshr_accesses")
 2087 .desc("number of " + cstr + " mshr accesses (hits+misses)")
2088 .flags(total | nozero | nonan)
2089 ;
2090 mshrAccesses[access_idx] =
2091 mshr_hits[access_idx] + mshr_misses[access_idx]
2092 + mshr_uncacheable[access_idx];
2093 }
2094
2095 demandMshrAccesses
2096 .name(name() + ".demand_mshr_accesses")
2097 .desc("number of demand (read+write) mshr accesses")
2098 .flags(total | nozero | nonan)
2099 ;
2100 demandMshrAccesses = demandMshrHits + demandMshrMisses;
2101
2102 overallMshrAccesses
2103 .name(name() + ".overall_mshr_accesses")
2104 .desc("number of overall (read+write) mshr accesses")
2105 .flags(total | nozero | nonan)
2106 ;
2107 overallMshrAccesses = overallMshrHits + overallMshrMisses
2108 + overallMshrUncacheable;
2109#endif
2110
2111 // MSHR miss rate formulas
2112 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2113 MemCmd cmd(access_idx);
2114 const string &cstr = cmd.toString();
2115
2116 mshrMissRate[access_idx]
2117 .name(name() + "." + cstr + "_mshr_miss_rate")
2118 .desc("mshr miss rate for " + cstr + " accesses")
2119 .flags(total | nozero | nonan)
2120 ;
2121 mshrMissRate[access_idx] =
2122 mshr_misses[access_idx] / accesses[access_idx];
2123
2124 for (int i = 0; i < system->maxMasters(); i++) {
2125 mshrMissRate[access_idx].subname(i, system->getMasterName(i));
2126 }
2127 }
2128
2129 demandMshrMissRate
2130 .name(name() + ".demand_mshr_miss_rate")
2131 .desc("mshr miss rate for demand accesses")
2132 .flags(total | nozero | nonan)
2133 ;
2134 demandMshrMissRate = demandMshrMisses / demandAccesses;
2135 for (int i = 0; i < system->maxMasters(); i++) {
2136 demandMshrMissRate.subname(i, system->getMasterName(i));
2137 }
2138
2139 overallMshrMissRate
2140 .name(name() + ".overall_mshr_miss_rate")
2141 .desc("mshr miss rate for overall accesses")
2142 .flags(total | nozero | nonan)
2143 ;
2144 overallMshrMissRate = overallMshrMisses / overallAccesses;
2145 for (int i = 0; i < system->maxMasters(); i++) {
2146 overallMshrMissRate.subname(i, system->getMasterName(i));
2147 }
2148
2149 // mshrMiss latency formulas
2150 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2151 MemCmd cmd(access_idx);
2152 const string &cstr = cmd.toString();
2153
2154 avgMshrMissLatency[access_idx]
2155 .name(name() + "." + cstr + "_avg_mshr_miss_latency")
2156 .desc("average " + cstr + " mshr miss latency")
2157 .flags(total | nozero | nonan)
2158 ;
2159 avgMshrMissLatency[access_idx] =
2160 mshr_miss_latency[access_idx] / mshr_misses[access_idx];
2161
2162 for (int i = 0; i < system->maxMasters(); i++) {
2163 avgMshrMissLatency[access_idx].subname(
2164 i, system->getMasterName(i));
2165 }
2166 }
2167
2168 demandAvgMshrMissLatency
2169 .name(name() + ".demand_avg_mshr_miss_latency")
 2170 .desc("average demand mshr miss latency")
2171 .flags(total | nozero | nonan)
2172 ;
2173 demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
2174 for (int i = 0; i < system->maxMasters(); i++) {
2175 demandAvgMshrMissLatency.subname(i, system->getMasterName(i));
2176 }
2177
2178 overallAvgMshrMissLatency
2179 .name(name() + ".overall_avg_mshr_miss_latency")
2180 .desc("average overall mshr miss latency")
2181 .flags(total | nozero | nonan)
2182 ;
2183 overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
2184 for (int i = 0; i < system->maxMasters(); i++) {
2185 overallAvgMshrMissLatency.subname(i, system->getMasterName(i));
2186 }
2187
2188 // mshrUncacheable latency formulas
2189 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2190 MemCmd cmd(access_idx);
2191 const string &cstr = cmd.toString();
2192
2193 avgMshrUncacheableLatency[access_idx]
2194 .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency")
2195 .desc("average " + cstr + " mshr uncacheable latency")
2196 .flags(total | nozero | nonan)
2197 ;
2198 avgMshrUncacheableLatency[access_idx] =
2199 mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
2200
2201 for (int i = 0; i < system->maxMasters(); i++) {
2202 avgMshrUncacheableLatency[access_idx].subname(
2203 i, system->getMasterName(i));
2204 }
2205 }
2206
2207 overallAvgMshrUncacheableLatency
2208 .name(name() + ".overall_avg_mshr_uncacheable_latency")
2209 .desc("average overall mshr uncacheable latency")
2210 .flags(total | nozero | nonan)
2211 ;
2212 overallAvgMshrUncacheableLatency =
2213 overallMshrUncacheableLatency / overallMshrUncacheable;
2214 for (int i = 0; i < system->maxMasters(); i++) {
2215 overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i));
2216 }
2217
2218 replacements
2219 .name(name() + ".replacements")
2220 .desc("number of replacements")
2221 ;
2222}
2223
2224void
2225BaseCache::regProbePoints()
2226{
2227 ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit");
2228 ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss");
2229 ppFill = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Fill");
2230}
2231
2232///////////////
2233//
2234// CpuSidePort
2235//
2236///////////////
2237bool
2238BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
2239{
2240 // Snoops shouldn't happen when bypassing caches
2241 assert(!cache->system->bypassCaches());
2242
2243 assert(pkt->isResponse());
2244
2245 // Express snoop responses from master to slave, e.g., from L1 to L2
2246 cache->recvTimingSnoopResp(pkt);
2247 return true;
2248}
2249
2250
2251bool
2252BaseCache::CpuSidePort::tryTiming(PacketPtr pkt)
2253{
2254 if (cache->system->bypassCaches() || pkt->isExpressSnoop()) {
2255 // always let express snoop packets through even if blocked
2256 return true;
2257 } else if (blocked || mustSendRetry) {
2258 // either already committed to send a retry, or blocked
2259 mustSendRetry = true;
2260 return false;
2261 }
2262 mustSendRetry = false;
2263 return true;
2264}
2265
2266bool
2267BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
2268{
2269 assert(pkt->isRequest());
2270
2271 if (cache->system->bypassCaches()) {
2272 // Just forward the packet if caches are disabled.
2273 // @todo This should really enqueue the packet rather
2274 bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
2275 assert(success);
2276 return true;
2277 } else if (tryTiming(pkt)) {
2278 cache->recvTimingReq(pkt);
2279 return true;
2280 }
2281 return false;
2282}
2283
2284Tick
2285BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
2286{
2287 if (cache->system->bypassCaches()) {
2288 // Forward the request if the system is in cache bypass mode.
2289 return cache->memSidePort.sendAtomic(pkt);
2290 } else {
2291 return cache->recvAtomic(pkt);
2292 }
2293}
2294
2295void
2296BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
2297{
2298 if (cache->system->bypassCaches()) {
2299 // The cache should be flushed if we are in cache bypass mode,
2300 // so we don't need to check if we need to update anything.
2301 cache->memSidePort.sendFunctional(pkt);
2302 return;
2303 }
2304
2305 // functional request
2306 cache->functionalAccess(pkt, true);
2307}
2308
2309AddrRangeList
2310BaseCache::CpuSidePort::getAddrRanges() const
2311{
2312 return cache->getAddrRanges();
2313}
2314
2315
2316BaseCache::
2317CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
2318 const std::string &_label)
2319 : CacheSlavePort(_name, _cache, _label), cache(_cache)
2320{
2321}
2322
2323///////////////
2324//
2325// MemSidePort
2326//
2327///////////////
2328bool
2329BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
2330{
2331 cache->recvTimingResp(pkt);
2332 return true;
2333}
2334
2335// Express snooping requests to memside port
2336void
2337BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2338{
2339 // Snoops shouldn't happen when bypassing caches
2340 assert(!cache->system->bypassCaches());
2341
2342 // handle snooping requests
2343 cache->recvTimingSnoopReq(pkt);
2344}
2345
2346Tick
2347BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
2348{
2349 // Snoops shouldn't happen when bypassing caches
2350 assert(!cache->system->bypassCaches());
2351
2352 return cache->recvAtomicSnoop(pkt);
2353}
2354
2355void
2356BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
2357{
2358 // Snoops shouldn't happen when bypassing caches
2359 assert(!cache->system->bypassCaches());
2360
2361 // functional snoop (note that in contrast to atomic we don't have
2362 // a specific functionalSnoop method, as they have the same
2363 // behaviour regardless)
2364 cache->functionalAccess(pkt, false);
2365}
2366
2367void
2368BaseCache::CacheReqPacketQueue::sendDeferredPacket()
2369{
2370 // sanity check
2371 assert(!waitingOnRetry);
2372
2373 // there should never be any deferred request packets in the
 2374 // queue, instead we rely on the cache to provide the packets
2375 // from the MSHR queue or write queue
2376 assert(deferredPacketReadyTime() == MaxTick);
2377
2378 // check for request packets (requests & writebacks)
2379 QueueEntry* entry = cache.getNextQueueEntry();
2380
2381 if (!entry) {
2382 // can happen if e.g. we attempt a writeback and fail, but
2383 // before the retry, the writeback is eliminated because
2384 // we snoop another cache's ReadEx.
2385 } else {
2386 // let our snoop responses go first if there are responses to
2387 // the same addresses
2388 if (checkConflictingSnoop(entry->blkAddr)) {
2389 return;
2390 }
2391 waitingOnRetry = entry->sendPacket(cache);
2392 }
2393
2394 // if we succeeded and are not waiting for a retry, schedule the
2395 // next send considering when the next queue is ready, note that
2396 // snoop responses have their own packet queue and thus schedule
2397 // their own events
2398 if (!waitingOnRetry) {
2399 schedSendEvent(cache.nextQueueReadyTime());
2400 }
2401}
2402
2403BaseCache::MemSidePort::MemSidePort(const std::string &_name,
2404 BaseCache *_cache,
2405 const std::string &_label)
2406 : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
2407 _reqQueue(*_cache, *this, _snoopRespQueue, _label),
2408 _snoopRespQueue(*_cache, *this, true, _label), cache(_cache)
2409{
2410}
2411
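// Worked example (thresholds assumed purely for illustration): with
// 64-byte lines, a coalesce limit of 2 lines (128 bytes) and a
// no-allocate limit of 4 lines (256 bytes), a strictly sequential
// write stream stays in ALLOCATE for its first 128 bytes, runs in
// COALESCE up to 256 bytes, and is treated as write-no-allocate
// beyond that; any write that does not start at nextAddr resets both
// the byte count and the mode.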
2412void
2413WriteAllocator::updateMode(Addr write_addr, unsigned write_size,
2414 Addr blk_addr)
2415{
2416 // check if we are continuing where the last write ended
2417 if (nextAddr == write_addr) {
2418 delayCtr[blk_addr] = delayThreshold;
2419 // stop if we have already saturated
2420 if (mode != WriteMode::NO_ALLOCATE) {
2421 byteCount += write_size;
 2422 // switch to write coalescing mode if we have passed the
 2423 // lower threshold
2424 if (mode == WriteMode::ALLOCATE &&
2425 byteCount > coalesceLimit) {
2426 mode = WriteMode::COALESCE;
2427 DPRINTF(Cache, "Switched to write coalescing\n");
2428 } else if (mode == WriteMode::COALESCE &&
2429 byteCount > noAllocateLimit) {
2430 // and continue and switch to non-allocating mode if we
2431 // pass the upper threshold
2432 mode = WriteMode::NO_ALLOCATE;
2433 DPRINTF(Cache, "Switched to write-no-allocate\n");
2434 }
2435 }
2436 } else {
2437 // we did not see a write matching the previous one, start
2438 // over again
2439 byteCount = write_size;
2440 mode = WriteMode::ALLOCATE;
2441 resetDelay(blk_addr);
2442 }
2443 nextAddr = write_addr + write_size;
2444}
2445
2446WriteAllocator*
2447WriteAllocatorParams::create()
2448{
2449 return new WriteAllocator(this);
2450}
943
944 DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
945 blk ? "hit " + blk->print() : "miss");
946
947 if (pkt->req->isCacheMaintenance()) {
948 // A cache maintenance operation is always forwarded to the
949 // memory below even if the block is found in dirty state.
950
951 // We defer any changes to the state of the block until we
952 // create and mark as in service the mshr for the downstream
953 // packet.
954 return false;
955 }
956
957 if (pkt->isEviction()) {
958         // We check for the presence of the block in the caches above
959         // before issuing a Writeback or CleanEvict to the write buffer.
960         // Therefore the only possible case is a CleanEvict packet coming
961         // from above and encountering a Writeback generated in this cache
962         // that is waiting in the write buffer. Cases of upper-level peer
963         // caches generating CleanEvict and Writeback, or simply CleanEvict
964         // and CleanEvict, almost simultaneously will be caught by snoops
965         // sent out by the crossbar.
966 WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
967 pkt->isSecure());
968 if (wb_entry) {
969 assert(wb_entry->getNumTargets() == 1);
970 PacketPtr wbPkt = wb_entry->getTarget()->pkt;
971 assert(wbPkt->isWriteback());
972
973 if (pkt->isCleanEviction()) {
974                 // The CleanEvict and WritebackClean snoop into other
975 // peer caches of the same level while traversing the
976 // crossbar. If a copy of the block is found, the
977 // packet is deleted in the crossbar. Hence, none of
978 // the other upper level caches connected to this
979 // cache have the block, so we can clear the
980 // BLOCK_CACHED flag in the Writeback if set and
981 // discard the CleanEvict by returning true.
982 wbPkt->clearBlockCached();
983 return true;
984 } else {
985 assert(pkt->cmd == MemCmd::WritebackDirty);
986 // Dirty writeback from above trumps our clean
987 // writeback... discard here
988 // Note: markInService will remove entry from writeback buffer.
989 markInService(wb_entry);
990 delete wbPkt;
991 }
992 }
993 }
994
995 // Writeback handling is special case. We can write the block into
996 // the cache without having a writeable copy (or any copy at all).
997 if (pkt->isWriteback()) {
998 assert(blkSize == pkt->getSize());
999
1000        // we could get a clean writeback while we have outstanding
1001        // accesses to a block; do the simple thing for
1002 // now and drop the clean writeback so that we do not upset
1003 // any ordering/decisions about ownership already taken
1004 if (pkt->cmd == MemCmd::WritebackClean &&
1005 mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
1006 DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
1007 "dropping\n", pkt->getAddr());
1008 return true;
1009 }
1010
1011 if (!blk) {
1012 // need to do a replacement
1013 blk = allocateBlock(pkt, writebacks);
1014 if (!blk) {
1015 // no replaceable block available: give up, fwd to next level.
1016 incMissCount(pkt);
1017 return false;
1018 }
1019
1020 blk->status |= BlkReadable;
1021 }
1022 // only mark the block dirty if we got a writeback command,
1023 // and leave it as is for a clean writeback
1024 if (pkt->cmd == MemCmd::WritebackDirty) {
1025 // TODO: the coherent cache can assert(!blk->isDirty());
1026 blk->status |= BlkDirty;
1027 }
1028        // if the packet does not have sharers, it is passing the block
1029        // as writable, i.e., the writeback came from the Modified or
1030        // Exclusive state; otherwise the sender was in Owned or Shared
1031 if (!pkt->hasSharers()) {
1032 blk->status |= BlkWritable;
1033 }
1034 // nothing else to do; writeback doesn't expect response
1035 assert(!pkt->needsResponse());
1036 pkt->writeDataToBlock(blk->data, blkSize);
1037 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
1038 incHitCount(pkt);
1039 // populate the time when the block will be ready to access.
1040 blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
1041 pkt->payloadDelay);
1042 return true;
1043 } else if (pkt->cmd == MemCmd::CleanEvict) {
1044 if (blk) {
1045 // Found the block in the tags, need to stop CleanEvict from
1046 // propagating further down the hierarchy. Returning true will
1047 // treat the CleanEvict like a satisfied write request and delete
1048 // it.
1049 return true;
1050 }
1051 // We didn't find the block here, propagate the CleanEvict further
1052 // down the memory hierarchy. Returning false will treat the CleanEvict
1053 // like a Writeback which could not find a replaceable block so has to
1054 // go to next level.
1055 return false;
1056 } else if (pkt->cmd == MemCmd::WriteClean) {
1057 // WriteClean handling is a special case. We can allocate a
1058 // block directly if it doesn't exist and we can update the
1059 // block immediately. The WriteClean transfers the ownership
1060 // of the block as well.
1061 assert(blkSize == pkt->getSize());
1062
1063 if (!blk) {
1064 if (pkt->writeThrough()) {
1065 // if this is a write through packet, we don't try to
1066 // allocate if the block is not present
1067 return false;
1068 } else {
1069 // a writeback that misses needs to allocate a new block
1070 blk = allocateBlock(pkt, writebacks);
1071 if (!blk) {
1072 // no replaceable block available: give up, fwd to
1073 // next level.
1074 incMissCount(pkt);
1075 return false;
1076 }
1077
1078 blk->status |= BlkReadable;
1079 }
1080 }
1081
1082 // at this point either this is a writeback or a write-through
1083 // write clean operation and the block is already in this
1084 // cache, we need to update the data and the block flags
1085 assert(blk);
1086 // TODO: the coherent cache can assert(!blk->isDirty());
1087 if (!pkt->writeThrough()) {
1088 blk->status |= BlkDirty;
1089 }
1090 // nothing else to do; writeback doesn't expect response
1091 assert(!pkt->needsResponse());
1092 pkt->writeDataToBlock(blk->data, blkSize);
1093 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
1094
1095 incHitCount(pkt);
1096 // populate the time when the block will be ready to access.
1097 blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
1098 pkt->payloadDelay);
1099        // if this is a write-through packet it will be sent to the
1100        // cache below
1101 return !pkt->writeThrough();
1102 } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
1103 blk->isReadable())) {
1104 // OK to satisfy access
1105 incHitCount(pkt);
1106 satisfyRequest(pkt, blk);
1107 maintainClusivity(pkt->fromCache(), blk);
1108
1109 return true;
1110 }
1111
1112 // Can't satisfy access normally... either no block (blk == nullptr)
1113 // or have block but need writable
1114
1115 incMissCount(pkt);
1116
1117 if (!blk && pkt->isLLSC() && pkt->isWrite()) {
1118 // complete miss on store conditional... just give up now
1119 pkt->req->setExtraData(0);
1120 return true;
1121 }
1122
1123 return false;
1124}
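
// Sketch (illustrative only, not simulator code) of the caller's contract:
// recvTimingReq() and recvAtomic() treat the boolean returned by access()
// as "satisfied (or safely discarded) here" vs. "must go downstream":
#if 0
    PacketList writebacks;
    Cycles lat;
    CacheBlk *blk = nullptr;
    if (access(pkt, blk, lat, writebacks)) {
        // hit path: respond to the requester (if a response is
        // expected) after the access latency
    } else {
        // miss path: allocate an MSHR and forward the request to the
        // memory side
    }
#endif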
1125
1126void
1127BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
1128{
1129 if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
1130 clusivity == Enums::mostly_excl) {
1131 // if we have responded to a cache, and our block is still
1132 // valid, but not dirty, and this cache is mostly exclusive
1133 // with respect to the cache above, drop the block
1134 invalidateBlock(blk);
1135 }
1136}
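
// Example: a mostly-exclusive L2 that has just supplied a clean block to
// the L1 above (from_cache == true) drops its own copy, keeping clean
// data in at most one level; a dirty copy is retained here because
// dropping it would first require a writeback.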
1137
1138CacheBlk*
1139BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
1140 bool allocate)
1141{
1142 assert(pkt->isResponse());
1143 Addr addr = pkt->getAddr();
1144 bool is_secure = pkt->isSecure();
1145#if TRACING_ON
1146 CacheBlk::State old_state = blk ? blk->status : 0;
1147#endif
1148
1149 // When handling a fill, we should have no writes to this line.
1150 assert(addr == pkt->getBlockAddr(blkSize));
1151 assert(!writeBuffer.findMatch(addr, is_secure));
1152
1153 if (!blk) {
1154 // better have read new data...
1155 assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);
1156
1157 // need to do a replacement if allocating, otherwise we stick
1158 // with the temporary storage
1159 blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;
1160
1161 if (!blk) {
1162 // No replaceable block or a mostly exclusive
1163 // cache... just use temporary storage to complete the
1164 // current request and then get rid of it
1165 blk = tempBlock;
1166 tempBlock->insert(addr, is_secure);
1167 DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
1168 is_secure ? "s" : "ns");
1169 }
1170 } else {
1171 // existing block... probably an upgrade
1172 // don't clear block status... if block is already dirty we
1173 // don't want to lose that
1174 }
1175
1176 // Block is guaranteed to be valid at this point
1177 assert(blk->isValid());
1178 assert(blk->isSecure() == is_secure);
1179 assert(regenerateBlkAddr(blk) == addr);
1180
1181 blk->status |= BlkReadable;
1182
1183 // sanity check for whole-line writes, which should always be
1184 // marked as writable as part of the fill, and then later marked
1185 // dirty as part of satisfyRequest
1186 if (pkt->cmd == MemCmd::InvalidateResp) {
1187 assert(!pkt->hasSharers());
1188 }
1189
1190 // here we deal with setting the appropriate state of the line,
1191 // and we start by looking at the hasSharers flag, and ignore the
1192 // cacheResponding flag (normally signalling dirty data) if the
1193 // packet has sharers, thus the line is never allocated as Owned
1194 // (dirty but not writable), and always ends up being either
1195 // Shared, Exclusive or Modified, see Packet::setCacheResponding
1196 // for more details
1197 if (!pkt->hasSharers()) {
1198        // we could get a writable line from memory (rather than a
1199        // cache); note that we set this bit even for a read-only
1200        // cache, a decision that may be worth revisiting
1201 blk->status |= BlkWritable;
1202
1203 // check if we got this via cache-to-cache transfer (i.e., from a
1204 // cache that had the block in Modified or Owned state)
1205 if (pkt->cacheResponding()) {
1206 // we got the block in Modified state, and invalidated the
1207            // owner's copy
1208 blk->status |= BlkDirty;
1209
1210 chatty_assert(!isReadOnly, "Should never see dirty snoop response "
1211 "in read-only cache %s\n", name());
1212 }
1213 }
1214
1215 DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
1216 addr, is_secure ? "s" : "ns", old_state, blk->print());
1217
1218 // if we got new data, copy it in (checking for a read response
1219 // and a response that has data is the same in the end)
1220 if (pkt->isRead()) {
1221 // sanity checks
1222 assert(pkt->hasData());
1223 assert(pkt->getSize() == blkSize);
1224
1225 pkt->writeDataToBlock(blk->data, blkSize);
1226 }
1227 // We pay for fillLatency here.
1228 blk->setWhenReady(clockEdge(fillLatency) + pkt->payloadDelay);
1229
1230 return blk;
1231}
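
// Summary of the resulting line state, derived from the flag handling
// above (hasSharers is checked first; cacheResponding is only honoured
// when hasSharers is clear):
//
//   hasSharers | cacheResponding | final state
//   -----------+-----------------+------------------------------
//   set        | (ignored)       | Shared    (readable only)
//   clear      | clear           | Exclusive (writable, clean)
//   clear      | set             | Modified  (writable, dirty)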
1232
1233CacheBlk*
1234BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
1235{
1236 // Get address
1237 const Addr addr = pkt->getAddr();
1238
1239 // Get secure bit
1240 const bool is_secure = pkt->isSecure();
1241
1242 // Find replacement victim
1243 std::vector<CacheBlk*> evict_blks;
1244 CacheBlk *victim = tags->findVictim(addr, is_secure, evict_blks);
1245
1246 // It is valid to return nullptr if there is no victim
1247 if (!victim)
1248 return nullptr;
1249
1250 // Print victim block's information
1251 DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print());
1252
1253 // Check for transient state allocations. If any of the entries listed
1254 // for eviction has a transient state, the allocation fails
1255 for (const auto& blk : evict_blks) {
1256 if (blk->isValid()) {
1257 Addr repl_addr = regenerateBlkAddr(blk);
1258 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
1259 if (repl_mshr) {
1260 // must be an outstanding upgrade or clean request
1261 // on a block we're about to replace...
1262 assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
1263 repl_mshr->isCleaning());
1264
1265 // too hard to replace block with transient state
1266 // allocation failed, block not inserted
1267 return nullptr;
1268 }
1269 }
1270 }
1271
1272 // The victim will be replaced by a new entry, so increase the replacement
1273 // counter if a valid block is being replaced
1274 if (victim->isValid()) {
1275 DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
1276 "(%s): %s\n", regenerateBlkAddr(victim),
1277 victim->isSecure() ? "s" : "ns",
1278 addr, is_secure ? "s" : "ns",
1279 victim->isDirty() ? "writeback" : "clean");
1280
1281 replacements++;
1282 }
1283
1284 // Evict valid blocks associated to this victim block
1285 for (const auto& blk : evict_blks) {
1286 if (blk->isValid()) {
1287 if (blk->wasPrefetched()) {
1288 unusedPrefetches++;
1289 }
1290
1291 evictBlock(blk, writebacks);
1292 }
1293 }
1294
1295 // Insert new block at victimized entry
1296 tags->insertBlock(addr, is_secure, pkt->req->masterId(),
1297 pkt->req->taskId(), victim);
1298
1299 return victim;
1300}
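
// Note: evict_blks can name several blocks, e.g. when the tags group
// blocks into superblocks and replacing the victim forces its
// co-allocated blocks out as well; that is why the eviction loop above
// iterates instead of handling the single victim only.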
1301
1302void
1303BaseCache::invalidateBlock(CacheBlk *blk)
1304{
1305 // If handling a block present in the Tags, let it do its invalidation
1306 // process, which will update stats and invalidate the block itself
1307 if (blk != tempBlock) {
1308 tags->invalidate(blk);
1309 } else {
1310 tempBlock->invalidate();
1311 }
1312}
1313
1314void
1315BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
1316{
1317 PacketPtr pkt = evictBlock(blk);
1318 if (pkt) {
1319 writebacks.push_back(pkt);
1320 }
1321}
1322
1323PacketPtr
1324BaseCache::writebackBlk(CacheBlk *blk)
1325{
1326 chatty_assert(!isReadOnly || writebackClean,
1327 "Writeback from read-only cache");
1328 assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));
1329
1330 writebacks[Request::wbMasterId]++;
1331
1332 RequestPtr req = std::make_shared<Request>(
1333 regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
1334
1335 if (blk->isSecure())
1336 req->setFlags(Request::SECURE);
1337
1338 req->taskId(blk->task_id);
1339
1340 PacketPtr pkt =
1341 new Packet(req, blk->isDirty() ?
1342 MemCmd::WritebackDirty : MemCmd::WritebackClean);
1343
1344 DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
1345 pkt->print(), blk->isWritable(), blk->isDirty());
1346
1347 if (blk->isWritable()) {
1348 // not asserting shared means we pass the block in modified
1349 // state, mark our own block non-writeable
1350 blk->status &= ~BlkWritable;
1351 } else {
1352 // we are in the Owned state, tell the receiver
1353 pkt->setHasSharers();
1354 }
1355
1356 // make sure the block is not marked dirty
1357 blk->status &= ~BlkDirty;
1358
1359 pkt->allocate();
1360 pkt->setDataFromBlock(blk->data, blkSize);
1361
1362 return pkt;
1363}
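
// Note on the flag updates above: a writable copy (Modified or Exclusive)
// is downgraded by clearing BlkWritable, and the packet omits hasSharers
// so ownership passes downstream; for a non-writable (Owned) copy the
// packet sets hasSharers so the receiver knows other copies may exist.
// In both cases BlkDirty is cleared, since the packet now carries the
// up-to-date data.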
1364
1365PacketPtr
1366BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
1367{
1368 RequestPtr req = std::make_shared<Request>(
1369 regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
1370
1371 if (blk->isSecure()) {
1372 req->setFlags(Request::SECURE);
1373 }
1374 req->taskId(blk->task_id);
1375
1376 PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);
1377
1378 if (dest) {
1379 req->setFlags(dest);
1380 pkt->setWriteThrough();
1381 }
1382
1383 DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
1384 blk->isWritable(), blk->isDirty());
1385
1386 if (blk->isWritable()) {
1387 // not asserting shared means we pass the block in modified
1388 // state, mark our own block non-writeable
1389 blk->status &= ~BlkWritable;
1390 } else {
1391 // we are in the Owned state, tell the receiver
1392 pkt->setHasSharers();
1393 }
1394
1395 // make sure the block is not marked dirty
1396 blk->status &= ~BlkDirty;
1397
1398 pkt->allocate();
1399 pkt->setDataFromBlock(blk->data, blkSize);
1400
1401 return pkt;
1402}
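
// Minimal usage sketch (illustrative only, following the file's #if 0
// convention), mirroring the call in sendMSHRQueuePacket() below. A
// non-zero dest flag (e.g. the Request::DST_POC destination used by cache
// maintenance operations) marks the packet write-through, so it continues
// past this cache:
#if 0
    PacketList writebacks;
    PacketPtr wc = writecleanBlk(blk, Request::DST_POC, pkt->id);
    writebacks.push_back(wc);
    doWritebacks(writebacks, 0);
#endif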
1403
1404
1405void
1406BaseCache::memWriteback()
1407{
1408 tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
1409}
1410
1411void
1412BaseCache::memInvalidate()
1413{
1414 tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
1415}
1416
1417bool
1418BaseCache::isDirty() const
1419{
1420 return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
1421}
1422
1423bool
1424BaseCache::coalesce() const
1425{
1426 return writeAllocator && writeAllocator->coalesce();
1427}
1428
1429void
1430BaseCache::writebackVisitor(CacheBlk &blk)
1431{
1432 if (blk.isDirty()) {
1433 assert(blk.isValid());
1434
1435 RequestPtr request = std::make_shared<Request>(
1436 regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);
1437
1438 request->taskId(blk.task_id);
1439 if (blk.isSecure()) {
1440 request->setFlags(Request::SECURE);
1441 }
1442
1443 Packet packet(request, MemCmd::WriteReq);
1444 packet.dataStatic(blk.data);
1445
1446 memSidePort.sendFunctional(&packet);
1447
1448 blk.status &= ~BlkDirty;
1449 }
1450}
1451
1452void
1453BaseCache::invalidateVisitor(CacheBlk &blk)
1454{
1455 if (blk.isDirty())
1456 warn_once("Invalidating dirty cache lines. " \
1457 "Expect things to break.\n");
1458
1459 if (blk.isValid()) {
1460 assert(!blk.isDirty());
1461 invalidateBlock(&blk);
1462 }
1463}
1464
1465Tick
1466BaseCache::nextQueueReadyTime() const
1467{
1468 Tick nextReady = std::min(mshrQueue.nextReadyTime(),
1469 writeBuffer.nextReadyTime());
1470
1471 // Don't signal prefetch ready time if no MSHRs available
1472    // Will signal once enough MSHRs are deallocated
1473 if (prefetcher && mshrQueue.canPrefetch()) {
1474 nextReady = std::min(nextReady,
1475 prefetcher->nextPrefetchReadyTime());
1476 }
1477
1478 return nextReady;
1479}
1480
1481
1482bool
1483BaseCache::sendMSHRQueuePacket(MSHR* mshr)
1484{
1485 assert(mshr);
1486
1487 // use request from 1st target
1488 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1489
1490 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());
1491
1492 // if the cache is in write coalescing mode or (additionally) in
1493 // no allocation mode, and we have a write packet with an MSHR
1494 // that is not a whole-line write (due to incompatible flags etc),
1495 // then reset the write mode
1496 if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) {
1497 if (!mshr->isWholeLineWrite()) {
1498 // if we are currently write coalescing, hold on the
1499 // MSHR as many cycles extra as we need to completely
1500 // write a cache line
1501 if (writeAllocator->delay(mshr->blkAddr)) {
1502 Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
1503 DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
1504 "for write coalescing\n", tgt_pkt->print(), delay);
1505 mshrQueue.delay(mshr, delay);
1506 return false;
1507 } else {
1508 writeAllocator->reset();
1509 }
1510 } else {
1511 writeAllocator->resetDelay(mshr->blkAddr);
1512 }
1513 }
1514
1515 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
1516
1517 // either a prefetch that is not present upstream, or a normal
1518 // MSHR request, proceed to get the packet to send downstream
1519 PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(),
1520 mshr->isWholeLineWrite());
1521
1522 mshr->isForward = (pkt == nullptr);
1523
1524 if (mshr->isForward) {
1525 // not a cache block request, but a response is expected
1526 // make copy of current packet to forward, keep current
1527 // copy for response handling
1528 pkt = new Packet(tgt_pkt, false, true);
1529 assert(!pkt->isWrite());
1530 }
1531
1532 // play it safe and append (rather than set) the sender state,
1533 // as forwarded packets may already have existing state
1534 pkt->pushSenderState(mshr);
1535
1536 if (pkt->isClean() && blk && blk->isDirty()) {
1537        // A cache clean operation is looking for a dirty block. Mark
1538 // the packet so that the destination xbar can determine that
1539 // there will be a follow-up write packet as well.
1540 pkt->setSatisfied();
1541 }
1542
1543 if (!memSidePort.sendTimingReq(pkt)) {
1544        // we are awaiting a retry; delete the packet
1545        // and create a new one when we get the
1546        // opportunity
1547 delete pkt;
1548
1549 // note that we have now masked any requestBus and
1550 // schedSendEvent (we will wait for a retry before
1551 // doing anything), and this is so even if we do not
1552 // care about this packet and might override it before
1553 // it gets retried
1554 return true;
1555 } else {
1556 // As part of the call to sendTimingReq the packet is
1557 // forwarded to all neighbouring caches (and any caches
1558 // above them) as a snoop. Thus at this point we know if
1559 // any of the neighbouring caches are responding, and if
1560 // so, we know it is dirty, and we can determine if it is
1561 // being passed as Modified, making our MSHR the ordering
1562 // point
1563 bool pending_modified_resp = !pkt->hasSharers() &&
1564 pkt->cacheResponding();
1565 markInService(mshr, pending_modified_resp);
1566
1567 if (pkt->isClean() && blk && blk->isDirty()) {
1568            // A cache clean operation is looking for a dirty
1569            // block. If a dirty block is encountered, a WriteClean
1570            // will update any copies on the path to memory
1571            // down to the point of reference.
1572 DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
1573 __func__, pkt->print(), blk->print());
1574 PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
1575 pkt->id);
1576 PacketList writebacks;
1577 writebacks.push_back(wb_pkt);
1578 doWritebacks(writebacks, 0);
1579 }
1580
1581 return false;
1582 }
1583}
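
// Note on the return convention shared with sendWriteQueuePacket() below:
// true means the mem-side port refused the packet and the cache now waits
// for recvReqRetry() before sending anything else; false means the packet
// was accepted and the queue entry has been marked in service.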
1584
1585bool
1586BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
1587{
1588 assert(wq_entry);
1589
1590 // always a single target for write queue entries
1591 PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;
1592
1593 DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());
1594
1595 // forward as is, both for evictions and uncacheable writes
1596 if (!memSidePort.sendTimingReq(tgt_pkt)) {
1597 // note that we have now masked any requestBus and
1598 // schedSendEvent (we will wait for a retry before
1599 // doing anything), and this is so even if we do not
1600 // care about this packet and might override it before
1601 // it gets retried
1602 return true;
1603 } else {
1604 markInService(wq_entry);
1605 return false;
1606 }
1607}
1608
1609void
1610BaseCache::serialize(CheckpointOut &cp) const
1611{
1612 bool dirty(isDirty());
1613
1614 if (dirty) {
1615 warn("*** The cache still contains dirty data. ***\n");
1616 warn(" Make sure to drain the system using the correct flags.\n");
1617 warn(" This checkpoint will not restore correctly " \
1618 "and dirty data in the cache will be lost!\n");
1619 }
1620
1621 // Since we don't checkpoint the data in the cache, any dirty data
1622 // will be lost when restoring from a checkpoint of a system that
1623 // wasn't drained properly. Flag the checkpoint as invalid if the
1624 // cache contains dirty data.
1625 bool bad_checkpoint(dirty);
1626 SERIALIZE_SCALAR(bad_checkpoint);
1627}
1628
1629void
1630BaseCache::unserialize(CheckpointIn &cp)
1631{
1632 bool bad_checkpoint;
1633 UNSERIALIZE_SCALAR(bad_checkpoint);
1634 if (bad_checkpoint) {
1635 fatal("Restoring from checkpoints with dirty caches is not "
1636 "supported in the classic memory system. Please remove any "
1637 "caches or drain them properly before taking checkpoints.\n");
1638 }
1639}
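
// Note: in practice this means dirty data must be flushed before
// checkpointing, e.g. the Python scripting layer can call
// m5.memWriteback(root) (which reaches memWriteback() above) ahead of
// serialization.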
1640
1641void
1642BaseCache::regStats()
1643{
1644 MemObject::regStats();
1645
1646 using namespace Stats;
1647
1648 // Hit statistics
1649 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1650 MemCmd cmd(access_idx);
1651 const string &cstr = cmd.toString();
1652
1653 hits[access_idx]
1654 .init(system->maxMasters())
1655 .name(name() + "." + cstr + "_hits")
1656 .desc("number of " + cstr + " hits")
1657 .flags(total | nozero | nonan)
1658 ;
1659 for (int i = 0; i < system->maxMasters(); i++) {
1660 hits[access_idx].subname(i, system->getMasterName(i));
1661 }
1662 }
1663
1664// These macros make it easier to sum the right subset of commands and
1665// to change the subset of commands that are considered "demand" vs
1666// "non-demand"
1667#define SUM_DEMAND(s) \
1668 (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \
1669 s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq])
1670
1671// should writebacks be included here? prior code was inconsistent...
1672#define SUM_NON_DEMAND(s) \
1673 (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq])
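
// Example: with these macros the demandHits assignment below expands to
// the element-wise sum
//   demandHits = hits[MemCmd::ReadReq] + hits[MemCmd::WriteReq] +
//                hits[MemCmd::WriteLineReq] + hits[MemCmd::ReadExReq] +
//                hits[MemCmd::ReadCleanReq] + hits[MemCmd::ReadSharedReq];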
1674
1675 demandHits
1676 .name(name() + ".demand_hits")
1677 .desc("number of demand (read+write) hits")
1678 .flags(total | nozero | nonan)
1679 ;
1680 demandHits = SUM_DEMAND(hits);
1681 for (int i = 0; i < system->maxMasters(); i++) {
1682 demandHits.subname(i, system->getMasterName(i));
1683 }
1684
1685 overallHits
1686 .name(name() + ".overall_hits")
1687 .desc("number of overall hits")
1688 .flags(total | nozero | nonan)
1689 ;
1690 overallHits = demandHits + SUM_NON_DEMAND(hits);
1691 for (int i = 0; i < system->maxMasters(); i++) {
1692 overallHits.subname(i, system->getMasterName(i));
1693 }
1694
1695 // Miss statistics
1696 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1697 MemCmd cmd(access_idx);
1698 const string &cstr = cmd.toString();
1699
1700 misses[access_idx]
1701 .init(system->maxMasters())
1702 .name(name() + "." + cstr + "_misses")
1703 .desc("number of " + cstr + " misses")
1704 .flags(total | nozero | nonan)
1705 ;
1706 for (int i = 0; i < system->maxMasters(); i++) {
1707 misses[access_idx].subname(i, system->getMasterName(i));
1708 }
1709 }
1710
1711 demandMisses
1712 .name(name() + ".demand_misses")
1713 .desc("number of demand (read+write) misses")
1714 .flags(total | nozero | nonan)
1715 ;
1716 demandMisses = SUM_DEMAND(misses);
1717 for (int i = 0; i < system->maxMasters(); i++) {
1718 demandMisses.subname(i, system->getMasterName(i));
1719 }
1720
1721 overallMisses
1722 .name(name() + ".overall_misses")
1723 .desc("number of overall misses")
1724 .flags(total | nozero | nonan)
1725 ;
1726 overallMisses = demandMisses + SUM_NON_DEMAND(misses);
1727 for (int i = 0; i < system->maxMasters(); i++) {
1728 overallMisses.subname(i, system->getMasterName(i));
1729 }
1730
1731 // Miss latency statistics
1732 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1733 MemCmd cmd(access_idx);
1734 const string &cstr = cmd.toString();
1735
1736 missLatency[access_idx]
1737 .init(system->maxMasters())
1738 .name(name() + "." + cstr + "_miss_latency")
1739 .desc("number of " + cstr + " miss cycles")
1740 .flags(total | nozero | nonan)
1741 ;
1742 for (int i = 0; i < system->maxMasters(); i++) {
1743 missLatency[access_idx].subname(i, system->getMasterName(i));
1744 }
1745 }
1746
1747 demandMissLatency
1748 .name(name() + ".demand_miss_latency")
1749 .desc("number of demand (read+write) miss cycles")
1750 .flags(total | nozero | nonan)
1751 ;
1752 demandMissLatency = SUM_DEMAND(missLatency);
1753 for (int i = 0; i < system->maxMasters(); i++) {
1754 demandMissLatency.subname(i, system->getMasterName(i));
1755 }
1756
1757 overallMissLatency
1758 .name(name() + ".overall_miss_latency")
1759 .desc("number of overall miss cycles")
1760 .flags(total | nozero | nonan)
1761 ;
1762 overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
1763 for (int i = 0; i < system->maxMasters(); i++) {
1764 overallMissLatency.subname(i, system->getMasterName(i));
1765 }
1766
1767 // access formulas
1768 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1769 MemCmd cmd(access_idx);
1770 const string &cstr = cmd.toString();
1771
1772 accesses[access_idx]
1773 .name(name() + "." + cstr + "_accesses")
1774            .desc("number of " + cstr + " accesses (hits+misses)")
1775 .flags(total | nozero | nonan)
1776 ;
1777 accesses[access_idx] = hits[access_idx] + misses[access_idx];
1778
1779 for (int i = 0; i < system->maxMasters(); i++) {
1780 accesses[access_idx].subname(i, system->getMasterName(i));
1781 }
1782 }
1783
1784 demandAccesses
1785 .name(name() + ".demand_accesses")
1786 .desc("number of demand (read+write) accesses")
1787 .flags(total | nozero | nonan)
1788 ;
1789 demandAccesses = demandHits + demandMisses;
1790 for (int i = 0; i < system->maxMasters(); i++) {
1791 demandAccesses.subname(i, system->getMasterName(i));
1792 }
1793
1794 overallAccesses
1795 .name(name() + ".overall_accesses")
1796 .desc("number of overall (read+write) accesses")
1797 .flags(total | nozero | nonan)
1798 ;
1799 overallAccesses = overallHits + overallMisses;
1800 for (int i = 0; i < system->maxMasters(); i++) {
1801 overallAccesses.subname(i, system->getMasterName(i));
1802 }
1803
1804 // miss rate formulas
1805 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1806 MemCmd cmd(access_idx);
1807 const string &cstr = cmd.toString();
1808
1809 missRate[access_idx]
1810 .name(name() + "." + cstr + "_miss_rate")
1811 .desc("miss rate for " + cstr + " accesses")
1812 .flags(total | nozero | nonan)
1813 ;
1814 missRate[access_idx] = misses[access_idx] / accesses[access_idx];
1815
1816 for (int i = 0; i < system->maxMasters(); i++) {
1817 missRate[access_idx].subname(i, system->getMasterName(i));
1818 }
1819 }
1820
1821 demandMissRate
1822 .name(name() + ".demand_miss_rate")
1823 .desc("miss rate for demand accesses")
1824 .flags(total | nozero | nonan)
1825 ;
1826 demandMissRate = demandMisses / demandAccesses;
1827 for (int i = 0; i < system->maxMasters(); i++) {
1828 demandMissRate.subname(i, system->getMasterName(i));
1829 }
1830
1831 overallMissRate
1832 .name(name() + ".overall_miss_rate")
1833 .desc("miss rate for overall accesses")
1834 .flags(total | nozero | nonan)
1835 ;
1836 overallMissRate = overallMisses / overallAccesses;
1837 for (int i = 0; i < system->maxMasters(); i++) {
1838 overallMissRate.subname(i, system->getMasterName(i));
1839 }
1840
1841 // miss latency formulas
1842 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1843 MemCmd cmd(access_idx);
1844 const string &cstr = cmd.toString();
1845
1846 avgMissLatency[access_idx]
1847 .name(name() + "." + cstr + "_avg_miss_latency")
1848 .desc("average " + cstr + " miss latency")
1849 .flags(total | nozero | nonan)
1850 ;
1851 avgMissLatency[access_idx] =
1852 missLatency[access_idx] / misses[access_idx];
1853
1854 for (int i = 0; i < system->maxMasters(); i++) {
1855 avgMissLatency[access_idx].subname(i, system->getMasterName(i));
1856 }
1857 }
1858
1859 demandAvgMissLatency
1860 .name(name() + ".demand_avg_miss_latency")
1861        .desc("average demand miss latency")
1862 .flags(total | nozero | nonan)
1863 ;
1864 demandAvgMissLatency = demandMissLatency / demandMisses;
1865 for (int i = 0; i < system->maxMasters(); i++) {
1866 demandAvgMissLatency.subname(i, system->getMasterName(i));
1867 }
1868
1869 overallAvgMissLatency
1870 .name(name() + ".overall_avg_miss_latency")
1871 .desc("average overall miss latency")
1872 .flags(total | nozero | nonan)
1873 ;
1874 overallAvgMissLatency = overallMissLatency / overallMisses;
1875 for (int i = 0; i < system->maxMasters(); i++) {
1876 overallAvgMissLatency.subname(i, system->getMasterName(i));
1877 }
1878
1879 blocked_cycles.init(NUM_BLOCKED_CAUSES);
1880 blocked_cycles
1881 .name(name() + ".blocked_cycles")
1882 .desc("number of cycles access was blocked")
1883 .subname(Blocked_NoMSHRs, "no_mshrs")
1884 .subname(Blocked_NoTargets, "no_targets")
1885 ;
1886
1887
1888 blocked_causes.init(NUM_BLOCKED_CAUSES);
1889 blocked_causes
1890 .name(name() + ".blocked")
1891        .desc("number of times access was blocked")
1892 .subname(Blocked_NoMSHRs, "no_mshrs")
1893 .subname(Blocked_NoTargets, "no_targets")
1894 ;
1895
1896 avg_blocked
1897 .name(name() + ".avg_blocked_cycles")
1898 .desc("average number of cycles each access was blocked")
1899 .subname(Blocked_NoMSHRs, "no_mshrs")
1900 .subname(Blocked_NoTargets, "no_targets")
1901 ;
1902
1903 avg_blocked = blocked_cycles / blocked_causes;
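
    // Note: the formula above divides element-wise per cause, e.g. the
    // no_mshrs subname of avg_blocked_cycles is
    // blocked_cycles[Blocked_NoMSHRs] / blocked_causes[Blocked_NoMSHRs].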
1904
1905 unusedPrefetches
1906 .name(name() + ".unused_prefetches")
1907 .desc("number of HardPF blocks evicted w/o reference")
1908 .flags(nozero)
1909 ;
1910
1911 writebacks
1912 .init(system->maxMasters())
1913 .name(name() + ".writebacks")
1914 .desc("number of writebacks")
1915 .flags(total | nozero | nonan)
1916 ;
1917 for (int i = 0; i < system->maxMasters(); i++) {
1918 writebacks.subname(i, system->getMasterName(i));
1919 }
1920
1921 // MSHR statistics
1922 // MSHR hit statistics
1923 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1924 MemCmd cmd(access_idx);
1925 const string &cstr = cmd.toString();
1926
1927 mshr_hits[access_idx]
1928 .init(system->maxMasters())
1929 .name(name() + "." + cstr + "_mshr_hits")
1930 .desc("number of " + cstr + " MSHR hits")
1931 .flags(total | nozero | nonan)
1932 ;
1933 for (int i = 0; i < system->maxMasters(); i++) {
1934 mshr_hits[access_idx].subname(i, system->getMasterName(i));
1935 }
1936 }
1937
1938 demandMshrHits
1939 .name(name() + ".demand_mshr_hits")
1940 .desc("number of demand (read+write) MSHR hits")
1941 .flags(total | nozero | nonan)
1942 ;
1943 demandMshrHits = SUM_DEMAND(mshr_hits);
1944 for (int i = 0; i < system->maxMasters(); i++) {
1945 demandMshrHits.subname(i, system->getMasterName(i));
1946 }
1947
1948 overallMshrHits
1949 .name(name() + ".overall_mshr_hits")
1950 .desc("number of overall MSHR hits")
1951 .flags(total | nozero | nonan)
1952 ;
1953 overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
1954 for (int i = 0; i < system->maxMasters(); i++) {
1955 overallMshrHits.subname(i, system->getMasterName(i));
1956 }
1957
1958 // MSHR miss statistics
1959 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1960 MemCmd cmd(access_idx);
1961 const string &cstr = cmd.toString();
1962
1963 mshr_misses[access_idx]
1964 .init(system->maxMasters())
1965 .name(name() + "." + cstr + "_mshr_misses")
1966 .desc("number of " + cstr + " MSHR misses")
1967 .flags(total | nozero | nonan)
1968 ;
1969 for (int i = 0; i < system->maxMasters(); i++) {
1970 mshr_misses[access_idx].subname(i, system->getMasterName(i));
1971 }
1972 }
1973
1974 demandMshrMisses
1975 .name(name() + ".demand_mshr_misses")
1976 .desc("number of demand (read+write) MSHR misses")
1977 .flags(total | nozero | nonan)
1978 ;
1979 demandMshrMisses = SUM_DEMAND(mshr_misses);
1980 for (int i = 0; i < system->maxMasters(); i++) {
1981 demandMshrMisses.subname(i, system->getMasterName(i));
1982 }
1983
1984 overallMshrMisses
1985 .name(name() + ".overall_mshr_misses")
1986 .desc("number of overall MSHR misses")
1987 .flags(total | nozero | nonan)
1988 ;
1989 overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
1990 for (int i = 0; i < system->maxMasters(); i++) {
1991 overallMshrMisses.subname(i, system->getMasterName(i));
1992 }
1993
1994 // MSHR miss latency statistics
1995 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
1996 MemCmd cmd(access_idx);
1997 const string &cstr = cmd.toString();
1998
1999 mshr_miss_latency[access_idx]
2000 .init(system->maxMasters())
2001 .name(name() + "." + cstr + "_mshr_miss_latency")
2002 .desc("number of " + cstr + " MSHR miss cycles")
2003 .flags(total | nozero | nonan)
2004 ;
2005 for (int i = 0; i < system->maxMasters(); i++) {
2006 mshr_miss_latency[access_idx].subname(i, system->getMasterName(i));
2007 }
2008 }
2009
2010 demandMshrMissLatency
2011 .name(name() + ".demand_mshr_miss_latency")
2012 .desc("number of demand (read+write) MSHR miss cycles")
2013 .flags(total | nozero | nonan)
2014 ;
2015 demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
2016 for (int i = 0; i < system->maxMasters(); i++) {
2017 demandMshrMissLatency.subname(i, system->getMasterName(i));
2018 }
2019
2020 overallMshrMissLatency
2021 .name(name() + ".overall_mshr_miss_latency")
2022 .desc("number of overall MSHR miss cycles")
2023 .flags(total | nozero | nonan)
2024 ;
2025 overallMshrMissLatency =
2026 demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
2027 for (int i = 0; i < system->maxMasters(); i++) {
2028 overallMshrMissLatency.subname(i, system->getMasterName(i));
2029 }
2030
2031 // MSHR uncacheable statistics
2032 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2033 MemCmd cmd(access_idx);
2034 const string &cstr = cmd.toString();
2035
2036 mshr_uncacheable[access_idx]
2037 .init(system->maxMasters())
2038 .name(name() + "." + cstr + "_mshr_uncacheable")
2039 .desc("number of " + cstr + " MSHR uncacheable")
2040 .flags(total | nozero | nonan)
2041 ;
2042 for (int i = 0; i < system->maxMasters(); i++) {
2043 mshr_uncacheable[access_idx].subname(i, system->getMasterName(i));
2044 }
2045 }
2046
2047 overallMshrUncacheable
2048 .name(name() + ".overall_mshr_uncacheable_misses")
2049 .desc("number of overall MSHR uncacheable misses")
2050 .flags(total | nozero | nonan)
2051 ;
2052 overallMshrUncacheable =
2053 SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
2054 for (int i = 0; i < system->maxMasters(); i++) {
2055 overallMshrUncacheable.subname(i, system->getMasterName(i));
2056 }
2057
2058    // MSHR uncacheable latency statistics
2059 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2060 MemCmd cmd(access_idx);
2061 const string &cstr = cmd.toString();
2062
2063 mshr_uncacheable_lat[access_idx]
2064 .init(system->maxMasters())
2065 .name(name() + "." + cstr + "_mshr_uncacheable_latency")
2066 .desc("number of " + cstr + " MSHR uncacheable cycles")
2067 .flags(total | nozero | nonan)
2068 ;
2069 for (int i = 0; i < system->maxMasters(); i++) {
2070 mshr_uncacheable_lat[access_idx].subname(
2071 i, system->getMasterName(i));
2072 }
2073 }
2074
2075 overallMshrUncacheableLatency
2076 .name(name() + ".overall_mshr_uncacheable_latency")
2077 .desc("number of overall MSHR uncacheable cycles")
2078 .flags(total | nozero | nonan)
2079 ;
2080 overallMshrUncacheableLatency =
2081 SUM_DEMAND(mshr_uncacheable_lat) +
2082 SUM_NON_DEMAND(mshr_uncacheable_lat);
2083 for (int i = 0; i < system->maxMasters(); i++) {
2084 overallMshrUncacheableLatency.subname(i, system->getMasterName(i));
2085 }
2086
2087#if 0
2088 // MSHR access formulas
2089 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2090 MemCmd cmd(access_idx);
2091 const string &cstr = cmd.toString();
2092
2093 mshrAccesses[access_idx]
2094 .name(name() + "." + cstr + "_mshr_accesses")
2095 .desc("number of " + cstr + " mshr accesses(hits+misses)")
2096 .flags(total | nozero | nonan)
2097 ;
2098 mshrAccesses[access_idx] =
2099 mshr_hits[access_idx] + mshr_misses[access_idx]
2100 + mshr_uncacheable[access_idx];
2101 }
2102
2103 demandMshrAccesses
2104 .name(name() + ".demand_mshr_accesses")
2105 .desc("number of demand (read+write) mshr accesses")
2106 .flags(total | nozero | nonan)
2107 ;
2108 demandMshrAccesses = demandMshrHits + demandMshrMisses;
2109
2110 overallMshrAccesses
2111 .name(name() + ".overall_mshr_accesses")
2112 .desc("number of overall (read+write) mshr accesses")
2113 .flags(total | nozero | nonan)
2114 ;
2115 overallMshrAccesses = overallMshrHits + overallMshrMisses
2116 + overallMshrUncacheable;
2117#endif
2118
2119 // MSHR miss rate formulas
2120 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2121 MemCmd cmd(access_idx);
2122 const string &cstr = cmd.toString();
2123
2124 mshrMissRate[access_idx]
2125 .name(name() + "." + cstr + "_mshr_miss_rate")
2126 .desc("mshr miss rate for " + cstr + " accesses")
2127 .flags(total | nozero | nonan)
2128 ;
2129 mshrMissRate[access_idx] =
2130 mshr_misses[access_idx] / accesses[access_idx];
2131
2132 for (int i = 0; i < system->maxMasters(); i++) {
2133 mshrMissRate[access_idx].subname(i, system->getMasterName(i));
2134 }
2135 }
2136
2137 demandMshrMissRate
2138 .name(name() + ".demand_mshr_miss_rate")
2139 .desc("mshr miss rate for demand accesses")
2140 .flags(total | nozero | nonan)
2141 ;
2142 demandMshrMissRate = demandMshrMisses / demandAccesses;
2143 for (int i = 0; i < system->maxMasters(); i++) {
2144 demandMshrMissRate.subname(i, system->getMasterName(i));
2145 }
2146
2147 overallMshrMissRate
2148 .name(name() + ".overall_mshr_miss_rate")
2149 .desc("mshr miss rate for overall accesses")
2150 .flags(total | nozero | nonan)
2151 ;
2152 overallMshrMissRate = overallMshrMisses / overallAccesses;
2153 for (int i = 0; i < system->maxMasters(); i++) {
2154 overallMshrMissRate.subname(i, system->getMasterName(i));
2155 }
2156
2157 // mshrMiss latency formulas
2158 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2159 MemCmd cmd(access_idx);
2160 const string &cstr = cmd.toString();
2161
2162 avgMshrMissLatency[access_idx]
2163 .name(name() + "." + cstr + "_avg_mshr_miss_latency")
2164 .desc("average " + cstr + " mshr miss latency")
2165 .flags(total | nozero | nonan)
2166 ;
2167 avgMshrMissLatency[access_idx] =
2168 mshr_miss_latency[access_idx] / mshr_misses[access_idx];
2169
2170 for (int i = 0; i < system->maxMasters(); i++) {
2171 avgMshrMissLatency[access_idx].subname(
2172 i, system->getMasterName(i));
2173 }
2174 }
2175
2176 demandAvgMshrMissLatency
2177 .name(name() + ".demand_avg_mshr_miss_latency")
2178        .desc("average demand mshr miss latency")
2179 .flags(total | nozero | nonan)
2180 ;
2181 demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
2182 for (int i = 0; i < system->maxMasters(); i++) {
2183 demandAvgMshrMissLatency.subname(i, system->getMasterName(i));
2184 }
2185
2186 overallAvgMshrMissLatency
2187 .name(name() + ".overall_avg_mshr_miss_latency")
2188 .desc("average overall mshr miss latency")
2189 .flags(total | nozero | nonan)
2190 ;
2191 overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
2192 for (int i = 0; i < system->maxMasters(); i++) {
2193 overallAvgMshrMissLatency.subname(i, system->getMasterName(i));
2194 }
2195
2196 // mshrUncacheable latency formulas
2197 for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
2198 MemCmd cmd(access_idx);
2199 const string &cstr = cmd.toString();
2200
2201 avgMshrUncacheableLatency[access_idx]
2202 .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency")
2203 .desc("average " + cstr + " mshr uncacheable latency")
2204 .flags(total | nozero | nonan)
2205 ;
2206 avgMshrUncacheableLatency[access_idx] =
2207 mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
2208
2209 for (int i = 0; i < system->maxMasters(); i++) {
2210 avgMshrUncacheableLatency[access_idx].subname(
2211 i, system->getMasterName(i));
2212 }
2213 }
2214
2215 overallAvgMshrUncacheableLatency
2216 .name(name() + ".overall_avg_mshr_uncacheable_latency")
2217 .desc("average overall mshr uncacheable latency")
2218 .flags(total | nozero | nonan)
2219 ;
2220 overallAvgMshrUncacheableLatency =
2221 overallMshrUncacheableLatency / overallMshrUncacheable;
2222 for (int i = 0; i < system->maxMasters(); i++) {
2223 overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i));
2224 }
2225
2226 replacements
2227 .name(name() + ".replacements")
2228 .desc("number of replacements")
2229 ;
2230}
2231
2232void
2233BaseCache::regProbePoints()
2234{
2235 ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit");
2236 ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss");
2237 ppFill = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Fill");
2238}
2239
2240///////////////
2241//
2242// CpuSidePort
2243//
2244///////////////
2245bool
2246BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
2247{
2248 // Snoops shouldn't happen when bypassing caches
2249 assert(!cache->system->bypassCaches());
2250
2251 assert(pkt->isResponse());
2252
2253 // Express snoop responses from master to slave, e.g., from L1 to L2
2254 cache->recvTimingSnoopResp(pkt);
2255 return true;
2256}
2257
2258
2259bool
2260BaseCache::CpuSidePort::tryTiming(PacketPtr pkt)
2261{
2262 if (cache->system->bypassCaches() || pkt->isExpressSnoop()) {
2263 // always let express snoop packets through even if blocked
2264 return true;
2265 } else if (blocked || mustSendRetry) {
2266 // either already committed to send a retry, or blocked
2267 mustSendRetry = true;
2268 return false;
2269 }
2270 mustSendRetry = false;
2271 return true;
2272}
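
// Note on the handshake: when tryTiming() returns false the crossbar must
// not pass this packet to recvTimingReq(); once the cache unblocks,
// mustSendRetry causes processSendRetry() to issue sendRetryReq(),
// prompting the master to resend the refused packet.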
2273
2274bool
2275BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
2276{
2277 assert(pkt->isRequest());
2278
2279 if (cache->system->bypassCaches()) {
2280 // Just forward the packet if caches are disabled.
2281 // @todo This should really enqueue the packet rather
2282 bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
2283 assert(success);
2284 return true;
2285 } else if (tryTiming(pkt)) {
2286 cache->recvTimingReq(pkt);
2287 return true;
2288 }
2289 return false;
2290}
2291
2292Tick
2293BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
2294{
2295 if (cache->system->bypassCaches()) {
2296 // Forward the request if the system is in cache bypass mode.
2297 return cache->memSidePort.sendAtomic(pkt);
2298 } else {
2299 return cache->recvAtomic(pkt);
2300 }
2301}
2302
2303void
2304BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
2305{
2306 if (cache->system->bypassCaches()) {
2307 // The cache should be flushed if we are in cache bypass mode,
2308 // so we don't need to check if we need to update anything.
2309 cache->memSidePort.sendFunctional(pkt);
2310 return;
2311 }
2312
2313 // functional request
2314 cache->functionalAccess(pkt, true);
2315}
2316
2317AddrRangeList
2318BaseCache::CpuSidePort::getAddrRanges() const
2319{
2320 return cache->getAddrRanges();
2321}
2322
2323
2324BaseCache::
2325CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
2326 const std::string &_label)
2327 : CacheSlavePort(_name, _cache, _label), cache(_cache)
2328{
2329}
2330
2331///////////////
2332//
2333// MemSidePort
2334//
2335///////////////
2336bool
2337BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
2338{
2339 cache->recvTimingResp(pkt);
2340 return true;
2341}
2342
2343// Express snooping requests to memside port
2344void
2345BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2346{
2347 // Snoops shouldn't happen when bypassing caches
2348 assert(!cache->system->bypassCaches());
2349
2350 // handle snooping requests
2351 cache->recvTimingSnoopReq(pkt);
2352}
2353
2354Tick
2355BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
2356{
2357 // Snoops shouldn't happen when bypassing caches
2358 assert(!cache->system->bypassCaches());
2359
2360 return cache->recvAtomicSnoop(pkt);
2361}
2362
2363void
2364BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
2365{
2366 // Snoops shouldn't happen when bypassing caches
2367 assert(!cache->system->bypassCaches());
2368
2369 // functional snoop (note that in contrast to atomic we don't have
2370 // a specific functionalSnoop method, as they have the same
2371 // behaviour regardless)
2372 cache->functionalAccess(pkt, false);
2373}
2374
2375void
2376BaseCache::CacheReqPacketQueue::sendDeferredPacket()
2377{
2378 // sanity check
2379 assert(!waitingOnRetry);
2380
2381 // there should never be any deferred request packets in the
2382    // queue; instead we rely on the cache to provide the packets
2383 // from the MSHR queue or write queue
2384 assert(deferredPacketReadyTime() == MaxTick);
2385
2386 // check for request packets (requests & writebacks)
2387 QueueEntry* entry = cache.getNextQueueEntry();
2388
2389 if (!entry) {
2390 // can happen if e.g. we attempt a writeback and fail, but
2391 // before the retry, the writeback is eliminated because
2392 // we snoop another cache's ReadEx.
2393 } else {
2394 // let our snoop responses go first if there are responses to
2395 // the same addresses
2396 if (checkConflictingSnoop(entry->blkAddr)) {
2397 return;
2398 }
2399 waitingOnRetry = entry->sendPacket(cache);
2400 }
2401
2402 // if we succeeded and are not waiting for a retry, schedule the
2403 // next send considering when the next queue is ready, note that
2404 // snoop responses have their own packet queue and thus schedule
2405 // their own events
2406 if (!waitingOnRetry) {
2407 schedSendEvent(cache.nextQueueReadyTime());
2408 }
2409}
2410
2411BaseCache::MemSidePort::MemSidePort(const std::string &_name,
2412 BaseCache *_cache,
2413 const std::string &_label)
2414 : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
2415 _reqQueue(*_cache, *this, _snoopRespQueue, _label),
2416 _snoopRespQueue(*_cache, *this, true, _label), cache(_cache)
2417{
2418}
2419
2420void
2421WriteAllocator::updateMode(Addr write_addr, unsigned write_size,
2422 Addr blk_addr)
2423{
2424 // check if we are continuing where the last write ended
2425 if (nextAddr == write_addr) {
2426 delayCtr[blk_addr] = delayThreshold;
2427 // stop if we have already saturated
2428 if (mode != WriteMode::NO_ALLOCATE) {
2429 byteCount += write_size;
2430                // switch to coalescing mode if we have passed the
2431                // lower threshold
2432 if (mode == WriteMode::ALLOCATE &&
2433 byteCount > coalesceLimit) {
2434 mode = WriteMode::COALESCE;
2435 DPRINTF(Cache, "Switched to write coalescing\n");
2436 } else if (mode == WriteMode::COALESCE &&
2437 byteCount > noAllocateLimit) {
2438 // and continue and switch to non-allocating mode if we
2439 // pass the upper threshold
2440 mode = WriteMode::NO_ALLOCATE;
2441 DPRINTF(Cache, "Switched to write-no-allocate\n");
2442 }
2443 }
2444 } else {
2445 // we did not see a write matching the previous one, start
2446 // over again
2447 byteCount = write_size;
2448 mode = WriteMode::ALLOCATE;
2449 resetDelay(blk_addr);
2450 }
2451 nextAddr = write_addr + write_size;
2452}
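
// Worked example (hypothetical thresholds): with coalesceLimit == 64 and
// noAllocateLimit == 256 and 16-byte sequential writes, the first write
// sets byteCount = 16 in ALLOCATE mode; after the 5th write byteCount =
// 80 > 64 and the mode flips to COALESCE; after the 17th write byteCount
// = 272 > 256 and the mode flips to NO_ALLOCATE; the first write that
// does not start at nextAddr resets byteCount and returns the allocator
// to ALLOCATE.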
2453
2454WriteAllocator*
2455WriteAllocatorParams::create()
2456{
2457 return new WriteAllocator(this);
2458}