/*
 * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Hansson
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __MEM_CACHE_BASE_HH__
#define __MEM_CACHE_BASE_HH__

#include <cassert>
#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>

#include "base/addr_range.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/packet_queue.hh"
#include "mem/qport.hh"
#include "mem/request.hh"
#include "params/WriteAllocator.hh"
#include "sim/eventq.hh"
#include "sim/probe/probe.hh"
#include "sim/serialize.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

class BaseMasterPort;
class BasePrefetcher;
class BaseSlavePort;
class MSHR;
class MasterPort;
class QueueEntry;
struct BaseCacheParams;

91/**
92 * A basic cache interface. Implements some common functions for speed.
93 */
94class BaseCache : public MemObject
95{
96 protected:
97 /**
98 * Indexes to enumerate the MSHR queues.
99 */
100 enum MSHRQueueIndex {
101 MSHRQueue_MSHRs,
102 MSHRQueue_WriteBuffer
103 };
104
105 public:
106 /**
107 * Reasons for caches to be blocked.
108 */
109 enum BlockedCause {
110 Blocked_NoMSHRs = MSHRQueue_MSHRs,
111 Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
112 Blocked_NoTargets,
113 NUM_BLOCKED_CAUSES
114 };
115
116 protected:
117
118 /**
119 * A cache master port is used for the memory-side port of the
120 * cache, and in addition to the basic timing port that only sends
121 * response packets through a transmit list, it also offers the
122 * ability to schedule and send request packets (requests &
123 * writebacks). The send event is scheduled through schedSendEvent,
124 * and the sendDeferredPacket of the timing port is modified to
125 * consider both the transmit list and the requests from the MSHR.
126 */
127 class CacheMasterPort : public QueuedMasterPort
128 {
129
130 public:
131
132 /**
133 * Schedule a send of a request packet (from the MSHR). Note
134 * that we could already have a retry outstanding.
135 */
136 void schedSendEvent(Tick time)
137 {
138 DPRINTF(CachePort, "Scheduling send event at %llu\n", time);
139 reqQueue.schedSendEvent(time);
140 }
141
142 protected:
143
144 CacheMasterPort(const std::string &_name, BaseCache *_cache,
145 ReqPacketQueue &_reqQueue,
146 SnoopRespPacketQueue &_snoopRespQueue) :
147 QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
148 { }
149
150 /**
151 * Memory-side port always snoops.
152 *
153 * @return always true
154 */
155 virtual bool isSnooping() const { return true; }
156 };
157
158 /**
159 * Override the default behaviour of sendDeferredPacket to enable
160 * the memory-side cache port to also send requests based on the
161 * current MSHR status. This queue has a pointer to our specific
162 * cache implementation and is used by the MemSidePort.
163 */
164 class CacheReqPacketQueue : public ReqPacketQueue
165 {
166
167 protected:
168
169 BaseCache &cache;
170 SnoopRespPacketQueue &snoopRespQueue;
171
172 public:
173
174 CacheReqPacketQueue(BaseCache &cache, MasterPort &port,
175 SnoopRespPacketQueue &snoop_resp_queue,
176 const std::string &label) :
177 ReqPacketQueue(cache, port, label), cache(cache),
178 snoopRespQueue(snoop_resp_queue) { }
179
180 /**
181 * Override the normal sendDeferredPacket and do not only
182 * consider the transmit list (used for responses), but also
183 * requests.
184 */
185 virtual void sendDeferredPacket();
186
        /**
         * Check if there is a conflicting snoop response about to be
         * sent out, and if so simply stall any requests, and schedule
         * a send event at the same time as the next snoop response is
         * being sent out.
         */
        bool checkConflictingSnoop(Addr addr)
        {
            if (snoopRespQueue.hasAddr(addr)) {
                DPRINTF(CachePort, "Waiting for snoop response to be "
                        "sent\n");
                Tick when = snoopRespQueue.deferredPacketReadyTime();
                schedSendEvent(when);
                return true;
            }
            return false;
        }
    };


    /**
     * The memory-side port extends the base cache master port with
     * access functions for functional, atomic and timing snoops.
     */
    class MemSidePort : public CacheMasterPort
    {
      private:

        /** The cache-specific queue. */
        CacheReqPacketQueue _reqQueue;

        SnoopRespPacketQueue _snoopRespQueue;

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:

        virtual void recvTimingSnoopReq(PacketPtr pkt);

        virtual bool recvTimingResp(PacketPtr pkt);

        virtual Tick recvAtomicSnoop(PacketPtr pkt);

        virtual void recvFunctionalSnoop(PacketPtr pkt);

      public:

        MemSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);
    };

    /**
     * A cache slave port is used for the CPU-side port of the cache,
     * and it is basically a simple timing port that uses a transmit
     * list for responses to the CPU (or connected master). In
     * addition, it has the functionality to block the port for
     * incoming requests. If blocked, the port will issue a retry once
     * unblocked.
     */
    class CacheSlavePort : public QueuedSlavePort
    {

      public:

        /** Do not accept any new requests. */
        void setBlocked();

        /** Return to normal operation and accept new requests. */
        void clearBlocked();

        bool isBlocked() const { return blocked; }

      protected:

        CacheSlavePort(const std::string &_name, BaseCache *_cache,
                       const std::string &_label);

        /** A normal packet queue used to store responses. */
        RespPacketQueue queue;

        bool blocked;

        bool mustSendRetry;

      private:

        void processSendRetry();

        EventFunctionWrapper sendRetryEvent;

    };

    /**
     * The CPU-side port extends the base cache slave port with access
     * functions for functional, atomic and timing requests.
     */
    class CpuSidePort : public CacheSlavePort
    {
      private:

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:
        virtual bool recvTimingSnoopResp(PacketPtr pkt) override;

        virtual bool tryTiming(PacketPtr pkt) override;

        virtual bool recvTimingReq(PacketPtr pkt) override;

        virtual Tick recvAtomic(PacketPtr pkt) override;

        virtual void recvFunctional(PacketPtr pkt) override;

        virtual AddrRangeList getAddrRanges() const override;

      public:

        CpuSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);

    };

    CpuSidePort cpuSidePort;
    MemSidePort memSidePort;

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    WriteQueue writeBuffer;

    /** Tag and data Storage */
    BaseTags *tags;

    /** Prefetcher */
    BasePrefetcher *prefetcher;

    /** To probe when a cache hit occurs */
    ProbePointArg<PacketPtr> *ppHit;

    /** To probe when a cache miss occurs */
    ProbePointArg<PacketPtr> *ppMiss;

    /**
     * The writeAllocator drives optimizations for streaming writes.
     * It first determines whether a WriteReq MSHR should be delayed,
     * thus ensuring that we wait longer in cases when we are write
     * coalescing and allowing all the bytes of the line to be written
     * before the MSHR packet is sent downstream. This works in unison
     * with the tracking in the MSHR to check if the entire line is
     * written. The write mode also affects the behaviour on filling
     * any whole-line writes. Normally the cache allocates the line
     * when receiving the InvalidateResp, but after seeing enough
     * consecutive lines we switch to using the tempBlock, and thus
     * end up not allocating the line, and instead turning the
     * whole-line write into a writeback straight away.
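     *
     * A rough sketch of how a cache implementation might consult it on
     * the request path (illustrative only; blk_addr stands for the
     * block-aligned address of the write):
     *
     * @code
     * if (writeAllocator && writeAllocator->delay(blk_addr)) {
     *     // hold the WriteReq MSHR back so more writes can coalesce
     * } else {
     *     // send the MSHR packet downstream as usual
     * }
     * @endcode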
     */
    WriteAllocator * const writeAllocator;

    /**
     * Temporary cache block for occasional transitory use. We use
     * the tempBlock to fill when allocation fails (e.g., when there
     * is an outstanding request that accesses the victim block) or
     * when we want to avoid allocation (e.g., exclusive caches)
     */
    TempCacheBlk *tempBlock;

    /**
     * Upstream caches need this packet until true is returned, so
     * hold it for deletion until a subsequent call
     */
    std::unique_ptr<Packet> pendingDelete;

    /**
     * Mark a request as in service (sent downstream in the memory
     * system), effectively making this MSHR the ordering point.
     */
    void markInService(MSHR *mshr, bool pending_modified_resp)
    {
        bool wasFull = mshrQueue.isFull();
        mshrQueue.markInService(mshr, pending_modified_resp);

        if (wasFull && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }
    }

    void markInService(WriteQueueEntry *entry)
    {
        bool wasFull = writeBuffer.isFull();
        writeBuffer.markInService(entry);

        if (wasFull && !writeBuffer.isFull()) {
            clearBlocked(Blocked_NoWBBuffers);
        }
    }

    /**
     * Determine whether we should allocate on a fill or not. If this
     * cache is mostly inclusive with regards to the upstream cache(s)
     * we always allocate (for any non-forwarded and cacheable
     * requests). In the case of a mostly exclusive cache, we allocate
     * on fill if the packet did not come from a cache: that is, if we
     * are dealing with a whole-line write (which behaves much like a
     * writeback), if the original target packet came from a
     * non-caching source, or if we are performing a prefetch or LLSC.
     *
     * @param cmd Command of the incoming requesting packet
     * @return Whether we should allocate on the fill
     */
    inline bool allocOnFill(MemCmd cmd) const
    {
        return clusivity == Enums::mostly_incl ||
            cmd == MemCmd::WriteLineReq ||
            cmd == MemCmd::ReadReq ||
            cmd == MemCmd::WriteReq ||
            cmd.isPrefetch() ||
            cmd.isLLSC();
    }

    /**
     * Regenerate block address using tags.
     * Block address regeneration depends on whether we're using a temporary
     * block or not.
     *
     * @param blk The block whose address to regenerate.
     * @return The block's address.
     */
    Addr regenerateBlkAddr(CacheBlk* blk);

    /**
     * Calculate access latency in cycles given a tag lookup latency, and
     * whether the access was a hit or miss.
     *
     * @param blk The cache block that was accessed.
     * @param lookup_lat Latency of the respective tag lookup.
     * @return The number of cycles that pass due to a block access.
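     *
     * As a sketch of the intended policy (the authoritative version is
     * in the implementation file): on a hit, a sequential-access cache
     * pays the tag and data latencies back to back, while a
     * parallel-access cache pays roughly the slower of the two.
     *
     * @code
     * // illustrative only, for a valid block (hit):
     * Cycles lat = sequentialAccess ? lookup_lat + dataLatency
     *                               : std::max(lookup_lat, dataLatency);
     * @endcode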
     */
    Cycles calculateAccessLatency(const CacheBlk* blk,
                                  const Cycles lookup_lat) const;

    /**
     * Does all the processing necessary to perform the provided request.
     * @param pkt The memory request to perform.
     * @param blk The cache block to be updated.
     * @param lat The latency of the access.
     * @param writebacks List for any writebacks that need to be performed.
     * @return Boolean indicating whether the request was satisfied.
     */
    virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                        PacketList &writebacks);

    /*
     * Handle a timing request that hit in the cache
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk,
                                    Tick request_time);

    /*
     * Handle a timing request that missed in the cache
     *
     * Implementation specific handling for different cache
     * implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk,
                                     Tick forward_time,
                                     Tick request_time) = 0;

    /*
     * Handle a timing request that missed in the cache
     *
     * Common functionality across different cache implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param mshr Any existing mshr for the referenced cache block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    void handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                             Tick forward_time, Tick request_time);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     */
    virtual void recvTimingReq(PacketPtr pkt);

    /**
     * Handling the special case of uncacheable write responses to
     * make recvTimingResp less cluttered.
     */
    void handleUncacheableWriteResp(PacketPtr pkt);

    /**
     * Service non-deferred MSHR targets using the received response
     *
     * Iterates through the list of targets that can be serviced with
     * the current response. Any writebacks that need to be performed
     * must be appended to the writebacks parameter.
     *
     * @param mshr The MSHR that corresponds to the response
     * @param pkt The response packet
     * @param blk The referenced block
     * @param writebacks List of writebacks that need to be performed
     */
    virtual void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt,
                                    CacheBlk *blk, PacketList& writebacks) = 0;

    /**
     * Handles a response (cache line fill/write ack) from the bus.
     * @param pkt The response packet
     */
    virtual void recvTimingResp(PacketPtr pkt);

    /**
     * Snoops bus transactions to maintain coherence.
     * @param pkt The current bus transaction.
     */
    virtual void recvTimingSnoopReq(PacketPtr pkt) = 0;

    /**
     * Handle a snoop response.
     * @param pkt Snoop response packet
     */
    virtual void recvTimingSnoopResp(PacketPtr pkt) = 0;

    /**
     * Handle a request in atomic mode that missed in this cache
     *
     * Creates a downstream request, sends it to the memory below and
     * handles the response. As we are in atomic mode all operations
     * are performed immediately.
     *
     * @param pkt The packet with the request
     * @param blk The referenced block
     * @param writebacks A list with packets for any performed writebacks
     * @return Cycles for handling the request
     */
    virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
                                       PacketList &writebacks) = 0;

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The number of ticks required for the access.
     */
    virtual Tick recvAtomic(PacketPtr pkt);

    /**
     * Snoop for the provided request in the cache and return the estimated
     * time taken.
     * @param pkt The memory request to snoop
     * @return The number of ticks required for the snoop.
     */
    virtual Tick recvAtomicSnoop(PacketPtr pkt) = 0;

    /**
     * Performs the access specified by the request.
     *
     * @param pkt The request to perform.
     * @param from_cpu_side True if the access comes from the CPU-side port
     */
    virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side);

    /**
     * Handle doing the Compare and Swap function for SPARC.
     */
    void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);

    /**
     * Return the next queue entry to service, either a pending miss
     * from the MSHR queue, a buffered write from the write buffer, or
     * something from the prefetcher. This function is responsible
     * for prioritizing among those sources on the fly.
     */
    QueueEntry* getNextQueueEntry();

    /**
     * Insert writebacks into the write buffer
     */
    virtual void doWritebacks(PacketList& writebacks, Tick forward_time) = 0;

    /**
     * Send writebacks down the memory hierarchy in atomic mode
     */
    virtual void doWritebacksAtomic(PacketList& writebacks) = 0;

    /**
     * Create an appropriate downstream bus request packet.
     *
     * Creates a new packet with the request to be sent to the memory
     * below, or nullptr if the current request in cpu_pkt should just
     * be forwarded on.
     *
     * @param cpu_pkt The miss packet that needs to be satisfied.
     * @param blk The referenced block, can be nullptr.
     * @param needs_writable Indicates that the block must be writable
     * even if the request in cpu_pkt doesn't indicate that.
     * @param is_whole_line_write True if there are writes for the
     * whole line
     * @return A packet to send to the memory below
     */
    virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                                       bool needs_writable,
                                       bool is_whole_line_write) const = 0;

    /**
     * Determine if clean lines should be written back or not. In
     * cases where a downstream cache is mostly inclusive we likely
     * want it to act as a victim cache also for lines that have not
     * been modified. Hence, we cannot simply drop the line (or send a
     * clean evict), but rather need to send the actual data.
     */
    const bool writebackClean;

    /**
     * Writebacks from the tempBlock, generated on the response path
     * in atomic mode, must happen after the call to recvAtomic has
     * finished (for the right ordering of the packets). We therefore
     * need to hold on to the packets, and have a method and an event
     * to send them.
     */
    PacketPtr tempBlockWriteback;

    /**
     * Send the outstanding tempBlock writeback. To be called after
     * recvAtomic finishes in cases where the block we filled is in
     * fact the tempBlock, and now needs to be written back.
     */
    void writebackTempBlockAtomic() {
        assert(tempBlockWriteback != nullptr);
        PacketList writebacks{tempBlockWriteback};
        doWritebacksAtomic(writebacks);
        tempBlockWriteback = nullptr;
    }

    /**
     * An event to writeback the tempBlock after recvAtomic
     * finishes. To avoid other calls to recvAtomic getting in
     * between, we create this event with a higher priority.
     */
    EventFunctionWrapper writebackTempBlockAtomicEvent;
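
    // A sketch of how such an event can be constructed with an elevated
    // priority (the authoritative construction is in the implementation
    // file; EventBase::Delayed_Writeback_Pri is assumed here):
    //
    //     writebackTempBlockAtomicEvent(
    //         [this]{ writebackTempBlockAtomic(); }, name(), false,
    //         EventBase::Delayed_Writeback_Pri)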

    /**
     * Perform any necessary updates to the block and perform any data
     * exchange between the packet and the block. The flags of the
     * packet are also set accordingly.
     *
     * @param pkt Request packet from upstream that hit a block
     * @param blk Cache block that the packet hit
     * @param deferred_response Whether this request originally missed
     * @param pending_downgrade Whether the writable flag is to be removed
     */
    virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                                bool deferred_response = false,
                                bool pending_downgrade = false);

    /**
     * Maintain the clusivity of this cache by potentially
     * invalidating a block. This method works in conjunction with
     * satisfyRequest, but is separate to allow us to handle all MSHR
     * targets before potentially dropping a block.
     *
     * @param from_cache Whether we have dealt with a packet from a cache
     * @param blk The block that should potentially be dropped
     */
    void maintainClusivity(bool from_cache, CacheBlk *blk);

    /**
     * Handle a fill operation caused by a received packet.
     *
     * Populates a cache block and handles all outstanding requests for the
     * satisfied fill request. This version takes two memory requests. One
     * contains the fill data, the other is an optional target to satisfy.
     * Note that the reason we return a list of writebacks rather than
     * inserting them directly in the write buffer is that this function
     * is called by both atomic and timing-mode accesses, and in atomic
     * mode we don't mess with the write buffer (we just perform the
     * writebacks atomically once the original request is complete).
     *
     * @param pkt The memory request with the fill data.
     * @param blk The cache block if it already exists.
     * @param writebacks List for any writebacks that need to be performed.
     * @param allocate Whether to allocate a block or use the temp block
     * @return Pointer to the new cache block.
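     *
     * A typical call sequence on the response path looks roughly like
     * the following (a sketch only; forward_time is assumed to have
     * been computed by the caller):
     *
     * @code
     * PacketList writebacks;
     * blk = handleFill(pkt, blk, writebacks, allocOnFill(pkt->cmd));
     * doWritebacks(writebacks, forward_time);
     * @endcode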
     */
    CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
                         PacketList &writebacks, bool allocate);

    /**
     * Allocate a new block and perform any necessary writebacks
     *
     * Find a victim block and if necessary prepare writebacks for any
     * existing data. May return nullptr if there are no replaceable
     * blocks. If a replaceable block is found, it inserts the new block in
     * its place. The new block, however, is not set as valid yet.
     *
     * @param pkt Packet holding the address to update
     * @param writebacks A list of writeback packets for the evicted blocks
     * @return the allocated block
     */
    CacheBlk *allocateBlock(const PacketPtr pkt, PacketList &writebacks);

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @return A packet with the writeback, can be nullptr
     */
    M5_NODISCARD virtual PacketPtr evictBlock(CacheBlk *blk) = 0;

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @param writebacks Return a list of packets with writebacks
     */
    void evictBlock(CacheBlk *blk, PacketList &writebacks);

    /**
     * Invalidate a cache block.
     *
     * @param blk Block to invalidate
     */
    void invalidateBlock(CacheBlk *blk);

    /**
     * Create a writeback request for the given block.
     *
     * @param blk The block to writeback.
     * @return The writeback request for the block.
     */
    PacketPtr writebackBlk(CacheBlk *blk);

    /**
     * Create a writeclean request for the given block.
     *
     * Creates a request that writes the block to the cache below
     * without evicting the block from the current cache.
     *
     * @param blk The block to write clean.
     * @param dest The destination of the write clean operation.
     * @param id Use the given packet id for the write clean operation.
     * @return The generated write clean packet.
     */
    PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id);

    /**
     * Write back dirty blocks in the cache using functional accesses.
     */
    virtual void memWriteback() override;

    /**
     * Invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to
     * memory. Make sure to call memWriteback() first if you want
     * them written to memory.
     */
    virtual void memInvalidate() override;

    /**
     * Determine if there are any dirty blocks in the cache.
     *
     * @return true if at least one block is dirty, false otherwise.
     */
    bool isDirty() const;

    /**
     * Determine if an address is in the ranges covered by this
     * cache. This is useful to filter snoops.
     *
     * @param addr Address to check against
     *
     * @return If the address in question is in range
     */
    bool inRange(Addr addr) const;

    /**
     * Find next request ready time from among possible sources.
     */
    Tick nextQueueReadyTime() const;

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of tag lookup of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles lookupLatency;

    /**
     * The latency of data access of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles dataLatency;

    /**
     * This is the forward latency of the cache. It occurs when there
     * is a cache miss and a request is forwarded downstream, in
     * particular an outbound miss.
     */
    const Cycles forwardLatency;

    /** The latency to fill a cache block */
    const Cycles fillLatency;

    /**
     * The latency of sending a response to the upper-level cache/core
     * on a linefill. The responseLatency parameter captures this
     * latency.
     */
    const Cycles responseLatency;

    /**
     * Whether tags and data are accessed sequentially.
     */
    const bool sequentialAccess;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    bool forwardSnoops;

    /**
     * Clusivity with respect to the upstream cache, determining if we
     * fill into both this cache and the cache above on a miss. Note
     * that we currently do not support strict clusivity policies.
     */
    const Enums::Clusivity clusivity;

    /**
     * Is this cache read only, for example the instruction cache, or
     * table-walker cache. A cache that is read only should never see
     * any writes, and should never get any dirty data (and hence
     * never have to do any writebacks).
     */
    const bool isReadOnly;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores time the cache blocked for statistics. */
    Cycles blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses.
     */
    const AddrRangeList addrRanges;

  public:
    /** System we are currently operating in. */
    System *system;

    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;
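
    // The rate formulas above are wired up in regStats(); conceptually
    // (a sketch): demandMissRate = demandMisses / demandAccesses, and
    // overallMissRate = overallMisses / overallAccesses.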

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of times a HW-prefetched block is evicted w/o reference. */
    Stats::Scalar unusedPrefetches;

    /** Number of blocks written back per thread. */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of uncacheable accesses handled by the MSHRs, per command
        and thread. */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable accesses handled by the MSHRs. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of each uncacheable MSHR access, per command
        and thread. */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of overall uncacheable MSHR accesses. */
    Stats::Formula overallMshrUncacheableLatency;

#if 0
    /** The total number of MSHR accesses per command and thread. */
    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
    /** The total number of demand MSHR accesses. */
    Stats::Formula demandMshrAccesses;
    /** The total number of MSHR accesses. */
    Stats::Formula overallMshrAccesses;
#endif

    /** The miss rate in the MSHRs per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable MSHR access, per command
        and thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable MSHR access. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** Number of replacements of valid blocks. */
    Stats::Scalar replacements;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    void regStats() override;

    /** Registers probes. */
    void regProbePoints() override;

  public:
    BaseCache(const BaseCacheParams *p, unsigned blk_size);
    ~BaseCache();

    void init() override;

    BaseMasterPort &getMasterPort(const std::string &if_name,
                                  PortID idx = InvalidPortID) override;
    BaseSlavePort &getSlavePort(const std::string &if_name,
                                PortID idx = InvalidPortID) override;

    /**
     * Query block size of a cache.
     * @return The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }

    const AddrRangeList &getAddrRanges() const { return addrRanges; }

    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
    {
        MSHR *mshr = mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize,
                                        pkt, time, order++,
                                        allocOnFill(pkt->cmd));

        if (mshrQueue.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_MSHRs);
        }

        if (sched_send) {
            // schedule the send
            schedMemSideSendEvent(time);
        }

        return mshr;
    }

    void allocateWriteBuffer(PacketPtr pkt, Tick time)
    {
        // should only see writes or clean evicts here
        assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);

        Addr blk_addr = pkt->getBlockAddr(blkSize);

        WriteQueueEntry *wq_entry =
            writeBuffer.findMatch(blk_addr, pkt->isSecure());
        if (wq_entry && !wq_entry->inService) {
            DPRINTF(Cache, "Potential to merge writeback %s", pkt->print());
        }

        writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);

        if (writeBuffer.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
        }

        // schedule the send
        schedMemSideSendEvent(time);
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked() const
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curCycle();
            cpuSidePort.setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            blocked_cycles[cause] += curCycle() - blockedCycle;
            cpuSidePort.clearBlocked();
        }
    }

    /**
     * Schedule a send event for the memory-side port. If already
     * scheduled, this may reschedule the event at an earlier
     * time. When the specified time is reached, the port is free to
     * send either a response, a request, or a prefetch request.
     *
     * @param time The time when to attempt sending a packet.
     */
    void schedMemSideSendEvent(Tick time)
    {
        memSidePort.schedSendEvent(time);
    }

    bool inCache(Addr addr, bool is_secure) const {
        return tags->findBlock(addr, is_secure);
    }

    bool inMissQueue(Addr addr, bool is_secure) const {
        return mshrQueue.findMatch(addr, is_secure);
    }

    void incMissCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
        pkt->req->incAccessDepth();
        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }

    void incHitCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
    }

    /**
     * Checks if the cache is coalescing writes
     *
     * @return True if the cache is coalescing writes
     */
    bool coalesce() const;

    /**
     * Cache block visitor that writes back dirty cache blocks using
     * functional writes.
     */
    void writebackVisitor(CacheBlk &blk);

    /**
     * Cache block visitor that invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to memory.
     */
    void invalidateVisitor(CacheBlk &blk);

    /**
     * Take an MSHR, turn it into a suitable downstream packet, and
     * send it out. This construct allows a queue entry to choose a suitable
     * approach based on its type.
     *
     * @param mshr The MSHR to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    virtual bool sendMSHRQueuePacket(MSHR* mshr);

    /**
     * Similar to sendMSHRQueuePacket, but for a write-queue entry
     * instead. Create the packet, and send it, and if successful also
     * mark the entry in service.
     *
     * @param wq_entry The write-queue entry to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);

    /**
     * Serialize the state of the caches
     *
     * We currently don't support checkpointing cache state, so this panics.
     */
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};

/**
 * The write allocator inspects write packets and detects streaming
 * patterns. The write allocator supports a single stream where writes
 * are expected to access consecutive locations, and keeps track of the
 * size of the area covered by the consecutive writes in byteCount.
 *
 * 1) When byteCount has surpassed the coalesceLimit the mode
 * switches from ALLOCATE to COALESCE where writes should be delayed
 * until the whole block is written at which point a single packet
 * (whole line write) can service them.
 *
 * 2) When byteCount has also exceeded the noAllocateLimit (whole
 * line) we switch to NO_ALLOCATE where writes should not allocate in
 * the cache but rather send a whole line write to the memory below.
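 *
 * As an illustrative example (a sketch, with parameter values assumed
 * rather than taken from any default configuration): with 64-byte
 * blocks, a stream of consecutive 64-byte writes advances byteCount by
 * 64 on each call to updateMode:
 *
 * @code
 * writeAllocator->updateMode(0x0000, 64, 0x0000); // stream starts, ALLOCATE
 * writeAllocator->updateMode(0x0040, 64, 0x0040); // byteCount grows...
 * // once byteCount exceeds coalesceLimit   -> mode becomes COALESCE
 * // once byteCount exceeds noAllocateLimit -> mode becomes NO_ALLOCATE
 * @endcode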
 */
class WriteAllocator : public SimObject {
  public:
    WriteAllocator(const WriteAllocatorParams *p) :
        SimObject(p),
        coalesceLimit(p->coalesce_limit * p->block_size),
        noAllocateLimit(p->no_allocate_limit * p->block_size),
        delayThreshold(p->delay_threshold)
    {
        reset();
    }

    /**
     * Should writes be coalesced? This is true if the mode is set to
     * COALESCE or NO_ALLOCATE.
     *
     * @return true if the cache should coalesce writes.
     */
    bool coalesce() const {
        return mode != WriteMode::ALLOCATE;
    }
1244
1245 /**
1246 * Should writes allocate?
1247 *
1248 * @return True if the cache should allocate for writes.
1249 */
1250 bool allocate() const {
1251 return mode != WriteMode::NO_ALLOCATE;
1252 }
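    /*
     * Taken together, the two predicates above map the modes as
     * follows:
     *
     *   mode          coalesce()  allocate()
     *   ALLOCATE      false       true
     *   COALESCE      true        true
     *   NO_ALLOCATE   true        false
     */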
1253
1254 /**
1255 * Reset the write allocator state, meaning that it allocates for
1256 * writes and has not recorded any information about qualifying
1257 * writes that might trigger a switch to coalescing and later no
1258 * allocation.
1259 */
1260 void reset() {
1261 mode = WriteMode::ALLOCATE;
1262 byteCount = 0;
1263 nextAddr = 0;
1264 }
1265
1266 /**
1267 * Check whether the current write needs to be delayed.
1268 *
1269 * @param blk_addr The block address the packet writes to
1270 * @return true if the current packet should be delayed
1271 */
1272 bool delay(Addr blk_addr) {
1273 if (delayCtr[blk_addr] > 0) {
1274 --delayCtr[blk_addr];
1275 return true;
1276 } else {
1277 return false;
1278 }
1279 }
1280
1281 /**
1282 * Clear the delay counter for the given block
1283 *
1284 * @param blk_addr The block address of the accessed cache block
1285 */
1286 void resetDelay(Addr blk_addr) {
1287 delayCtr.erase(blk_addr);
1288 }
1289
1290 /**
1291 * Update the write mode based on the current write
1292 * packet. This method compares the packet's address with any
1293 * current stream, and updates the tracking and the mode
1294 * accordingly.
1295 *
1296 * @param write_addr Start address of the write request
1297 * @param write_size Size of the write request
1298 * @param blk_addr The block address that this packet writes to
1299 */
1300 void updateMode(Addr write_addr, unsigned write_size, Addr blk_addr);
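    /*
     * A minimal sketch of the documented mode progression --
     * illustrative only, not the actual definition in base.cc:
     *
     * @code
     * void updateModeSketch(Addr write_addr, unsigned write_size)
     * {
     *     if (write_addr == nextAddr) {
     *         // the write continues the current stream
     *         byteCount += write_size;
     *         if (byteCount > noAllocateLimit)
     *             mode = WriteMode::NO_ALLOCATE;
     *         else if (byteCount > coalesceLimit)
     *             mode = WriteMode::COALESCE;
     *     } else {
     *         // the stream is broken; start tracking a new one
     *         reset();
     *         byteCount = write_size;
     *     }
     *     nextAddr = write_addr + write_size;
     * }
     * @endcode
     */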
1301
1302 private:
1303 /**
1304 * The current mode for write coalescing and allocation, either
1305 * normal operation (ALLOCATE), write coalescing (COALESCE), or
1306 * write coalescing without allocation (NO_ALLOCATE).
1307 */
1308 enum class WriteMode : char {
1309 ALLOCATE,
1310 COALESCE,
1311 NO_ALLOCATE,
1312 };
1313 WriteMode mode;
1314
1315 /** Address to match writes against to detect streams. */
1316 Addr nextAddr;
1317
1318 /**
1319 * Bytes written contiguously. Saturating once we no longer
1320 * allocate.
1321 */
1322 uint32_t byteCount;
1323
1324 /**
1325 * Limits for when to switch between the different write modes.
1326 */
1327 const uint32_t coalesceLimit;
1328 const uint32_t noAllocateLimit;
1329 /**
1330 * The number of times the allocator will delay a WriteReq MSHR.
1331 */
1332 const uint32_t delayThreshold;
1333
1334 /**
1335 * Per-block count of how many more times the allocator will
1336 * delay a WriteReq MSHR.
1337 */
1338 std::unordered_map<Addr, Counter> delayCtr;
1339};
1340
1341#endif //__MEM_CACHE_BASE_HH__
506
507 /**
508 * Handles a response (cache line fill/write ack) from the bus.
509 * @param pkt The response packet
510 */
511 virtual void recvTimingResp(PacketPtr pkt);
512
513 /**
514 * Snoops bus transactions to maintain coherence.
515 * @param pkt The current bus transaction.
516 */
517 virtual void recvTimingSnoopReq(PacketPtr pkt) = 0;
518
519 /**
520 * Handle a snoop response.
521 * @param pkt Snoop response packet
522 */
523 virtual void recvTimingSnoopResp(PacketPtr pkt) = 0;
524
525 /**
526 * Handle a request in atomic mode that missed in this cache
527 *
528 * Creates a downstream request, sends it to the memory below and
529 * handles the response. As we are in atomic mode all operations
530 * are performed immediately.
531 *
532 * @param pkt The packet with the request
533 * @param blk The referenced block
534 * @param writebacks A list with packets for any performed writebacks
535 * @return Cycles for handling the request
536 */
537 virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
538 PacketList &writebacks) = 0;
539
540 /**
541 * Performs the access specified by the request.
542 * @param pkt The request to perform.
543 * @return The number of ticks required for the access.
544 */
545 virtual Tick recvAtomic(PacketPtr pkt);
546
547 /**
548 * Snoop for the provided request in the cache and return the estimated
549 * time taken.
550 * @param pkt The memory request to snoop
551 * @return The number of ticks required for the snoop.
552 */
553 virtual Tick recvAtomicSnoop(PacketPtr pkt) = 0;
554
555 /**
556 * Performs the access specified by the request.
557 *
558 * @param pkt The request to perform.
559 * @param from_cpu_side Whether the access comes from the CPU side or the memory side port
560 */
561 virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side);
562
563 /**
564 * Handle doing the Compare and Swap function for SPARC.
565 */
566 void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);
567
568 /**
569 * Return the next queue entry to service, either a pending miss
570 * from the MSHR queue, a buffered write from the write buffer, or
571 * something from the prefetcher. This function is responsible
572 * for prioritizing among those sources on the fly.
573 */
574 QueueEntry* getNextQueueEntry();
575
576 /**
577 * Insert writebacks into the write buffer
578 */
579 virtual void doWritebacks(PacketList& writebacks, Tick forward_time) = 0;
580
581 /**
582 * Send writebacks down the memory hierarchy in atomic mode
583 */
584 virtual void doWritebacksAtomic(PacketList& writebacks) = 0;
585
586 /**
587 * Create an appropriate downstream bus request packet.
588 *
589 * Creates a new packet with the request to be sent to the memory
590 * below, or nullptr if the current request in cpu_pkt should just
591 * be forwarded on.
592 *
593 * @param cpu_pkt The miss packet that needs to be satisfied.
594 * @param blk The referenced block, can be nullptr.
595 * @param needs_writable Indicates that the block must be writable
596 * even if the request in cpu_pkt doesn't indicate that.
597 * @param is_whole_line_write True if there are writes for the
598 * whole line
599 * @return A packet to send to the memory below
600 */
601 virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
602 bool needs_writable,
603 bool is_whole_line_write) const = 0;
604
605 /**
606 * Determine if clean lines should be written back or not. In
607 * cases where a downstream cache is mostly exclusive we likely
608 * want it to act as a victim cache also for lines that have not
609 * been modified. Hence, we cannot simply drop the line (or send a
610 * clean evict), but rather need to send the actual data.
611 */
612 const bool writebackClean;
613
614 /**
615 * Writebacks from the tempBlock, resulting on the response path
616 * in atomic mode, must happen after the call to recvAtomic has
617 * finished (for the right ordering of the packets). We therefore
618 * need to hold on to the packets, and have a method and an event
619 * to send them.
620 */
621 PacketPtr tempBlockWriteback;
622
623 /**
624 * Send the outstanding tempBlock writeback. To be called after
625 * recvAtomic finishes in cases where the block we filled is in
626 * fact the tempBlock, and now needs to be written back.
627 */
628 void writebackTempBlockAtomic() {
629 assert(tempBlockWriteback != nullptr);
630 PacketList writebacks{tempBlockWriteback};
631 doWritebacksAtomic(writebacks);
632 tempBlockWriteback = nullptr;
633 }
634
635 /**
636 * An event to writeback the tempBlock after recvAtomic
637 * finishes. To avoid other calls to recvAtomic getting in
638 * between, we create this event with a higher priority.
639 */
640 EventFunctionWrapper writebackTempBlockAtomicEvent;
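    /*
     * A sketch of how this event can be constructed in the
     * initializer list (the exact priority constant used here is an
     * assumption, not taken from this file):
     *
     * @code
     * writebackTempBlockAtomicEvent(
     *     [this]{ writebackTempBlockAtomic(); }, name(), false,
     *     EventBase::Delayed_Writeback_Pri)
     * @endcode
     */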
641
642 /**
643 * Perform any necessary updates to the block and perform any data
644 * exchange between the packet and the block. The flags of the
645 * packet are also set accordingly.
646 *
647 * @param pkt Request packet from upstream that hit a block
648 * @param blk Cache block that the packet hit
649 * @param deferred_response Whether this request originally missed
650 * @param pending_downgrade Whether the writable flag is to be removed
651 */
652 virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
653 bool deferred_response = false,
654 bool pending_downgrade = false);
655
656 /**
657 * Maintain the clusivity of this cache by potentially
658 * invalidating a block. This method works in conjunction with
659 * satisfyRequest, but is separate to allow us to handle all MSHR
660 * targets before potentially dropping a block.
661 *
662 * @param from_cache Whether we have dealt with a packet from a cache
663 * @param blk The block that should potentially be dropped
664 */
665 void maintainClusivity(bool from_cache, CacheBlk *blk);
666
667 /**
668 * Handle a fill operation caused by a received packet.
669 *
670 * Populates a cache block and handles all outstanding requests for the
671 * satisfied fill request. The packet carries the fill data, and
672 * blk points to the existing block, if there is one.
673 * Note that the reason we return a list of writebacks rather than
674 * inserting them directly in the write buffer is that this function
675 * is called by both atomic and timing-mode accesses, and in atomic
676 * mode we don't mess with the write buffer (we just perform the
677 * writebacks atomically once the original request is complete).
678 *
679 * @param pkt The memory request with the fill data.
680 * @param blk The cache block if it already exists.
681 * @param writebacks List for any writebacks that need to be performed.
682 * @param allocate Whether to allocate a block or use the temp block
683 * @return Pointer to the new cache block.
684 */
685 CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
686 PacketList &writebacks, bool allocate);
687
688 /**
689 * Allocate a new block and perform any necessary writebacks
690 *
691 * Find a victim block and if necessary prepare writebacks for any
692 * existing data. May return nullptr if there are no replaceable
693 * blocks. If a replaceable block is found, it inserts the new block in
694 * its place. The new block, however, is not set as valid yet.
695 *
696 * @param pkt Packet holding the address to update
697 * @param writebacks A list of writeback packets for the evicted blocks
698 * @return the allocated block
699 */
700 CacheBlk *allocateBlock(const PacketPtr pkt, PacketList &writebacks);
701 /**
702 * Evict a cache block.
703 *
704 * Performs a writeback if necessary and invalidates the block
705 *
706 * @param blk Block to invalidate
707 * @return A packet with the writeback, can be nullptr
708 */
709 M5_NODISCARD virtual PacketPtr evictBlock(CacheBlk *blk) = 0;
710
711 /**
712 * Evict a cache block.
713 *
714 * Performs a writeback if necessary and invalidates the block
715 *
716 * @param blk Block to invalidate
717 * @param writebacks Return a list of packets with writebacks
718 */
719 void evictBlock(CacheBlk *blk, PacketList &writebacks);
720
721 /**
722 * Invalidate a cache block.
723 *
724 * @param blk Block to invalidate
725 */
726 void invalidateBlock(CacheBlk *blk);
727
728 /**
729 * Create a writeback request for the given block.
730 *
731 * @param blk The block to writeback.
732 * @return The writeback request for the block.
733 */
734 PacketPtr writebackBlk(CacheBlk *blk);
735
736 /**
737 * Create a writeclean request for the given block.
738 *
739 * Creates a request that writes the block to the cache below
740 * without evicting the block from the current cache.
741 *
742 * @param blk The block to write clean.
743 * @param dest The destination of the write clean operation.
744 * @param id Use the given packet id for the write clean operation.
745 * @return The generated write clean packet.
746 */
747 PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id);
748
749 /**
750 * Write back dirty blocks in the cache using functional accesses.
751 */
752 virtual void memWriteback() override;
753
754 /**
755 * Invalidates all blocks in the cache.
756 *
757 * @warn Dirty cache lines will not be written back to
758 * memory. Make sure to call memWriteback() first if you want
759 * to write them to memory.
760 */
761 virtual void memInvalidate() override;
762
763 /**
764 * Determine if there are any dirty blocks in the cache.
765 *
766 * @return true if at least one block is dirty, false otherwise.
767 */
768 bool isDirty() const;
769
770 /**
771 * Determine if an address is in the ranges covered by this
772 * cache. This is useful to filter snoops.
773 *
774 * @param addr Address to check against
775 *
776 * @return If the address in question is in range
777 */
778 bool inRange(Addr addr) const;
779
780 /**
781 * Find next request ready time from among possible sources.
782 */
783 Tick nextQueueReadyTime() const;
784
785 /** Block size of this cache */
786 const unsigned blkSize;
787
788 /**
789 * The latency of a tag lookup. It is incurred whenever the
790 * cache is accessed.
791 */
792 const Cycles lookupLatency;
793
794 /**
795 * The latency of a data access. It is incurred whenever the
796 * cache's data array is accessed.
797 */
798 const Cycles dataLatency;
799
800 /**
801 * This is the forward latency of the cache. It occurs when there
802 * is a cache miss and a request is forwarded downstream, in
803 * particular an outbound miss.
804 */
805 const Cycles forwardLatency;
806
807 /** The latency to fill a cache block */
808 const Cycles fillLatency;
809
810 /**
811 * The latency of sending a response to the upper-level cache or
812 * core on a linefill. The responseLatency parameter captures this
813 * latency.
814 */
815 const Cycles responseLatency;
816
817 /**
818 * Whether tags and data are accessed sequentially.
819 */
820 const bool sequentialAccess;
821
822 /** The number of targets for each MSHR. */
823 const int numTarget;
824
825 /** Do we forward snoops from mem side port through to cpu side port? */
826 bool forwardSnoops;
827
828 /**
829 * Clusivity with respect to the upstream cache, determining if we
830 * fill into both this cache and the cache above on a miss. Note
831 * that we currently do not support strict clusivity policies.
832 */
833 const Enums::Clusivity clusivity;
834
835 /**
836 * Is this cache read-only, for example an instruction cache or a
837 * table-walker cache. A cache that is read-only should never see
838 * any writes, and should never hold any dirty data (and hence
839 * never have to do any writebacks).
840 */
841 const bool isReadOnly;
842
843 /**
844 * Bit vector of the blocking reasons for the access path.
845 * @sa #BlockedCause
846 */
847 uint8_t blocked;
848
849 /** Increasing order number assigned to each incoming request. */
850 uint64_t order;
851
853 /** Stores the cycle at which the cache blocked, for statistics. */
853 Cycles blockedCycle;
854
855 /** Pointer to the MSHR that has no targets. */
856 MSHR *noTargetMSHR;
857
858 /** The number of misses to trigger an exit event. */
859 Counter missCount;
860
861 /**
862 * The address range to which the cache responds on the CPU side.
863 * Normally this is all possible memory addresses. */
864 const AddrRangeList addrRanges;
865
866 public:
867 /** System we are currently operating in. */
868 System *system;
869
870 // Statistics
871 /**
872 * @addtogroup CacheStatistics
873 * @{
874 */
875
876 /** Number of hits per thread for each type of command.
877 @sa Packet::Command */
878 Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
879 /** Number of hits for demand accesses. */
880 Stats::Formula demandHits;
881 /** Number of hits for all accesses. */
882 Stats::Formula overallHits;
883
884 /** Number of misses per thread for each type of command.
885 @sa Packet::Command */
886 Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
887 /** Number of misses for demand accesses. */
888 Stats::Formula demandMisses;
889 /** Number of misses for all accesses. */
890 Stats::Formula overallMisses;
891
892 /**
893 * Total number of cycles per thread/command spent waiting for a miss.
894 * Used to calculate the average miss latency.
895 */
896 Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
897 /** Total number of cycles spent waiting for demand misses. */
898 Stats::Formula demandMissLatency;
899 /** Total number of cycles spent waiting for all misses. */
900 Stats::Formula overallMissLatency;
901
902 /** The number of accesses per command and thread. */
903 Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
904 /** The number of demand accesses. */
905 Stats::Formula demandAccesses;
906 /** The number of overall accesses. */
907 Stats::Formula overallAccesses;
908
909 /** The miss rate per command and thread. */
910 Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
911 /** The miss rate of all demand accesses. */
912 Stats::Formula demandMissRate;
913 /** The miss rate for all accesses. */
914 Stats::Formula overallMissRate;
915
916 /** The average miss latency per command and thread. */
917 Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
918 /** The average miss latency for demand misses. */
919 Stats::Formula demandAvgMissLatency;
920 /** The average miss latency for all misses. */
921 Stats::Formula overallAvgMissLatency;
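    /*
     * As a worked example of how these formulas relate to the
     * counters above, regStats() typically wires them up as simple
     * ratios (illustrative; see regStats() for the authoritative
     * definitions):
     *
     * @code
     * demandMissRate = demandMisses / demandAccesses;
     * demandAvgMissLatency = demandMissLatency / demandMisses;
     * @endcode
     */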
922
923 /** The total number of cycles blocked for each blocked cause. */
924 Stats::Vector blocked_cycles;
925 /** The number of times this cache blocked for each blocked cause. */
926 Stats::Vector blocked_causes;
927
928 /** The average number of cycles blocked for each blocked cause. */
929 Stats::Formula avg_blocked;
930
931 /** The number of times a HW-prefetched block is evicted w/o reference. */
932 Stats::Scalar unusedPrefetches;
933
934 /** Number of blocks written back per thread. */
935 Stats::Vector writebacks;
936
937 /** Number of misses that hit in the MSHRs per command and thread. */
938 Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
939 /** Demand misses that hit in the MSHRs. */
940 Stats::Formula demandMshrHits;
941 /** Total number of misses that hit in the MSHRs. */
942 Stats::Formula overallMshrHits;
943
944 /** Number of misses that miss in the MSHRs, per command and thread. */
945 Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
946 /** Demand misses that miss in the MSHRs. */
947 Stats::Formula demandMshrMisses;
948 /** Total number of misses that miss in the MSHRs. */
949 Stats::Formula overallMshrMisses;
950
951 /** Number of uncacheable accesses, per command and thread. */
952 Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
953 /** Total number of uncacheable accesses. */
954 Stats::Formula overallMshrUncacheable;
955
956 /** Total cycle latency of each MSHR miss, per command and thread. */
957 Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
958 /** Total cycle latency of demand MSHR misses. */
959 Stats::Formula demandMshrMissLatency;
960 /** Total cycle latency of overall MSHR misses. */
961 Stats::Formula overallMshrMissLatency;
962
963 /** Total cycle latency of uncacheable accesses, per command and thread. */
964 Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
965 /** Total cycle latency of all uncacheable accesses. */
966 Stats::Formula overallMshrUncacheableLatency;
967
968#if 0
969 /** The total number of MSHR accesses per command and thread. */
970 Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
971 /** The total number of demand MSHR accesses. */
972 Stats::Formula demandMshrAccesses;
973 /** The total number of MSHR accesses. */
974 Stats::Formula overallMshrAccesses;
975#endif
976
977 /** The miss rate in the MSHRs per command and thread. */
978 Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
979 /** The demand miss rate in the MSHRs. */
980 Stats::Formula demandMshrMissRate;
981 /** The overall miss rate in the MSHRs. */
982 Stats::Formula overallMshrMissRate;
983
984 /** The average latency of an MSHR miss, per command and thread. */
985 Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
986 /** The average latency of a demand MSHR miss. */
987 Stats::Formula demandAvgMshrMissLatency;
988 /** The average overall latency of an MSHR miss. */
989 Stats::Formula overallAvgMshrMissLatency;
990
991 /** The average latency of an uncacheable access, per command and thread. */
992 Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
993 /** The average overall latency of an uncacheable access. */
994 Stats::Formula overallAvgMshrUncacheableLatency;
995
996 /** Number of replacements of valid blocks. */
997 Stats::Scalar replacements;
998
999 /**
1000 * @}
1001 */
1002
1003 /**
1004 * Register stats for this object.
1005 */
1006 void regStats() override;
1007
1008 /** Registers probes. */
1009 void regProbePoints() override;
1010
1011 public:
1012 BaseCache(const BaseCacheParams *p, unsigned blk_size);
1013 ~BaseCache();
1014
1015 void init() override;
1016
1017 BaseMasterPort &getMasterPort(const std::string &if_name,
1018 PortID idx = InvalidPortID) override;
1019 BaseSlavePort &getSlavePort(const std::string &if_name,
1020 PortID idx = InvalidPortID) override;
1021
1022 /**
1023 * Query block size of a cache.
1024 * @return The block size
1025 */
1026 unsigned
1027 getBlockSize() const
1028 {
1029 return blkSize;
1030 }
1031
1032 const AddrRangeList &getAddrRanges() const { return addrRanges; }
1033
1034 MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
1035 {
1036 MSHR *mshr = mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize,
1037 pkt, time, order++,
1038 allocOnFill(pkt->cmd));
1039
1040 if (mshrQueue.isFull()) {
1041 setBlocked((BlockedCause)MSHRQueue_MSHRs);
1042 }
1043
1044 if (sched_send) {
1045 // schedule the send
1046 schedMemSideSendEvent(time);
1047 }
1048
1049 return mshr;
1050 }
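    /*
     * A hypothetical usage sketch (the timing names are assumed from
     * the surrounding cache code): a timing access that misses can
     * allocate an MSHR at the forwarding time of the request, e.g.
     *
     * @code
     * Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
     * MSHR *mshr = allocateMissBuffer(pkt, forward_time);
     * @endcode
     */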
1051
1052 void allocateWriteBuffer(PacketPtr pkt, Tick time)
1053 {
1054 // should only see writes or clean evicts here
1055 assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);
1056
1057 Addr blk_addr = pkt->getBlockAddr(blkSize);
1058
1059 WriteQueueEntry *wq_entry =
1060 writeBuffer.findMatch(blk_addr, pkt->isSecure());
1061 if (wq_entry && !wq_entry->inService) {
1062 DPRINTF(Cache, "Potential to merge writeback %s\n", pkt->print());
1063 }
1064
1065 writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);
1066
1067 if (writeBuffer.isFull()) {
1068 setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
1069 }
1070
1071 // schedule the send
1072 schedMemSideSendEvent(time);
1073 }
1074
1075 /**
1076 * Returns true if the cache is blocked for accesses.
1077 */
1078 bool isBlocked() const
1079 {
1080 return blocked != 0;
1081 }
1082
1083 /**
1084 * Marks the access path of the cache as blocked for the given cause. This
1085 * also sets the blocked flag in the slave interface.
1086 * @param cause The reason for the cache blocking.
1087 */
1088 void setBlocked(BlockedCause cause)
1089 {
1090 uint8_t flag = 1 << cause;
1091 if (blocked == 0) {
1092 blocked_causes[cause]++;
1093 blockedCycle = curCycle();
1094 cpuSidePort.setBlocked();
1095 }
1096 blocked |= flag;
1097 DPRINTF(Cache, "Blocking for cause %d, mask=%d\n", cause, blocked);
1098 }
1099
1100 /**
1101 * Marks the cache as unblocked for the given cause. This also clears the
1102 * blocked flags in the appropriate interfaces.
1103 * @param cause The newly unblocked cause.
1104 * @warning Calling this function can cause a blocked request on the bus to
1105 * access the cache. The cache must be in a state to handle that request.
1106 */
1107 void clearBlocked(BlockedCause cause)
1108 {
1109 uint8_t flag = 1 << cause;
1110 blocked &= ~flag;
1111 DPRINTF(Cache, "Unblocking for cause %d, mask=%d\n", cause, blocked);
1112 if (blocked == 0) {
1113 blocked_cycles[cause] += curCycle() - blockedCycle;
1114 cpuSidePort.clearBlocked();
1115 }
1116 }
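    /*
     * Worked example of the blocking bit vector: assuming the MSHR
     * queue maps to bit 0 and the write buffer to bit 1, filling
     * both takes blocked from 0b00 to 0b01 to 0b11. Clearing one
     * cause leaves the cache blocked (0b10 or 0b01) until the other
     * cause is cleared as well, at which point the CPU-side port is
     * unblocked.
     */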
1117
1118 /**
1119 * Schedule a send event for the memory-side port. If already
1120 * scheduled, this may reschedule the event at an earlier
1121 * time. When the specified time is reached, the port is free to
1122 * send either a response, a request, or a prefetch request.
1123 *
1124 * @param time The time when to attempt sending a packet.
1125 */
1126 void schedMemSideSendEvent(Tick time)
1127 {
1128 memSidePort.schedSendEvent(time);
1129 }
1130
1131 bool inCache(Addr addr, bool is_secure) const {
1132 return tags->findBlock(addr, is_secure);
1133 }
1134
1135 bool inMissQueue(Addr addr, bool is_secure) const {
1136 return mshrQueue.findMatch(addr, is_secure);
1137 }
1138
1139 void incMissCount(PacketPtr pkt)
1140 {
1141 assert(pkt->req->masterId() < system->maxMasters());
1142 misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
1143 pkt->req->incAccessDepth();
1144 if (missCount) {
1145 --missCount;
1146 if (missCount == 0)
1147 exitSimLoop("A cache reached the maximum miss count");
1148 }
1149 }
1150 void incHitCount(PacketPtr pkt)
1151 {
1152 assert(pkt->req->masterId() < system->maxMasters());
1153 hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
1154
1155 }
1156
1157 /**
1158 * Checks if the cache is coalescing writes
1159 *
1160 * @return True if the cache is coalescing writes
1161 */
1162 bool coalesce() const;
1163
1164
1165 /**
1166 * Cache block visitor that writes back dirty cache blocks using
1167 * functional writes.
1168 */
1169 void writebackVisitor(CacheBlk &blk);
1170
1171 /**
1172 * Cache block visitor that invalidates all blocks in the cache.
1173 *
1174 * @warn Dirty cache lines will not be written back to memory.
1175 */
1176 void invalidateVisitor(CacheBlk &blk);
1177
1178 /**
1179 * Take an MSHR, turn it into a suitable downstream packet, and
1180 * send it out. This construct allows a queue entry to choose a suitable
1181 * approach based on its type.
1182 *
1183 * @param mshr The MSHR to turn into a packet and send
1184 * @return True if the port is waiting for a retry
1185 */
1186 virtual bool sendMSHRQueuePacket(MSHR* mshr);
1187
1188 /**
1189 * Similar to sendMSHRQueuePacket, but for a write-queue entry
1190 * instead. Create the packet and send it, and if successful also
1191 * mark the entry in service.
1192 *
1193 * @param wq_entry The write-queue entry to turn into a packet and send
1194 * @return True if the port is waiting for a retry
1195 */
1196 bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);
1197
1198 /**
1199 * Serialize the state of the caches
1200 *
1201 * We currently don't support checkpointing cache state, so this panics.
1202 */
1203 void serialize(CheckpointOut &cp) const override;
1204 void unserialize(CheckpointIn &cp) override;
1205};
1206
1207/**
1208 * The write allocator inspects write packets and detects streaming
1209 * patterns. It supports a single stream where writes are expected
1210 * to access consecutive locations, and it keeps track of the size
1211 * of the area covered by the consecutive writes in byteCount.
1212 *
1213 * 1) When byteCount has surpassed the coalesceLimit, the mode
1214 * switches from ALLOCATE to COALESCE, where writes should be delayed
1215 * until the whole block is written, at which point a single packet
1216 * (a whole-line write) can service them.
1217 *
1218 * 2) When byteCount has also exceeded the noAllocateLimit (a whole
1219 * line), we switch to NO_ALLOCATE, where writes should not allocate
1220 * in the cache but rather send a whole-line write to the memory below.
1221 */
1222class WriteAllocator : public SimObject {
1223 public:
1224 WriteAllocator(const WriteAllocatorParams *p) :
1225 SimObject(p),
1226 coalesceLimit(p->coalesce_limit * p->block_size),
1227 noAllocateLimit(p->no_allocate_limit * p->block_size),
1228 delayThreshold(p->delay_threshold)
1229 {
1230 reset();
1231 }
1232
1233 /**
1234 * Should writes be coalesced? This is true if the mode is
1235 * COALESCE or NO_ALLOCATE.
1236 *
1237 * @return True if the cache should coalesce writes.
1238 */
1239 bool coalesce() const {
1240 return mode != WriteMode::ALLOCATE;
1241 }
1242
1243 /**
1244 * Should writes allocate?
1245 *
1246 * @return True if the cache should allocate for writes.
1247 */
1248 bool allocate() const {
1249 return mode != WriteMode::NO_ALLOCATE;
1250 }
1251
1252 /**
1253 * Reset the write allocator state, meaning that it allocates for
1254 * writes and has not recorded any information about qualifying
1255 * writes that might trigger a switch to coalescing and later no
1256 * allocation.
1257 */
1258 void reset() {
1259 mode = WriteMode::ALLOCATE;
1260 byteCount = 0;
1261 nextAddr = 0;
1262 }
1263
1264 /**
1265 * Check whether the current write needs to be delayed.
1266 *
1267 * @param blk_addr The block address the packet writes to
1268 * @return true if the current packet should be delayed
1269 */
1270 bool delay(Addr blk_addr) {
1271 if (delayCtr[blk_addr] > 0) {
1272 --delayCtr[blk_addr];
1273 return true;
1274 } else {
1275 return false;
1276 }
1277 }
1278
1279 /**
1280 * Clear the delay counter for the given block
1281 *
1282 * @param blk_addr The block address of the accessed cache block
1283 */
1284 void resetDelay(Addr blk_addr) {
1285 delayCtr.erase(blk_addr);
1286 }
1287
1288 /**
1289 * Update the write mode based on the current write
1290 * packet. This method compares the packet's address with any
1291 * current stream, and updates the tracking and the mode
1292 * accordingly.
1293 *
1294 * @param write_addr Start address of the write request
1295 * @param write_size Size of the write request
1296 * @param blk_addr The block address that this packet writes to
1297 */
1298 void updateMode(Addr write_addr, unsigned write_size, Addr blk_addr);
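    /*
     * A hypothetical caller-side sketch (the surrounding cache code
     * is assumed, not part of this class) of how a cache can drive
     * the allocator on a timing-mode write:
     *
     * @code
     * writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
     *                            pkt->getBlockAddr(blkSize));
     * if (writeAllocator->delay(pkt->getBlockAddr(blkSize))) {
     *     // hold the write in the MSHR until the line coalesces
     * } else if (!writeAllocator->allocate()) {
     *     // send a whole-line write downstream instead of filling
     * }
     * @endcode
     */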
1299
1300 private:
1301 /**
1302 * The current mode for write coalescing and allocation, either
1303 * normal operation (ALLOCATE), write coalescing (COALESCE), or
1304 * write coalescing without allocation (NO_ALLOCATE).
1305 */
1306 enum class WriteMode : char {
1307 ALLOCATE,
1308 COALESCE,
1309 NO_ALLOCATE,
1310 };
1311 WriteMode mode;
1312
1313 /** Address to match writes against to detect streams. */
1314 Addr nextAddr;
1315
1316 /**
1317 * Bytes written contiguously. Saturating once we no longer
1318 * allocate.
1319 */
1320 uint32_t byteCount;
1321
1322 /**
1323 * Limits for when to switch between the different write modes.
1324 */
1325 const uint32_t coalesceLimit;
1326 const uint32_t noAllocateLimit;
1327 /**
1328 * The number of times the allocator will delay a WriteReq MSHR.
1329 */
1330 const uint32_t delayThreshold;
1331
1332 /**
1333 * Per-block count of how many more times the allocator will
1334 * delay a WriteReq MSHR.
1335 */
1336 std::unordered_map<Addr, Counter> delayCtr;
1337};
1338
1339#endif //__MEM_CACHE_BASE_HH__