base.hh (13717:11e81e2a98bd → 13746:723109f11d56)
/*
 * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Hansson
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __MEM_CACHE_BASE_HH__
#define __MEM_CACHE_BASE_HH__

#include <cassert>
#include <cstdint>
#include <string>
#include <unordered_map>

#include "base/addr_range.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/packet_queue.hh"
#include "mem/qport.hh"
#include "mem/request.hh"
#include "params/WriteAllocator.hh"
#include "sim/eventq.hh"
#include "sim/probe/probe.hh"
#include "sim/serialize.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

class BaseMasterPort;
class BasePrefetcher;
class BaseSlavePort;
class MSHR;
class MasterPort;
class QueueEntry;
struct BaseCacheParams;

/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
  protected:
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };

  public:
    /**
     * Reasons for caches to be blocked.
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };

  protected:

    /**
     * A cache master port is used for the memory-side port of the
     * cache, and in addition to the basic timing port that only sends
     * response packets through a transmit list, it also offers the
     * ability to schedule and send request packets (requests &
     * writebacks). The send event is scheduled through schedSendEvent,
     * and the sendDeferredPacket of the timing port is modified to
     * consider both the transmit list and the requests from the MSHR.
     */
    class CacheMasterPort : public QueuedMasterPort
    {

      public:

        /**
         * Schedule a send of a request packet (from the MSHR). Note
         * that we could already have a retry outstanding.
         */
        void schedSendEvent(Tick time)
        {
            DPRINTF(CachePort, "Scheduling send event at %llu\n", time);
            reqQueue.schedSendEvent(time);
        }

      protected:

        CacheMasterPort(const std::string &_name, BaseCache *_cache,
                        ReqPacketQueue &_reqQueue,
                        SnoopRespPacketQueue &_snoopRespQueue) :
            QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
        { }

        /**
         * Memory-side port always snoops.
         *
         * @return always true
         */
        virtual bool isSnooping() const { return true; }
    };

    /**
     * Override the default behaviour of sendDeferredPacket to enable
     * the memory-side cache port to also send requests based on the
     * current MSHR status. This queue has a pointer to our specific
     * cache implementation and is used by the MemSidePort.
     */
    class CacheReqPacketQueue : public ReqPacketQueue
    {

      protected:

        BaseCache &cache;
        SnoopRespPacketQueue &snoopRespQueue;

      public:

        CacheReqPacketQueue(BaseCache &cache, MasterPort &port,
                            SnoopRespPacketQueue &snoop_resp_queue,
                            const std::string &label) :
            ReqPacketQueue(cache, port, label), cache(cache),
            snoopRespQueue(snoop_resp_queue) { }

        /**
         * Override the normal sendDeferredPacket and do not only
         * consider the transmit list (used for responses), but also
         * requests.
         */
        virtual void sendDeferredPacket();

        /**
         * Check if there is a conflicting snoop response about to be
         * sent out, and if so stall any requests and schedule a send
         * event for the time at which the next snoop response is sent
         * out.
         */
        bool checkConflictingSnoop(Addr addr)
        {
            if (snoopRespQueue.hasAddr(addr)) {
                DPRINTF(CachePort, "Waiting for snoop response to be "
                        "sent\n");
                Tick when = snoopRespQueue.deferredPacketReadyTime();
                schedSendEvent(when);
                return true;
            }
            return false;
        }
    };


    /**
     * The memory-side port extends the base cache master port with
     * access functions for functional, atomic and timing snoops.
     */
    class MemSidePort : public CacheMasterPort
    {
      private:

        /** The cache-specific queue. */
        CacheReqPacketQueue _reqQueue;

        SnoopRespPacketQueue _snoopRespQueue;

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:

        virtual void recvTimingSnoopReq(PacketPtr pkt);

        virtual bool recvTimingResp(PacketPtr pkt);

        virtual Tick recvAtomicSnoop(PacketPtr pkt);

        virtual void recvFunctionalSnoop(PacketPtr pkt);

      public:

        MemSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);
    };

    /**
     * A cache slave port is used for the CPU-side port of the cache,
     * and it is basically a simple timing port that uses a transmit
     * list for responses to the CPU (or connected master). In
     * addition, it has the functionality to block the port for
     * incoming requests. If blocked, the port will issue a retry once
     * unblocked.
     */
    class CacheSlavePort : public QueuedSlavePort
    {

      public:

        /** Do not accept any new requests. */
        void setBlocked();

        /** Return to normal operation and accept new requests. */
        void clearBlocked();

        bool isBlocked() const { return blocked; }

      protected:

        CacheSlavePort(const std::string &_name, BaseCache *_cache,
                       const std::string &_label);

        /** A normal packet queue used to store responses. */
        RespPacketQueue queue;

        bool blocked;

        bool mustSendRetry;

      private:

        void processSendRetry();

        EventFunctionWrapper sendRetryEvent;

    };

    /**
     * The CPU-side port extends the base cache slave port with access
     * functions for functional, atomic and timing requests.
     */
    class CpuSidePort : public CacheSlavePort
    {
      private:

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:
        virtual bool recvTimingSnoopResp(PacketPtr pkt) override;

        virtual bool tryTiming(PacketPtr pkt) override;

        virtual bool recvTimingReq(PacketPtr pkt) override;

        virtual Tick recvAtomic(PacketPtr pkt) override;

        virtual void recvFunctional(PacketPtr pkt) override;

        virtual AddrRangeList getAddrRanges() const override;

      public:

        CpuSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);

    };

    /** The CPU-side port: receives requests from and sends responses to
        the core or cache above. */
    CpuSidePort cpuSidePort;

    /** The memory-side port: sends requests to and receives responses
        from the memory system below. */
    MemSidePort memSidePort;

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    WriteQueue writeBuffer;

    /** Tag and data storage */
    BaseTags *tags;

    /** Prefetcher */
    BasePrefetcher *prefetcher;

    /** To probe when a cache hit occurs */
    ProbePointArg<PacketPtr> *ppHit;

    /** To probe when a cache miss occurs */
    ProbePointArg<PacketPtr> *ppMiss;

    /** To probe when a cache fill occurs */
    ProbePointArg<PacketPtr> *ppFill;

    /**
     * The writeAllocator drives optimizations for streaming writes.
     * It first determines whether a WriteReq MSHR should be delayed,
     * thus ensuring that we wait longer in cases when we are write
     * coalescing and allowing all the bytes of the line to be written
     * before the MSHR packet is sent downstream. This works in unison
     * with the tracking in the MSHR to check if the entire line is
     * written. The write mode also affects the behaviour on filling
     * any whole-line writes. Normally the cache allocates the line
     * when receiving the InvalidateResp, but after seeing enough
     * consecutive lines we switch to using the tempBlock, and thus
     * end up not allocating the line, and instead turning the
     * whole-line write into a writeback straight away.
     */
    WriteAllocator * const writeAllocator;

    /**
     * Temporary cache block for occasional transitory use. We use
     * the tempBlock to fill when allocation fails (e.g., when there
     * is an outstanding request that accesses the victim block) or
     * when we want to avoid allocation (e.g., exclusive caches)
     */
    TempCacheBlk *tempBlock;

    /**
     * Upstream caches need this packet until true is returned, so
     * hold it for deletion until a subsequent call
     */
    std::unique_ptr<Packet> pendingDelete;

    /**
     * Mark a request as in service (sent downstream in the memory
     * system), effectively making this MSHR the ordering point.
     */
    void markInService(MSHR *mshr, bool pending_modified_resp)
    {
        bool wasFull = mshrQueue.isFull();
        mshrQueue.markInService(mshr, pending_modified_resp);

        if (wasFull && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }
    }

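    /**
     * Mark a write queue entry as in service (sent downstream in the
     * memory system), and unblock the cache if the write buffer that
     * was previously full can now take further entries.
     */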
    void markInService(WriteQueueEntry *entry)
    {
        bool wasFull = writeBuffer.isFull();
        writeBuffer.markInService(entry);

        if (wasFull && !writeBuffer.isFull()) {
            clearBlocked(Blocked_NoWBBuffers);
        }
    }

    /**
     * Determine whether we should allocate on a fill or not. If this
     * cache is mostly inclusive with regards to the upstream cache(s)
     * we always allocate (for any non-forwarded and cacheable
     * requests). In the case of a mostly exclusive cache, we allocate
     * on fill if the packet did not come from a cache: that is, if we
     * are dealing with a whole-line write (which behaves much like a
     * writeback), the original target packet came from a non-caching
     * source, or we are performing a prefetch or LLSC.
     *
     * @param cmd Command of the incoming requesting packet
     * @return Whether we should allocate on the fill
     */
    inline bool allocOnFill(MemCmd cmd) const
    {
        return clusivity == Enums::mostly_incl ||
            cmd == MemCmd::WriteLineReq ||
            cmd == MemCmd::ReadReq ||
            cmd == MemCmd::WriteReq ||
            cmd.isPrefetch() ||
            cmd.isLLSC();
    }

    /**
     * Regenerate block address using tags.
     * Block address regeneration depends on whether we're using a temporary
     * block or not.
     *
     * @param blk The block whose address is to be regenerated.
     * @return The block's address.
     */
    Addr regenerateBlkAddr(CacheBlk* blk);

    /**
     * Calculate access latency in ticks given a tag lookup latency, and
     * whether access was a hit or miss.
     *
     * @param blk The cache block that was accessed.
     * @param delay The delay until the packet's metadata is present.
     * @param lookup_lat Latency of the respective tag lookup.
     * @return The number of ticks that pass due to a block access.
     */
    Cycles calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
                                  const Cycles lookup_lat) const;

    /**
     * Does all the processing necessary to perform the provided request.
     * @param pkt The memory request to perform.
     * @param blk The cache block to be updated.
     * @param lat The latency of the access.
     * @param writebacks List for any writebacks that need to be performed.
     * @return Boolean indicating whether the request was satisfied.
     */
    virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                        PacketList &writebacks);

    /*
     * Handle a timing request that hit in the cache
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk,
                                    Tick request_time);

    /*
     * Handle a timing request that missed in the cache
     *
     * Implementation specific handling for different cache
     * implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk,
                                     Tick forward_time,
                                     Tick request_time) = 0;

    /*
     * Handle a timing request that missed in the cache
     *
     * Common functionality across different cache implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param mshr Any existing mshr for the referenced cache block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    void handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                             Tick forward_time, Tick request_time);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     */
    virtual void recvTimingReq(PacketPtr pkt);

    /**
     * Handling the special case of uncacheable write responses to
     * make recvTimingResp less cluttered.
     */
    void handleUncacheableWriteResp(PacketPtr pkt);

    /**
     * Service non-deferred MSHR targets using the received response
     *
     * Iterates through the list of targets that can be serviced with
     * the current response.
     *
     * @param mshr The MSHR that corresponds to the response
     * @param pkt The response packet
     * @param blk The referenced block
     */
    virtual void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt,
                                    CacheBlk *blk) = 0;

    /**
     * Handles a response (cache line fill/write ack) from the bus.
     * @param pkt The response packet
     */
    virtual void recvTimingResp(PacketPtr pkt);

    /**
     * Snoops bus transactions to maintain coherence.
     * @param pkt The current bus transaction.
     */
    virtual void recvTimingSnoopReq(PacketPtr pkt) = 0;

    /**
     * Handle a snoop response.
     * @param pkt Snoop response packet
     */
    virtual void recvTimingSnoopResp(PacketPtr pkt) = 0;

    /**
     * Handle a request in atomic mode that missed in this cache
     *
     * Creates a downstream request, sends it to the memory below and
     * handles the response. As we are in atomic mode all operations
     * are performed immediately.
     *
     * @param pkt The packet with the request
     * @param blk The referenced block
     * @param writebacks A list with packets for any performed writebacks
     * @return Cycles for handling the request
     */
    virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
                                       PacketList &writebacks) = 0;

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The number of ticks required for the access.
     */
    virtual Tick recvAtomic(PacketPtr pkt);

    /**
     * Snoop for the provided request in the cache and return the estimated
     * time taken.
     * @param pkt The memory request to snoop
     * @return The number of ticks required for the snoop.
     */
    virtual Tick recvAtomicSnoop(PacketPtr pkt) = 0;

    /**
     * Performs the access specified by the request.
     *
     * @param pkt The request to perform.
     * @param from_cpu_side Whether the request came from the CPU-side
     * port (as opposed to the memory-side port)
     */
    virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side);

    /**
     * Handle doing the Compare and Swap function for SPARC.
     */
    void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);

    /**
     * Return the next queue entry to service, either a pending miss
     * from the MSHR queue, a buffered write from the write buffer, or
     * something from the prefetcher. This function is responsible
     * for prioritizing among those sources on the fly.
     */
    QueueEntry* getNextQueueEntry();

    /**
     * Insert writebacks into the write buffer
     */
    virtual void doWritebacks(PacketList& writebacks, Tick forward_time) = 0;

    /**
     * Send writebacks down the memory hierarchy in atomic mode
     */
    virtual void doWritebacksAtomic(PacketList& writebacks) = 0;

    /**
     * Create an appropriate downstream bus request packet.
     *
     * Creates a new packet with the request to be sent to the memory
     * below, or nullptr if the current request in cpu_pkt should just
     * be forwarded on.
     *
     * @param cpu_pkt The miss packet that needs to be satisfied.
     * @param blk The referenced block, can be nullptr.
     * @param needs_writable Indicates that the block must be writable
     * even if the request in cpu_pkt doesn't indicate that.
     * @param is_whole_line_write True if there are writes for the
     * whole line
     * @return A packet to be sent to the memory below
     */
    virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                                       bool needs_writable,
                                       bool is_whole_line_write) const = 0;

    /**
     * Determine if clean lines should be written back or not. In
     * cases where a downstream cache is mostly inclusive we likely
     * want it to act as a victim cache also for lines that have not
     * been modified. Hence, we cannot simply drop the line (or send a
     * clean evict), but rather need to send the actual data.
     */
    const bool writebackClean;

    /**
     * Writebacks from the tempBlock, resulting on the response path
     * in atomic mode, must happen after the call to recvAtomic has
     * finished (for the right ordering of the packets). We therefore
     * need to hold on to the packets, and have a method and an event
     * to send them.
     */
    PacketPtr tempBlockWriteback;

    /**
     * Send the outstanding tempBlock writeback. To be called after
     * recvAtomic finishes in cases where the block we filled is in
     * fact the tempBlock, and now needs to be written back.
     */
    void writebackTempBlockAtomic() {
        assert(tempBlockWriteback != nullptr);
        PacketList writebacks{tempBlockWriteback};
        doWritebacksAtomic(writebacks);
        tempBlockWriteback = nullptr;
    }

    /**
     * An event to writeback the tempBlock after recvAtomic
     * finishes. To avoid other calls to recvAtomic getting in
     * between, we create this event with a higher priority.
     */
    EventFunctionWrapper writebackTempBlockAtomicEvent;

    /**
     * Perform any necessary updates to the block and perform any data
     * exchange between the packet and the block. The flags of the
     * packet are also set accordingly.
     *
     * @param pkt Request packet from upstream that hit a block
     * @param blk Cache block that the packet hit
     * @param deferred_response Whether this request originally missed
     * @param pending_downgrade Whether the writable flag is to be removed
     */
    virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                                bool deferred_response = false,
                                bool pending_downgrade = false);

    /**
     * Maintain the clusivity of this cache by potentially
     * invalidating a block. This method works in conjunction with
     * satisfyRequest, but is separate to allow us to handle all MSHR
     * targets before potentially dropping a block.
     *
     * @param from_cache Whether we have dealt with a packet from a cache
     * @param blk The block that should potentially be dropped
     */
    void maintainClusivity(bool from_cache, CacheBlk *blk);

    /**
     * Handle a fill operation caused by a received packet.
     *
     * Populates a cache block and handles all outstanding requests for the
     * satisfied fill request. This version takes two memory requests. One
     * contains the fill data, the other is an optional target to satisfy.
     * Note that the reason we return a list of writebacks rather than
     * inserting them directly in the write buffer is that this function
     * is called by both atomic and timing-mode accesses, and in atomic
     * mode we don't mess with the write buffer (we just perform the
     * writebacks atomically once the original request is complete).
     *
     * @param pkt The memory request with the fill data.
     * @param blk The cache block if it already exists.
     * @param writebacks List for any writebacks that need to be performed.
     * @param allocate Whether to allocate a block or use the temp block
     * @return Pointer to the new cache block.
     */
    CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
                         PacketList &writebacks, bool allocate);

    /**
     * Allocate a new block and perform any necessary writebacks
     *
     * Find a victim block and if necessary prepare writebacks for any
     * existing data. May return nullptr if there are no replaceable
     * blocks. If a replaceable block is found, it inserts the new block in
     * its place. The new block, however, is not set as valid yet.
     *
     * @param pkt Packet holding the address to update
     * @param writebacks A list of writeback packets for the evicted blocks
     * @return the allocated block
     */
    CacheBlk *allocateBlock(const PacketPtr pkt, PacketList &writebacks);

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @return A packet with the writeback, can be nullptr
     */
    M5_NODISCARD virtual PacketPtr evictBlock(CacheBlk *blk) = 0;

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @param writebacks Return a list of packets with writebacks
     */
    void evictBlock(CacheBlk *blk, PacketList &writebacks);

    /**
     * Invalidate a cache block.
     *
     * @param blk Block to invalidate
     */
    void invalidateBlock(CacheBlk *blk);

    /**
     * Create a writeback request for the given block.
     *
     * @param blk The block to writeback.
     * @return The writeback request for the block.
     */
    PacketPtr writebackBlk(CacheBlk *blk);

    /**
     * Create a writeclean request for the given block.
     *
     * Creates a request that writes the block to the cache below
     * without evicting the block from the current cache.
     *
     * @param blk The block to write clean.
     * @param dest The destination of the write clean operation.
     * @param id Use the given packet id for the write clean operation.
     * @return The generated write clean packet.
     */
    PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id);

    /**
     * Write back dirty blocks in the cache using functional accesses.
     */
    virtual void memWriteback() override;

    /**
     * Invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to
     * memory. Make sure to call functionalWriteback() first if you
     * want them written to memory.
     */
    virtual void memInvalidate() override;

    /**
     * Determine if there are any dirty blocks in the cache.
     *
     * @return true if at least one block is dirty, false otherwise.
     */
    bool isDirty() const;

    /**
     * Determine if an address is in the ranges covered by this
     * cache. This is useful to filter snoops.
     *
     * @param addr Address to check against
     *
     * @return If the address in question is in range
     */
    bool inRange(Addr addr) const;

    /**
     * Find next request ready time from among possible sources.
     */
    Tick nextQueueReadyTime() const;

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of tag lookup of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles lookupLatency;

    /**
     * The latency of data access of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles dataLatency;

    /**
     * This is the forward latency of the cache. It occurs when there
     * is a cache miss and a request is forwarded downstream, in
     * particular an outbound miss.
     */
    const Cycles forwardLatency;

    /** The latency to fill a cache block */
    const Cycles fillLatency;

    /**
     * The latency of sending a response to the upper level cache/core
     * on a linefill. The responseLatency parameter captures this
     * latency.
     */
    const Cycles responseLatency;

    /**
     * Whether tags and data are accessed sequentially.
     */
    const bool sequentialAccess;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    bool forwardSnoops;

    /**
     * Clusivity with respect to the upstream cache, determining if we
     * fill into both this cache and the cache above on a miss. Note
     * that we currently do not support strict clusivity policies.
     */
    const Enums::Clusivity clusivity;

    /**
     * Is this cache read only, for example the instruction cache, or
     * table-walker cache. A cache that is read only should never see
     * any writes, and should never get any dirty data (and hence
     * never have to do any writebacks).
     */
    const bool isReadOnly;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores time the cache blocked for statistics. */
    Cycles blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses. */
    const AddrRangeList addrRanges;

  public:
    /** System we are currently operating in. */
    System *system;

    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of times a HW-prefetched block is evicted w/o reference. */
    Stats::Scalar unusedPrefetches;

    /** Number of blocks written back per thread. */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of uncacheable accesses in the MSHRs, per command and
        thread. */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable accesses in the MSHRs. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of each uncacheable MSHR access, per command
        and thread. */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of overall uncacheable MSHR accesses. */
    Stats::Formula overallMshrUncacheableLatency;

#if 0
    /** The total number of MSHR accesses per command and thread. */
    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
    /** The total number of demand MSHR accesses. */
    Stats::Formula demandMshrAccesses;
    /** The total number of MSHR accesses. */
    Stats::Formula overallMshrAccesses;
#endif

    /** The miss rate in the MSHRs per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable MSHR access, per command
        and thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable MSHR access. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** Number of replacements of valid blocks. */
    Stats::Scalar replacements;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    void regStats() override;

    /** Registers probes. */
    void regProbePoints() override;

  public:
    BaseCache(const BaseCacheParams *p, unsigned blk_size);
    ~BaseCache();

    void init() override;

    BaseMasterPort &getMasterPort(const std::string &if_name,
                                  PortID idx = InvalidPortID) override;
    BaseSlavePort &getSlavePort(const std::string &if_name,
                                PortID idx = InvalidPortID) override;

    /**
     * Query block size of a cache.
     * @return The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }

    const AddrRangeList &getAddrRanges() const { return addrRanges; }

    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
    {
        MSHR *mshr = mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize,
                                        pkt, time, order++,
                                        allocOnFill(pkt->cmd));

        if (mshrQueue.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_MSHRs);
        }

        if (sched_send) {
            // schedule the send
            schedMemSideSendEvent(time);
        }

        return mshr;
    }
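
    // Illustrative use (a sketch, not part of the interface): a concrete
    // cache implementation typically allocates a miss buffer once an
    // access has missed and no matching MSHR exists, e.g.
    //
    //     if (!satisfied && !mshr)
    //         mshr = allocateMissBuffer(pkt, clockEdge(forwardLatency));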

    void allocateWriteBuffer(PacketPtr pkt, Tick time)
    {
        // should only see writes or clean evicts here
        assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);

        Addr blk_addr = pkt->getBlockAddr(blkSize);

        WriteQueueEntry *wq_entry =
            writeBuffer.findMatch(blk_addr, pkt->isSecure());
        if (wq_entry && !wq_entry->inService) {
            DPRINTF(Cache, "Potential to merge writeback %s", pkt->print());
        }

        writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);

        if (writeBuffer.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
        }

        // schedule the send
        schedMemSideSendEvent(time);
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked() const
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curCycle();
            cpuSidePort.setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            blocked_cycles[cause] += curCycle() - blockedCycle;
            cpuSidePort.clearBlocked();
        }
    }
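
    // Typical pairing (a sketch; see markInService() and
    // allocateMissBuffer() above for the actual uses): a resource filling
    // up blocks the cache, and draining unblocks it:
    //
    //     if (mshrQueue.isFull())
    //         setBlocked(Blocked_NoMSHRs);
    //     ...
    //     if (wasFull && !mshrQueue.isFull())
    //         clearBlocked(Blocked_NoMSHRs);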

    /**
     * Schedule a send event for the memory-side port. If already
     * scheduled, this may reschedule the event at an earlier
     * time. When the specified time is reached, the port is free to
     * send either a response, a request, or a prefetch request.
     *
     * @param time The time when to attempt sending a packet.
     */
    void schedMemSideSendEvent(Tick time)
    {
        memSidePort.schedSendEvent(time);
    }

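    /**
     * Determine if this cache currently holds a block for the given
     * address.
     *
     * @param addr Block address to look up
     * @param is_secure Whether the address is in the secure space
     * @return true if a matching block is present
     */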
    bool inCache(Addr addr, bool is_secure) const {
        return tags->findBlock(addr, is_secure);
    }

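    /**
     * Determine if the given address was brought into this cache by a
     * prefetch and has not yet been referenced.
     *
     * @param addr Block address to look up
     * @param is_secure Whether the address is in the secure space
     * @return true if the block is present and was prefetched
     */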
    bool hasBeenPrefetched(Addr addr, bool is_secure) const {
        CacheBlk *block = tags->findBlock(addr, is_secure);
        if (block) {
            return block->wasPrefetched();
        } else {
            return false;
        }
    }

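    /**
     * Determine if there is an outstanding miss for the given address.
     *
     * @param addr Block address to look up
     * @param is_secure Whether the address is in the secure space
     * @return true if a matching MSHR is present
     */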
    bool inMissQueue(Addr addr, bool is_secure) const {
        return mshrQueue.findMatch(addr, is_secure);
    }

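    /**
     * Update per-command/master miss statistics, bump the packet's
     * access depth, and trigger a simulation exit if the configured
     * maximum miss count is reached.
     */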
    void incMissCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
        pkt->req->incAccessDepth();
        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }
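
    /** Update per-command/master hit statistics. */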
    void incHitCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
    }

    /**
     * Checks if the cache is coalescing writes
     *
     * @return True if the cache is coalescing writes
     */
    bool coalesce() const;

    /**
     * Cache block visitor that writes back dirty cache blocks using
     * functional writes.
     */
    void writebackVisitor(CacheBlk &blk);

    /**
     * Cache block visitor that invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to memory.
     */
    void invalidateVisitor(CacheBlk &blk);

    /**
     * Take an MSHR, turn it into a suitable downstream packet, and
     * send it out. This construct allows a queue entry to choose a suitable
     * approach based on its type.
     *
     * @param mshr The MSHR to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    virtual bool sendMSHRQueuePacket(MSHR* mshr);

    /**
     * Similar to sendMSHRQueuePacket, but for a write-queue entry
     * instead. Create the packet, and send it, and if successful also
     * mark the entry in service.
     *
     * @param wq_entry The write-queue entry to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);

    /**
     * Serialize the state of the caches
     *
     * We currently don't support checkpointing cache state, so this panics.
     */
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};

/**
 * The write allocator inspects write packets and detects streaming
 * patterns. The write allocator supports a single stream where writes
 * are expected to access consecutive locations, and keeps track of the
 * size of the area covered by the consecutive writes in byteCount.
 *
 * 1) When byteCount has surpassed the coalesceLimit, the mode
 * switches from ALLOCATE to COALESCE, where writes should be delayed
 * until the whole block is written, at which point a single packet
 * (whole line write) can service them.
 *
 * 2) When byteCount has also exceeded the noAllocateLimit (whole
 * line), we switch to NO_ALLOCATE, where writes should not allocate in
 * the cache but rather send a whole line write to the memory below.
 */
class WriteAllocator : public SimObject {
  public:
    WriteAllocator(const WriteAllocatorParams *p) :
        SimObject(p),
        coalesceLimit(p->coalesce_limit * p->block_size),
        noAllocateLimit(p->no_allocate_limit * p->block_size),
        delayThreshold(p->delay_threshold)
    {
        reset();
    }

    /**
     * Should writes be coalesced? This is true if the mode is not
     * ALLOCATE, i.e. the allocator is in either the COALESCE or the
     * NO_ALLOCATE mode.
     *
     * @return true if the cache should coalesce writes.
     */
    bool coalesce() const {
        return mode != WriteMode::ALLOCATE;
    }

    /**
     * Should writes allocate?
     *
     * @return true if the cache should allocate for writes, i.e. the
     * mode is not NO_ALLOCATE.
     */
    bool allocate() const {
        return mode != WriteMode::NO_ALLOCATE;
    }

    /**
     * Reset the write allocator state, meaning that it allocates for
     * writes and has not recorded any information about qualifying
     * writes that might trigger a switch to coalescing and later no
     * allocation.
     */
    void reset() {
        mode = WriteMode::ALLOCATE;
        byteCount = 0;
        nextAddr = 0;
    }

    /**
     * Check whether we need to delay the current write.
     *
     * @param blk_addr The block address the packet writes to
     * @return true if the current packet should be delayed
     */
    bool delay(Addr blk_addr) {
        if (delayCtr[blk_addr] > 0) {
            --delayCtr[blk_addr];
            return true;
        } else {
            return false;
        }
    }

    /**
     * Clear delay counter for the input block
     *
     * @param blk_addr The accessed cache block
     */
    void resetDelay(Addr blk_addr) {
        delayCtr.erase(blk_addr);
    }

    /**
     * Update the write mode based on the current write
     * packet. This method compares the packet's address with any
     * current stream, and updates the tracking and the mode
     * accordingly.
     *
     * @param write_addr Start address of the write request
     * @param write_size Size of the write request
     * @param blk_addr The block address that this packet writes to
     */
    void updateMode(Addr write_addr, unsigned write_size, Addr blk_addr);

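    // Mode progression for a detected write stream (an illustrative
    // summary of the class documentation above; thresholds are in bytes):
    //
    //     byteCount <= coalesceLimit                   -> ALLOCATE
    //     coalesceLimit < byteCount <= noAllocateLimit -> COALESCE
    //     noAllocateLimit < byteCount                  -> NO_ALLOCATE
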
  private:
    /**
     * The current mode for write coalescing and allocation, either
     * normal operation (ALLOCATE), write coalescing (COALESCE), or
     * write coalescing without allocation (NO_ALLOCATE).
     */
    enum class WriteMode : char {
        ALLOCATE,
        COALESCE,
        NO_ALLOCATE,
    };
    WriteMode mode;

    /** Address to match writes against to detect streams. */
    Addr nextAddr;

    /**
     * Bytes written contiguously. Saturating once we no longer
     * allocate.
     */
    uint32_t byteCount;

    /**
     * Limits for when to switch between the different write modes.
     */
    const uint32_t coalesceLimit;
    const uint32_t noAllocateLimit;
    /**
     * The number of times the allocator will delay a WriteReq MSHR.
     */
    const uint32_t delayThreshold;

    /**
     * Keep track of the number of times the allocator has delayed a
     * WriteReq MSHR.
     */
    std::unordered_map<Addr, Counter> delayCtr;
};

#endif //__MEM_CACHE_BASE_HH__
434 const Cycles lookup_lat) const;
435
436 /**
437 * Does all the processing necessary to perform the provided request.
438 * @param pkt The memory request to perform.
439 * @param blk The cache block to be updated.
440 * @param lat The latency of the access.
441 * @param writebacks List for any writebacks that need to be performed.
442 * @return Boolean indicating whether the request was satisfied.
443 */
444 virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
445 PacketList &writebacks);
446
447 /**
448 * Handle a timing request that hit in the cache
449 *
450 * @param pkt The request packet
451 * @param blk The referenced block
452 * @param request_time The tick at which the block lookup is complete
453 */
454 virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk,
455 Tick request_time);
456
457 /**
458 * Handle a timing request that missed in the cache
459 *
460 * Implementation-specific handling for different cache
461 * implementations
462 *
463 * @param pkt The request packet
464 * @param blk The referenced block
465 * @param forward_time The tick at which we can process dependent requests
466 * @param request_time The tick at which the block lookup is complete
467 */
468 virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk,
469 Tick forward_time,
470 Tick request_time) = 0;
471
472 /**
473 * Handle a timing request that missed in the cache
474 *
475 * Common functionality across different cache implementations
476 *
477 * @param pkt The request packet
478 * @param blk The referenced block
479 * @param mshr Any existing mshr for the referenced cache block
480 * @param forward_time The tick at which we can process dependent requests
481 * @param request_time The tick at which the block lookup is complete
482 */
483 void handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
484 Tick forward_time, Tick request_time);
485
486 /**
487 * Performs the access specified by the request.
488 * @param pkt The request to perform.
489 */
490 virtual void recvTimingReq(PacketPtr pkt);
491
492 /**
493 * Handling the special case of uncacheable write responses to
494 * make recvTimingResp less cluttered.
495 */
496 void handleUncacheableWriteResp(PacketPtr pkt);
497
498 /**
499 * Service non-deferred MSHR targets using the received response
500 *
501 * Iterates through the list of targets that can be serviced with
502 * the current response.
503 *
504 * @param mshr The MSHR that corresponds to the response
505 * @param pkt The response packet
506 * @param blk The referenced block
507 */
508 virtual void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt,
509 CacheBlk *blk) = 0;
510
511 /**
512 * Handles a response (cache line fill/write ack) from the bus.
513 * @param pkt The response packet
514 */
515 virtual void recvTimingResp(PacketPtr pkt);
516
517 /**
518 * Snoops bus transactions to maintain coherence.
519 * @param pkt The current bus transaction.
520 */
521 virtual void recvTimingSnoopReq(PacketPtr pkt) = 0;
522
523 /**
524 * Handle a snoop response.
525 * @param pkt Snoop response packet
526 */
527 virtual void recvTimingSnoopResp(PacketPtr pkt) = 0;
528
529 /**
530 * Handle a request in atomic mode that missed in this cache
531 *
532 * Creates a downstream request, sends it to the memory below and
533 * handles the response. As we are in atomic mode all operations
534 * are performed immediately.
535 *
536 * @param pkt The packet with the request
537 * @param blk The referenced block
538 * @param writebacks A list with packets for any performed writebacks
539 * @return Cycles for handling the request
540 */
541 virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
542 PacketList &writebacks) = 0;
543
544 /**
545 * Performs the access specified by the request.
546 * @param pkt The request to perform.
547 * @return The number of ticks required for the access.
548 */
549 virtual Tick recvAtomic(PacketPtr pkt);
550
551 /**
552 * Snoop for the provided request in the cache and return the estimated
553 * time taken.
554 * @param pkt The memory request to snoop
555 * @return The number of ticks required for the snoop.
556 */
557 virtual Tick recvAtomicSnoop(PacketPtr pkt) = 0;
558
559 /**
560 * Performs the access specified by the request.
561 *
562 * @param pkt The request to perform.
563 * @param from_cpu_side True if the access comes from the CPU side port
564 */
565 virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side);
566
567 /**
568 * Handle doing the Compare and Swap function for SPARC.
569 */
570 void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);
571
572 /**
573 * Return the next queue entry to service, either a pending miss
574 * from the MSHR queue, a buffered write from the write buffer, or
575 * something from the prefetcher. This function is responsible
576 * for prioritizing among those sources on the fly.
577 */
578 QueueEntry* getNextQueueEntry();
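    /**
     * Editorial sketch of the arbitration described above (a
     * simplified illustration; the real routine also handles blocked
     * queues, conflicting outstanding addresses, and the prefetcher):
     *
     * @code
     * MSHR *miss_mshr = mshrQueue.getNext();
     * WriteQueueEntry *wq_entry = writeBuffer.getNext();
     * // prefer the entry that arrived first (lower order number)
     * if (wq_entry && (!miss_mshr || wq_entry->order < miss_mshr->order))
     *     return wq_entry;
     * return miss_mshr; // may be nullptr
     * @endcode
     */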
579
580 /**
581 * Insert writebacks into the write buffer
582 */
583 virtual void doWritebacks(PacketList& writebacks, Tick forward_time) = 0;
584
585 /**
586 * Send writebacks down the memory hierarchy in atomic mode
587 */
588 virtual void doWritebacksAtomic(PacketList& writebacks) = 0;
589
590 /**
591 * Create an appropriate downstream bus request packet.
592 *
593 * Creates a new packet with the request to be sent to the memory
594 * below, or nullptr if the current request in cpu_pkt should just
595 * be forwarded on.
596 *
597 * @param cpu_pkt The miss packet that needs to be satisfied.
598 * @param blk The referenced block, can be nullptr.
599 * @param needs_writable Indicates that the block must be writable
600 * even if the request in cpu_pkt doesn't indicate that.
601 * @param is_whole_line_write True if there are writes for the
602 * whole line
603 * @return A packet to send to the memory below
604 */
605 virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
606 bool needs_writable,
607 bool is_whole_line_write) const = 0;
608
609 /**
610 * Determine if clean lines should be written back or not. In
611 * cases where a downstream cache is mostly inclusive we likely
612 * want it to act as a victim cache also for lines that have not
613 * been modified. Hence, we cannot simply drop the line (or send a
614 * clean evict), but rather need to send the actual data.
615 */
616 const bool writebackClean;
617
618 /**
619 * Writebacks from the tempBlock, resulting on the response path
620 * in atomic mode, must happen after the call to recvAtomic has
621 * finished (for the right ordering of the packets). We therefore
622 * need to hold on to the packets, and have a method and an event
623 * to send them.
624 */
625 PacketPtr tempBlockWriteback;
626
627 /**
628 * Send the outstanding tempBlock writeback. To be called after
629 * recvAtomic finishes in cases where the block we filled is in
630 * fact the tempBlock, and now needs to be written back.
631 */
632 void writebackTempBlockAtomic() {
633 assert(tempBlockWriteback != nullptr);
634 PacketList writebacks{tempBlockWriteback};
635 doWritebacksAtomic(writebacks);
636 tempBlockWriteback = nullptr;
637 }
638
639 /**
640 * An event to writeback the tempBlock after recvAtomic
641 * finishes. To avoid other calls to recvAtomic getting in
642 * between, we create this event with a higher priority.
643 */
644 EventFunctionWrapper writebackTempBlockAtomicEvent;
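    /**
     * Editorial sketch of the deferral described above (illustration
     * only, using evictBlock and schedule as available in this
     * class):
     *
     * @code
     * // in recvAtomic, after filling into the tempBlock:
     * assert(!tempBlockWriteback); // at most one pending writeback
     * tempBlockWriteback = evictBlock(blk);
     * // the high-priority event fires before any later recvAtomic
     * schedule(writebackTempBlockAtomicEvent, curTick());
     * @endcode
     */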
645
646 /**
647 * Perform any necessary updates to the block and perform any data
648 * exchange between the packet and the block. The flags of the
649 * packet are also set accordingly.
650 *
651 * @param pkt Request packet from upstream that hit a block
652 * @param blk Cache block that the packet hit
653 * @param deferred_response Whether this request originally missed
654 * @param pending_downgrade Whether the writable flag is to be removed
655 */
656 virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
657 bool deferred_response = false,
658 bool pending_downgrade = false);
659
660 /**
661 * Maintain the clusivity of this cache by potentially
662 * invalidating a block. This method works in conjunction with
663 * satisfyRequest, but is separate to allow us to handle all MSHR
664 * targets before potentially dropping a block.
665 *
666 * @param from_cache Whether we have dealt with a packet from a cache
667 * @param blk The block that should potentially be dropped
668 */
669 void maintainClusivity(bool from_cache, CacheBlk *blk);
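    /**
     * Editorial sketch of the drop decision described above, for a
     * mostly-exclusive cache (illustration only; mostly_excl is
     * assumed to be the Clusivity value from enums/Clusivity.hh):
     *
     * @code
     * void
     * BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
     * {
     *     if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
     *         clusivity == Enums::mostly_excl) {
     *         // a clean block that just serviced a cache above is
     *         // dropped to approximate exclusivity
     *         invalidateBlock(blk);
     *     }
     * }
     * @endcode
     */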
670
671 /**
672 * Handle a fill operation caused by a received packet.
673 *
674 * Populates a cache block and handles all outstanding requests for the
675 * satisfied fill request. This version takes two memory requests. One
676 * contains the fill data, the other is an optional target to satisfy.
677 * Note that the reason we return a list of writebacks rather than
678 * inserting them directly in the write buffer is that this function
679 * is called by both atomic and timing-mode accesses, and in atomic
680 * mode we don't mess with the write buffer (we just perform the
681 * writebacks atomically once the original request is complete).
682 *
683 * @param pkt The memory request with the fill data.
684 * @param blk The cache block if it already exists.
685 * @param writebacks List for any writebacks that need to be performed.
686 * @param allocate Whether to allocate a block or use the temp block
687 * @return Pointer to the new cache block.
688 */
689 CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
690 PacketList &writebacks, bool allocate);
691
692 /**
693 * Allocate a new block and perform any necessary writebacks
694 *
695 * Find a victim block and if necessary prepare writebacks for any
696 * existing data. May return nullptr if there are no replaceable
697 * blocks. If a replaceable block is found, it inserts the new block in
698 * its place. The new block, however, is not set as valid yet.
699 *
700 * @param pkt Packet holding the address to update
701 * @param writebacks A list of writeback packets for the evicted blocks
702 * @return the allocated block
703 */
704 CacheBlk *allocateBlock(const PacketPtr pkt, PacketList &writebacks);
705 /**
706 * Evict a cache block.
707 *
708 * Performs a writeback if necessary and invalidates the block
709 *
710 * @param blk Block to invalidate
711 * @return A packet with the writeback, can be nullptr
712 */
713 M5_NODISCARD virtual PacketPtr evictBlock(CacheBlk *blk) = 0;
714
715 /**
716 * Evict a cache block.
717 *
718 * Performs a writeback if necessary and invalidates the block
719 *
720 * @param blk Block to invalidate
721 * @param writebacks Return a list of packets with writebacks
722 */
723 void evictBlock(CacheBlk *blk, PacketList &writebacks);
724
725 /**
726 * Invalidate a cache block.
727 *
728 * @param blk Block to invalidate
729 */
730 void invalidateBlock(CacheBlk *blk);
731
732 /**
733 * Create a writeback request for the given block.
734 *
735 * @param blk The block to writeback.
736 * @return The writeback request for the block.
737 */
738 PacketPtr writebackBlk(CacheBlk *blk);
739
740 /**
741 * Create a writeclean request for the given block.
742 *
743 * Creates a request that writes the block to the cache below
744 * without evicting the block from the current cache.
745 *
746 * @param blk The block to write clean.
747 * @param dest The destination of the write clean operation.
748 * @param id Use the given packet id for the write clean operation.
749 * @return The generated write clean packet.
750 */
751 PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id);
752
753 /**
754 * Write back dirty blocks in the cache using functional accesses.
755 */
756 virtual void memWriteback() override;
757
758 /**
759 * Invalidates all blocks in the cache.
760 *
761 * @warn Dirty cache lines will not be written back to
762 * memory. Make sure to call memWriteback() first if you want
763 * to write them to memory.
764 */
765 virtual void memInvalidate() override;
766
767 /**
768 * Determine if there are any dirty blocks in the cache.
769 *
770 * @return true if at least one block is dirty, false otherwise.
771 */
772 bool isDirty() const;
773
774 /**
775 * Determine if an address is in the ranges covered by this
776 * cache. This is useful to filter snoops.
777 *
778 * @param addr Address to check against
779 *
780 * @return If the address in question is in range
781 */
782 bool inRange(Addr addr) const;
783
784 /**
785 * Find next request ready time from among possible sources.
786 */
787 Tick nextQueueReadyTime() const;
788
789 /** Block size of this cache */
790 const unsigned blkSize;
791
792 /**
793 * The latency of a tag lookup in this cache, incurred whenever
794 * the cache is accessed.
795 */
796 const Cycles lookupLatency;
797
798 /**
799 * The latency of a data access in this cache, incurred whenever
800 * the data array is accessed.
801 */
802 const Cycles dataLatency;
803
804 /**
805 * This is the forward latency of the cache. It occurs when there
806 * is a cache miss and a request is forwarded downstream, in
807 * particular an outbound miss.
808 */
809 const Cycles forwardLatency;
810
811 /** The latency to fill a cache block */
812 const Cycles fillLatency;
813
814 /**
815 * The latency of sending a response on a line fill to the
816 * upper-level cache or core, as captured by the responseLatency
817 * parameter.
818 */
819 const Cycles responseLatency;
820
821 /**
822 * Whether tags and data are accessed sequentially.
823 */
824 const bool sequentialAccess;
825
826 /** The number of targets for each MSHR. */
827 const int numTarget;
828
829 /** Do we forward snoops from mem side port through to cpu side port? */
830 bool forwardSnoops;
831
832 /**
833 * Clusivity with respect to the upstream cache, determining if we
834 * fill into both this cache and the cache above on a miss. Note
835 * that we currently do not support strict clusivity policies.
836 */
837 const Enums::Clusivity clusivity;
838
839 /**
840 * Is this cache read only, for example the instruction cache, or
841 * table-walker cache. A cache that is read only should never see
842 * any writes, and should never get any dirty data (and hence
843 * never have to do any writebacks).
844 */
845 const bool isReadOnly;
846
847 /**
848 * Bit vector of the blocking reasons for the access path.
849 * @sa #BlockedCause
850 */
851 uint8_t blocked;
852
853 /** Increasing order number assigned to each incoming request. */
854 uint64_t order;
855
856 /** Stores time the cache blocked for statistics. */
857 Cycles blockedCycle;
858
859 /** Pointer to the MSHR that has no targets. */
860 MSHR *noTargetMSHR;
861
862 /** The number of misses to trigger an exit event. */
863 Counter missCount;
864
865 /**
866 * The address range to which the cache responds on the CPU side.
867 * Normally this is all possible memory addresses. */
868 const AddrRangeList addrRanges;
869
870 public:
871 /** System we are currently operating in. */
872 System *system;
873
874 // Statistics
875 /**
876 * @addtogroup CacheStatistics
877 * @{
878 */
879
880 /** Number of hits per thread for each type of command.
881 @sa Packet::Command */
882 Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
883 /** Number of hits for demand accesses. */
884 Stats::Formula demandHits;
886 /** Number of hits for all accesses. */
886 Stats::Formula overallHits;
887
888 /** Number of misses per thread for each type of command.
889 @sa Packet::Command */
890 Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
891 /** Number of misses for demand accesses. */
892 Stats::Formula demandMisses;
893 /** Number of misses for all accesses. */
894 Stats::Formula overallMisses;
895
896 /**
897 * Total number of cycles per thread/command spent waiting for a miss.
898 * Used to calculate the average miss latency.
899 */
900 Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
901 /** Total number of cycles spent waiting for demand misses. */
902 Stats::Formula demandMissLatency;
903 /** Total number of cycles spent waiting for all misses. */
904 Stats::Formula overallMissLatency;
905
906 /** The number of accesses per command and thread. */
907 Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
908 /** The number of demand accesses. */
909 Stats::Formula demandAccesses;
910 /** The number of overall accesses. */
911 Stats::Formula overallAccesses;
912
913 /** The miss rate per command and thread. */
914 Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
915 /** The miss rate of all demand accesses. */
916 Stats::Formula demandMissRate;
917 /** The miss rate for all accesses. */
918 Stats::Formula overallMissRate;
919
920 /** The average miss latency per command and thread. */
921 Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
922 /** The average miss latency for demand misses. */
923 Stats::Formula demandAvgMissLatency;
924 /** The average miss latency for all misses. */
925 Stats::Formula overallAvgMissLatency;
926
927 /** The total number of cycles blocked for each blocked cause. */
928 Stats::Vector blocked_cycles;
929 /** The number of times this cache blocked for each blocked cause. */
930 Stats::Vector blocked_causes;
931
932 /** The average number of cycles blocked for each blocked cause. */
933 Stats::Formula avg_blocked;
934
935 /** The number of times a HW-prefetched block is evicted w/o reference. */
936 Stats::Scalar unusedPrefetches;
937
938 /** Number of blocks written back per thread. */
939 Stats::Vector writebacks;
940
941 /** Number of misses that hit in the MSHRs per command and thread. */
942 Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
943 /** Demand misses that hit in the MSHRs. */
944 Stats::Formula demandMshrHits;
945 /** Total number of misses that hit in the MSHRs. */
946 Stats::Formula overallMshrHits;
947
948 /** Number of misses that miss in the MSHRs, per command and thread. */
949 Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
950 /** Demand misses that miss in the MSHRs. */
951 Stats::Formula demandMshrMisses;
952 /** Total number of misses that miss in the MSHRs. */
953 Stats::Formula overallMshrMisses;
954
955 /** Number of uncacheable accesses handled by the MSHRs, per command and thread. */
956 Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
957 /** Total number of uncacheable accesses handled by the MSHRs. */
958 Stats::Formula overallMshrUncacheable;
959
960 /** Total cycle latency of each MSHR miss, per command and thread. */
961 Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
962 /** Total cycle latency of demand MSHR misses. */
963 Stats::Formula demandMshrMissLatency;
964 /** Total cycle latency of overall MSHR misses. */
965 Stats::Formula overallMshrMissLatency;
966
967 /** Total cycle latency of uncacheable MSHR accesses, per command and thread. */
968 Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
969 /** Total cycle latency of all uncacheable MSHR accesses. */
970 Stats::Formula overallMshrUncacheableLatency;
971
972#if 0
973 /** The total number of MSHR accesses per command and thread. */
974 Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
975 /** The total number of demand MSHR accesses. */
976 Stats::Formula demandMshrAccesses;
977 /** The total number of MSHR accesses. */
978 Stats::Formula overallMshrAccesses;
979#endif
980
982 /** The miss rate in the MSHRs per command and thread. */
982 Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
983 /** The demand miss rate in the MSHRs. */
984 Stats::Formula demandMshrMissRate;
985 /** The overall miss rate in the MSHRs. */
986 Stats::Formula overallMshrMissRate;
987
988 /** The average latency of an MSHR miss, per command and thread. */
989 Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
990 /** The average latency of a demand MSHR miss. */
991 Stats::Formula demandAvgMshrMissLatency;
992 /** The average overall latency of an MSHR miss. */
993 Stats::Formula overallAvgMshrMissLatency;
994
995 /** The average latency of an uncacheable MSHR access, per command and thread. */
996 Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
997 /** The average overall latency of an uncacheable MSHR access. */
998 Stats::Formula overallAvgMshrUncacheableLatency;
999
1000 /** Number of replacements of valid blocks. */
1001 Stats::Scalar replacements;
1002
1003 /**
1004 * @}
1005 */
1006
1007 /**
1008 * Register stats for this object.
1009 */
1010 void regStats() override;
1011
1012 /** Registers probes. */
1013 void regProbePoints() override;
1014
1015 public:
1016 BaseCache(const BaseCacheParams *p, unsigned blk_size);
1017 ~BaseCache();
1018
1019 void init() override;
1020
1021 BaseMasterPort &getMasterPort(const std::string &if_name,
1022 PortID idx = InvalidPortID) override;
1023 BaseSlavePort &getSlavePort(const std::string &if_name,
1024 PortID idx = InvalidPortID) override;
1025
1026 /**
1027 * Query block size of a cache.
1028 * @return The block size
1029 */
1030 unsigned
1031 getBlockSize() const
1032 {
1033 return blkSize;
1034 }
1035
1036 const AddrRangeList &getAddrRanges() const { return addrRanges; }
1037
1038 MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
1039 {
1040 MSHR *mshr = mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize,
1041 pkt, time, order++,
1042 allocOnFill(pkt->cmd));
1043
1044 if (mshrQueue.isFull()) {
1045 setBlocked((BlockedCause)MSHRQueue_MSHRs);
1046 }
1047
1048 if (sched_send) {
1049 // schedule the send
1050 schedMemSideSendEvent(time);
1051 }
1052
1053 return mshr;
1054 }
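    /**
     * Editorial usage sketch (hypothetical call site): a timing-mode
     * miss would typically allocate its MSHR at the point where the
     * downstream request may first be issued:
     *
     * @code
     * Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
     * allocateMissBuffer(pkt, forward_time);
     * @endcode
     */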
1055
1056 void allocateWriteBuffer(PacketPtr pkt, Tick time)
1057 {
1058 // should only see writes or clean evicts here
1059 assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);
1060
1061 Addr blk_addr = pkt->getBlockAddr(blkSize);
1062
1063 WriteQueueEntry *wq_entry =
1064 writeBuffer.findMatch(blk_addr, pkt->isSecure());
1065 if (wq_entry && !wq_entry->inService) {
1066 DPRINTF(Cache, "Potential to merge writeback %s", pkt->print());
1067 }
1068
1069 writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);
1070
1071 if (writeBuffer.isFull()) {
1072 setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
1073 }
1074
1075 // schedule the send
1076 schedMemSideSendEvent(time);
1077 }
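    /**
     * Editorial usage sketch (hypothetical call site): writeback
     * packets collected on the fill path can be pushed into the
     * write buffer in one pass:
     *
     * @code
     * for (PacketPtr wb_pkt : writebacks)
     *     allocateWriteBuffer(wb_pkt, forward_time);
     * @endcode
     */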
1078
1079 /**
1080 * Returns true if the cache is blocked for accesses.
1081 */
1082 bool isBlocked() const
1083 {
1084 return blocked != 0;
1085 }
1086
1087 /**
1088 * Marks the access path of the cache as blocked for the given cause. This
1089 * also sets the blocked flag in the slave interface.
1090 * @param cause The reason for the cache blocking.
1091 */
1092 void setBlocked(BlockedCause cause)
1093 {
1094 uint8_t flag = 1 << cause;
1095 if (blocked == 0) {
1096 blocked_causes[cause]++;
1097 blockedCycle = curCycle();
1098 cpuSidePort.setBlocked();
1099 }
1100 blocked |= flag;
1101 DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
1102 }
1103
1104 /**
1105 * Marks the cache as unblocked for the given cause. This also clears the
1106 * blocked flags in the appropriate interfaces.
1107 * @param cause The newly unblocked cause.
1108 * @warning Calling this function can cause a blocked request on the bus to
1109 * access the cache. The cache must be in a state to handle that request.
1110 */
1111 void clearBlocked(BlockedCause cause)
1112 {
1113 uint8_t flag = 1 << cause;
1114 blocked &= ~flag;
1115 DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
1116 if (blocked == 0) {
1117 blocked_cycles[cause] += curCycle() - blockedCycle;
1118 cpuSidePort.clearBlocked();
1119 }
1120 }
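    /**
     * Editorial illustration: blocked is a bit vector, so overlapping
     * causes only unblock the cache once every cause is cleared
     * (assuming the BlockedCause values declared earlier in this
     * file):
     *
     * @code
     * setBlocked(Blocked_NoMSHRs);     // blocked, CPU side port closed
     * setBlocked(Blocked_NoTargets);   // second cause recorded
     * clearBlocked(Blocked_NoMSHRs);   // still blocked
     * clearBlocked(Blocked_NoTargets); // now unblocked
     * @endcode
     */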
1121
1122 /**
1123 * Schedule a send event for the memory-side port. If already
1124 * scheduled, this may reschedule the event at an earlier
1125 * time. When the specified time is reached, the port is free to
1126 * send either a response, a request, or a prefetch request.
1127 *
1128 * @param time The time when to attempt sending a packet.
1129 */
1130 void schedMemSideSendEvent(Tick time)
1131 {
1132 memSidePort.schedSendEvent(time);
1133 }
1134
1135 bool inCache(Addr addr, bool is_secure) const {
1136 return tags->findBlock(addr, is_secure);
1137 }
1138
1139 bool hasBeenPrefetched(Addr addr, bool is_secure) const {
1140 CacheBlk *block = tags->findBlock(addr, is_secure);
1141 if (block) {
1142 return block->wasPrefetched();
1143 } else {
1144 return false;
1145 }
1146 }
1147
1148 bool inMissQueue(Addr addr, bool is_secure) const {
1149 return mshrQueue.findMatch(addr, is_secure);
1150 }
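    /**
     * Editorial usage sketch: a prefetcher can combine these queries
     * to filter out redundant candidates (hypothetical call site):
     *
     * @code
     * bool redundant = inCache(addr, is_secure) ||
     *                  inMissQueue(addr, is_secure);
     * @endcode
     */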
1151
1152 void incMissCount(PacketPtr pkt)
1153 {
1154 assert(pkt->req->masterId() < system->maxMasters());
1155 misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
1156 pkt->req->incAccessDepth();
1157 if (missCount) {
1158 --missCount;
1159 if (missCount == 0)
1160 exitSimLoop("A cache reached the maximum miss count");
1161 }
1162 }
1163 void incHitCount(PacketPtr pkt)
1164 {
1165 assert(pkt->req->masterId() < system->maxMasters());
1166 hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
1167
1168 }
1169
1170 /**
1171 * Checks if the cache is coalescing writes
1172 *
1173 * @return True if the cache is coalescing writes
1174 */
1175 bool coalesce() const;
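    /**
     * Editorial sketch (illustration only, assuming an optional
     * writeAllocator member is declared earlier in this class):
     *
     * @code
     * bool BaseCache::coalesce() const
     * {
     *     return writeAllocator && writeAllocator->coalesce();
     * }
     * @endcode
     */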
1176
1177
1178 /**
1179 * Cache block visitor that writes back dirty cache blocks using
1180 * functional writes.
1181 */
1182 void writebackVisitor(CacheBlk &blk);
1183
1184 /**
1185 * Cache block visitor that invalidates all blocks in the cache.
1186 *
1187 * @warn Dirty cache lines will not be written back to memory.
1188 */
1189 void invalidateVisitor(CacheBlk &blk);
1190
1191 /**
1192 * Take an MSHR, turn it into a suitable downstream packet, and
1193 * send it out. This construct allows a queue entry to choose the
1194 * appropriate approach based on its type.
1195 *
1196 * @param mshr The MSHR to turn into a packet and send
1197 * @return True if the port is waiting for a retry
1198 */
1199 virtual bool sendMSHRQueuePacket(MSHR* mshr);
1200
1201 /**
1202 * Similar to sendMSHRQueuePacket, but for a write-queue entry
1203 * instead. Create the packet, send it, and if successful also
1204 * mark the entry in service.
1205 *
1206 * @param wq_entry The write-queue entry to turn into a packet and send
1207 * @return True if the port is waiting for a retry
1208 */
1209 bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);
1210
1211 /**
1212 * Serialize the state of the caches
1213 *
1214 * We currently don't support checkpointing cache state, so this panics.
1215 */
1216 void serialize(CheckpointOut &cp) const override;
1217 void unserialize(CheckpointIn &cp) override;
1218};
1219
1220/**
1221 * The write allocator inspects write packets and detects streaming
1222 * patterns. The write allocator supports a single stream where writes
1223 * are expected to access consecutive locations, and it keeps track of
1224 * the size of the area covered by the consecutive writes in byteCount.
1225 *
1226 * 1) When byteCount surpasses the coalesceLimit, the mode switches
1227 * from ALLOCATE to COALESCE, where writes are delayed until the
1228 * whole block is written, at which point a single packet (a
1229 * whole-line write) can service them.
1230 *
1231 * 2) When byteCount also exceeds the noAllocateLimit (a whole
1232 * line), we switch to NO_ALLOCATE, where writes do not allocate in
1233 * the cache but instead send a whole-line write to the memory below.
1234 */
1235class WriteAllocator : public SimObject {
1236 public:
1237 WriteAllocator(const WriteAllocatorParams *p) :
1238 SimObject(p),
1239 coalesceLimit(p->coalesce_limit * p->block_size),
1240 noAllocateLimit(p->no_allocate_limit * p->block_size),
1241 delayThreshold(p->delay_threshold)
1242 {
1243 reset();
1244 }
1245
1246 /**
1247 * Should writes be coalesced? This is true if the mode is
1248 * COALESCE or NO_ALLOCATE.
1249 *
1250 * @return True if the cache should coalesce writes.
1251 */
1252 bool coalesce() const {
1253 return mode != WriteMode::ALLOCATE;
1254 }
1255
1256 /**
1257 * Should writes allocate?
1258 *
1259 * @return True if the cache should allocate for writes.
1260 */
1261 bool allocate() const {
1262 return mode != WriteMode::NO_ALLOCATE;
1263 }
1264
1265 /**
1266 * Reset the write allocator state, meaning that it allocates for
1267 * writes and has not recorded any information about qualifying
1268 * writes that might trigger a switch to coalescing and later no
1269 * allocation.
1270 */
1271 void reset() {
1272 mode = WriteMode::ALLOCATE;
1273 byteCount = 0;
1274 nextAddr = 0;
1275 }
1276
1277 /**
1278 * Check whether the current write needs to be delayed; doing so consumes one delay credit for the block.
1279 *
1280 * @param blk_addr The block address the packet writes to
1281 * @return True if the current packet should be delayed
1282 */
1283 bool delay(Addr blk_addr) {
1284 if (delayCtr[blk_addr] > 0) {
1285 --delayCtr[blk_addr];
1286 return true;
1287 } else {
1288 return false;
1289 }
1290 }
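    /**
     * Editorial usage sketch (hypothetical call site in the owning
     * cache): before issuing a coalescing WriteReq MSHR, the cache
     * can ask whether to hold it back a little longer:
     *
     * @code
     * if (writeAllocator->coalesce() &&
     *     writeAllocator->delay(mshr->blkAddr)) {
     *     // defer the send, hoping to merge the whole line first
     *     return false;
     * }
     * @endcode
     */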
1291
1292 /**
1293 * Clear the delay counter for the given block
1294 *
1295 * @param blk_addr The block address of the accessed cache block
1296 */
1297 void resetDelay(Addr blk_addr) {
1298 delayCtr.erase(blk_addr);
1299 }
1300
1301 /**
1302 * Update the write mode based on the current write
1303 * packet. This method compares the packet's address with any
1304 * current stream, and updates the tracking and the mode
1305 * accordingly.
1306 *
1307 * @param write_addr Start address of the write request
1308 * @param write_size Size of the write request
1309 * @param blk_addr The block address that this packet writes to
1310 */
1311 void updateMode(Addr write_addr, unsigned write_size, Addr blk_addr);
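    /**
     * Editorial illustration: a plausible shape for updateMode,
     * following the transition rules in the class description (a
     * sketch for exposition, not the definitive implementation):
     *
     * @code
     * void
     * WriteAllocator::updateMode(Addr write_addr, unsigned write_size,
     *                            Addr blk_addr)
     * {
     *     if (nextAddr == write_addr) {
     *         // the stream continues: renew the delay budget for
     *         // this block and grow the contiguous byte count
     *         delayCtr[blk_addr] = delayThreshold;
     *         if (mode != WriteMode::NO_ALLOCATE) {
     *             byteCount += write_size;
     *             if (mode == WriteMode::ALLOCATE &&
     *                 byteCount > coalesceLimit) {
     *                 mode = WriteMode::COALESCE;
     *             } else if (mode == WriteMode::COALESCE &&
     *                        byteCount > noAllocateLimit) {
     *                 mode = WriteMode::NO_ALLOCATE;
     *             }
     *         }
     *         nextAddr = write_addr + write_size;
     *     } else {
     *         // the stream is broken: start tracking afresh
     *         byteCount = write_size;
     *         mode = WriteMode::ALLOCATE;
     *         resetDelay(blk_addr);
     *     }
     * }
     * @endcode
     */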
1312
1313 private:
1314 /**
1315 * The current mode for write coalescing and allocation, either
1316 * normal operation (ALLOCATE), write coalescing (COALESCE), or
1317 * write coalescing without allocation (NO_ALLOCATE).
1318 */
1319 enum class WriteMode : char {
1320 ALLOCATE,
1321 COALESCE,
1322 NO_ALLOCATE,
1323 };
1324 WriteMode mode;
1325
1326 /** Address to match writes against to detect streams. */
1327 Addr nextAddr;
1328
1329 /**
1330 * Bytes written contiguously. Saturating once we no longer
1331 * allocate.
1332 */
1333 uint32_t byteCount;
1334
1335 /**
1336 * Limits for when to switch between the different write modes.
1337 */
1338 const uint32_t coalesceLimit;
1339 const uint32_t noAllocateLimit;
1340 /**
1341 * The number of times the allocator will delay a WriteReq MSHR.
1342 */
1343 const uint32_t delayThreshold;
1344
1345 /**
1346 * Per-block count of the remaining number of times the
1347 * allocator will delay a WriteReq MSHR.
1348 */
1349 std::unordered_map<Addr, Counter> delayCtr;
1350};
1351
1352#endif //__MEM_CACHE_BASE_HH__