base.hh (12724:4f6fac3191d2, updated from 12702:27cb33a96e0f)
1/*
2 * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2003-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Erik Hallnor
41 * Steve Reinhardt
42 * Ron Dreslinski
43 * Andreas Hansson
44 * Nikos Nikoleris
45 */
46
47/**
48 * @file
49 * Declares a basic cache interface BaseCache.
50 */
51
52#ifndef __MEM_CACHE_BASE_HH__
53#define __MEM_CACHE_BASE_HH__
54
55#include <cassert>
56#include <cstdint>
57#include <string>
58
59#include "base/addr_range.hh"
60#include "base/statistics.hh"
61#include "base/trace.hh"
62#include "base/types.hh"
63#include "debug/Cache.hh"
64#include "debug/CachePort.hh"
65#include "enums/Clusivity.hh"
66#include "mem/cache/blk.hh"
67#include "mem/cache/mshr_queue.hh"
68#include "mem/cache/tags/base.hh"
69#include "mem/cache/write_queue.hh"
70#include "mem/cache/write_queue_entry.hh"
71#include "mem/mem_object.hh"
72#include "mem/packet.hh"
73#include "mem/packet_queue.hh"
74#include "mem/qport.hh"
75#include "mem/request.hh"
76#include "sim/eventq.hh"
77#include "sim/serialize.hh"
78#include "sim/sim_exit.hh"
79#include "sim/system.hh"
80
81class BaseMasterPort;
82class BasePrefetcher;
83class BaseSlavePort;
84class MSHR;
85class MasterPort;
86class QueueEntry;
87struct BaseCacheParams;
88
89/**
90 * A basic cache interface. Implements some common functions for speed.
91 */
92class BaseCache : public MemObject
93{
94 protected:
95 /**
96 * Indexes to enumerate the MSHR queues.
97 */
98 enum MSHRQueueIndex {
99 MSHRQueue_MSHRs,
100 MSHRQueue_WriteBuffer
101 };
102
103 public:
104 /**
105 * Reasons for caches to be blocked.
106 */
107 enum BlockedCause {
108 Blocked_NoMSHRs = MSHRQueue_MSHRs,
109 Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
110 Blocked_NoTargets,
111 NUM_BLOCKED_CAUSES
112 };
113
114 protected:
115
116 /**
117 * A cache master port is used for the memory-side port of the
118 * cache, and in addition to the basic timing port that only sends
119 * response packets through a transmit list, it also offers the
120 * ability to schedule and send request packets (requests &
121 * writebacks). The send event is scheduled through schedSendEvent,
122 * and the sendDeferredPacket of the timing port is modified to
123 * consider both the transmit list and the requests from the MSHR.
124 */
125 class CacheMasterPort : public QueuedMasterPort
126 {
127
128 public:
129
130 /**
131 * Schedule a send of a request packet (from the MSHR). Note
132 * that we could already have a retry outstanding.
133 */
134 void schedSendEvent(Tick time)
135 {
136 DPRINTF(CachePort, "Scheduling send event at %llu\n", time);
137 reqQueue.schedSendEvent(time);
138 }
139
140 protected:
141
142 CacheMasterPort(const std::string &_name, BaseCache *_cache,
143 ReqPacketQueue &_reqQueue,
144 SnoopRespPacketQueue &_snoopRespQueue) :
145 QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
146 { }
147
148 /**
149 * Memory-side port always snoops.
150 *
151 * @return always true
152 */
153 virtual bool isSnooping() const { return true; }
154 };
155
156 /**
157 * Override the default behaviour of sendDeferredPacket to enable
158 * the memory-side cache port to also send requests based on the
159 * current MSHR status. This queue has a pointer to our specific
160 * cache implementation and is used by the MemSidePort.
161 */
162 class CacheReqPacketQueue : public ReqPacketQueue
163 {
164
165 protected:
166
167 BaseCache &cache;
168 SnoopRespPacketQueue &snoopRespQueue;
169
170 public:
171
172 CacheReqPacketQueue(BaseCache &cache, MasterPort &port,
173 SnoopRespPacketQueue &snoop_resp_queue,
174 const std::string &label) :
175 ReqPacketQueue(cache, port, label), cache(cache),
176 snoopRespQueue(snoop_resp_queue) { }
177
178 /**
179 * Override the normal sendDeferredPacket and do not only
180 * consider the transmit list (used for responses), but also
181 * requests.
182 */
183 virtual void sendDeferredPacket();
184
185 /**
186 * Check if there is a conflicting snoop response about to be
187         * sent out, and if so simply stall any requests, and schedule
188 * a send event at the same time as the next snoop response is
189 * being sent out.
190 */
191 bool checkConflictingSnoop(Addr addr)
192 {
193 if (snoopRespQueue.hasAddr(addr)) {
194 DPRINTF(CachePort, "Waiting for snoop response to be "
195 "sent\n");
196 Tick when = snoopRespQueue.deferredPacketReadyTime();
197 schedSendEvent(when);
198 return true;
199 }
200 return false;
201 }
202 };
203
204
205 /**
206 * The memory-side port extends the base cache master port with
207 * access functions for functional, atomic and timing snoops.
208 */
209 class MemSidePort : public CacheMasterPort
210 {
211 private:
212
213 /** The cache-specific queue. */
214 CacheReqPacketQueue _reqQueue;
215
216 SnoopRespPacketQueue _snoopRespQueue;
217
218 // a pointer to our specific cache implementation
219 BaseCache *cache;
220
221 protected:
222
223 virtual void recvTimingSnoopReq(PacketPtr pkt);
224
225 virtual bool recvTimingResp(PacketPtr pkt);
226
227 virtual Tick recvAtomicSnoop(PacketPtr pkt);
228
229 virtual void recvFunctionalSnoop(PacketPtr pkt);
230
231 public:
232
233 MemSidePort(const std::string &_name, BaseCache *_cache,
234 const std::string &_label);
235 };
236
237 /**
238 * A cache slave port is used for the CPU-side port of the cache,
239 * and it is basically a simple timing port that uses a transmit
240 * list for responses to the CPU (or connected master). In
241 * addition, it has the functionality to block the port for
242 * incoming requests. If blocked, the port will issue a retry once
243 * unblocked.
244 */
245 class CacheSlavePort : public QueuedSlavePort
246 {
247
248 public:
249
250 /** Do not accept any new requests. */
251 void setBlocked();
252
253 /** Return to normal operation and accept new requests. */
254 void clearBlocked();
255
256 bool isBlocked() const { return blocked; }
257
258 protected:
259
260 CacheSlavePort(const std::string &_name, BaseCache *_cache,
261 const std::string &_label);
262
263 /** A normal packet queue used to store responses. */
264 RespPacketQueue queue;
265
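        /** Whether the port is currently blocked and rejecting new requests. */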
266 bool blocked;
267
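        /** Whether a retry must be sent to the peer once the port is unblocked. */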
268 bool mustSendRetry;
269
270 private:
271
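        /** Send the pending retry to the peer port; scheduled from clearBlocked(). */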
272 void processSendRetry();
273
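        /** Event wrapping processSendRetry, so the retry is issued from the
            event queue rather than directly from clearBlocked(). */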
274 EventFunctionWrapper sendRetryEvent;
275
276 };
277
278 /**
279 * The CPU-side port extends the base cache slave port with access
280 * functions for functional, atomic and timing requests.
281 */
282 class CpuSidePort : public CacheSlavePort
283 {
284 private:
285
286 // a pointer to our specific cache implementation
287 BaseCache *cache;
288
289 protected:
290 virtual bool recvTimingSnoopResp(PacketPtr pkt) override;
291
292 virtual bool tryTiming(PacketPtr pkt) override;
293
294 virtual bool recvTimingReq(PacketPtr pkt) override;
295
296 virtual Tick recvAtomic(PacketPtr pkt) override;
297
298 virtual void recvFunctional(PacketPtr pkt) override;
299
300 virtual AddrRangeList getAddrRanges() const override;
301
302 public:
303
304 CpuSidePort(const std::string &_name, BaseCache *_cache,
305 const std::string &_label);
306
307 };
308
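    /** The concrete port instances: requests arrive on cpuSidePort and are
        forwarded downstream through memSidePort. */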
309 CpuSidePort cpuSidePort;
310 MemSidePort memSidePort;
311
312 protected:
313
314 /** Miss status registers */
315 MSHRQueue mshrQueue;
316
317 /** Write/writeback buffer */
318 WriteQueue writeBuffer;
319
320 /** Tag and data Storage */
321 BaseTags *tags;
322
323 /** Prefetcher */
324 BasePrefetcher *prefetcher;
325
326 /**
327 * Notify the prefetcher on every access, not just misses.
328 */
329 const bool prefetchOnAccess;
330
331 /**
332 * Temporary cache block for occasional transitory use. We use
333 * the tempBlock to fill when allocation fails (e.g., when there
334 * is an outstanding request that accesses the victim block) or
335 * when we want to avoid allocation (e.g., exclusive caches)
336 */
337 CacheBlk *tempBlock;
338
339 /**
340     * Upstream caches need this packet until true is returned, so we
341     * hold on to it for deletion until a subsequent call
342 */
343 std::unique_ptr<Packet> pendingDelete;
344
345 /**
346 * Mark a request as in service (sent downstream in the memory
347 * system), effectively making this MSHR the ordering point.
348 */
349 void markInService(MSHR *mshr, bool pending_modified_resp)
350 {
351 bool wasFull = mshrQueue.isFull();
352 mshrQueue.markInService(mshr, pending_modified_resp);
353
354 if (wasFull && !mshrQueue.isFull()) {
355 clearBlocked(Blocked_NoMSHRs);
356 }
357 }
358
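    /**
     * Mark a write queue entry as in service and, if the write buffer
     * was full, unblock the cache once space becomes available again.
     */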
359 void markInService(WriteQueueEntry *entry)
360 {
361 bool wasFull = writeBuffer.isFull();
362 writeBuffer.markInService(entry);
363
364 if (wasFull && !writeBuffer.isFull()) {
365 clearBlocked(Blocked_NoWBBuffers);
366 }
367 }
368
369 /**
370 * Determine whether we should allocate on a fill or not. If this
371     * cache is mostly inclusive with regard to the upstream cache(s)
372     * we always allocate (for any non-forwarded and cacheable
373     * requests). In the case of a mostly exclusive cache, we allocate
374     * on fill if the packet did not come from a cache: that is, if we
375     * are dealing with a whole-line write (which behaves much like a
376     * writeback), the original target packet came from a non-caching
377     * source, or we are performing a prefetch or LLSC.
378 *
379 * @param cmd Command of the incoming requesting packet
380 * @return Whether we should allocate on the fill
381 */
382 inline bool allocOnFill(MemCmd cmd) const
383 {
384 return clusivity == Enums::mostly_incl ||
385 cmd == MemCmd::WriteLineReq ||
386 cmd == MemCmd::ReadReq ||
387 cmd == MemCmd::WriteReq ||
388 cmd.isPrefetch() ||
389 cmd.isLLSC();
390 }
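    // Note: allocOnFill() is evaluated when a miss buffer is allocated
    // (see allocateMissBuffer() further down), so the allocation decision
    // is recorded in the MSHR at the time the miss is created.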
391
392 /**
393 * Does all the processing necessary to perform the provided request.
394 * @param pkt The memory request to perform.
395 * @param blk The cache block to be updated.
396 * @param lat The latency of the access.
397 * @param writebacks List for any writebacks that need to be performed.
398 * @return Boolean indicating whether the request was satisfied.
399 */
400 virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
401 PacketList &writebacks);
402
403 /*
404 * Handle a timing request that hit in the cache
405 *
406     * @param pkt The request packet
407     * @param blk The referenced block
408     * @param request_time The tick at which the block lookup is complete
409 */
410 virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk,
411 Tick request_time);
412
413 /*
414 * Handle a timing request that missed in the cache
415 *
416 * Implementation specific handling for different cache
417 * implementations
418 *
419     * @param pkt The request packet
420     * @param blk The referenced block
421     * @param forward_time The tick at which we can process dependent requests
422     * @param request_time The tick at which the block lookup is complete
423 */
424 virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk,
425 Tick forward_time,
426 Tick request_time) = 0;
427
428 /*
429 * Handle a timing request that missed in the cache
430 *
431 * Common functionality across different cache implementations
432 *
433     * @param pkt The request packet
434     * @param blk The referenced block
435     * @param mshr Any existing MSHR for the referenced cache block
436     * @param forward_time The tick at which we can process dependent requests
437     * @param request_time The tick at which the block lookup is complete
438 */
439 void handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
440 Tick forward_time, Tick request_time);
441
442 /**
443 * Performs the access specified by the request.
444 * @param pkt The request to perform.
445 */
446 virtual void recvTimingReq(PacketPtr pkt);
447
448 /**
449 * Handling the special case of uncacheable write responses to
450 * make recvTimingResp less cluttered.
451 */
452 void handleUncacheableWriteResp(PacketPtr pkt);
453
454 /**
455 * Service non-deferred MSHR targets using the received response
456 *
457 * Iterates through the list of targets that can be serviced with
458     * the current response. Any writebacks that need to be performed
459     * must be appended to the writebacks parameter.
460     *
461     * @param mshr The MSHR that corresponds to the response
462 * @param pkt The response packet
463 * @param blk The reference block
464 * @param writebacks List of writebacks that need to be performed
465 */
466 virtual void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt,
467 CacheBlk *blk, PacketList& writebacks) = 0;
468
469 /**
470 * Handles a response (cache line fill/write ack) from the bus.
471 * @param pkt The response packet
472 */
473 virtual void recvTimingResp(PacketPtr pkt);
474
475 /**
476 * Snoops bus transactions to maintain coherence.
477 * @param pkt The current bus transaction.
478 */
479 virtual void recvTimingSnoopReq(PacketPtr pkt) = 0;
480
481 /**
482 * Handle a snoop response.
483 * @param pkt Snoop response packet
484 */
485 virtual void recvTimingSnoopResp(PacketPtr pkt) = 0;
486
487 /**
488 * Handle a request in atomic mode that missed in this cache
489 *
490 * Creates a downstream request, sends it to the memory below and
491 * handles the response. As we are in atomic mode all operations
492 * are performed immediately.
493 *
494 * @param pkt The packet with the requests
495 * @param blk The referenced block
496 * @param writebacks A list with packets for any performed writebacks
497 * @return Cycles for handling the request
498 */
499 virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *blk,
500 PacketList &writebacks) = 0;
501
502 /**
503 * Performs the access specified by the request.
504 * @param pkt The request to perform.
505 * @return The number of ticks required for the access.
506 */
507 virtual Tick recvAtomic(PacketPtr pkt);
508
509 /**
510 * Snoop for the provided request in the cache and return the estimated
511 * time taken.
512 * @param pkt The memory request to snoop
513 * @return The number of ticks required for the snoop.
514 */
515 virtual Tick recvAtomicSnoop(PacketPtr pkt) = 0;
516
517 /**
518 * Performs the access specified by the request.
519 *
520 * @param pkt The request to perform.
521     * @param from_cpu_side True if the access comes from the CPU side port
522 */
523 virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side);
524
525 /**
526 * Handle doing the Compare and Swap function for SPARC.
527 */
528 void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);
529
530 /**
531 * Return the next queue entry to service, either a pending miss
532 * from the MSHR queue, a buffered write from the write buffer, or
533 * something from the prefetcher. This function is responsible
534 * for prioritizing among those sources on the fly.
535 */
536 QueueEntry* getNextQueueEntry();
537
538 /**
539 * Insert writebacks into the write buffer
540 */
541 virtual void doWritebacks(PacketList& writebacks, Tick forward_time) = 0;
542
543 /**
544 * Send writebacks down the memory hierarchy in atomic mode
545 */
546 virtual void doWritebacksAtomic(PacketList& writebacks) = 0;
547
548 /**
549 * Create an appropriate downstream bus request packet.
550 *
551     * Creates a new packet with the request to be sent to the memory
552 * below, or nullptr if the current request in cpu_pkt should just
553 * be forwarded on.
554 *
555 * @param cpu_pkt The miss packet that needs to be satisfied.
556 * @param blk The referenced block, can be nullptr.
557 * @param needs_writable Indicates that the block must be writable
558 * even if the request in cpu_pkt doesn't indicate that.
559     * @return A packet to send to the memory below
560 */
561 virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
562 bool needs_writable) const = 0;
563
564 /**
565 * Determine if clean lines should be written back or not. In
566 * cases where a downstream cache is mostly inclusive we likely
567 * want it to act as a victim cache also for lines that have not
568 * been modified. Hence, we cannot simply drop the line (or send a
569 * clean evict), but rather need to send the actual data.
570 */
571 const bool writebackClean;
572
573 /**
574     * Writebacks from the tempBlock, produced on the response path
575     * in atomic mode, must happen after the call to recvAtomic has
576     * finished (to preserve the ordering of the packets). We therefore
577 * need to hold on to the packets, and have a method and an event
578 * to send them.
579 */
580 PacketPtr tempBlockWriteback;
581
582 /**
583 * Send the outstanding tempBlock writeback. To be called after
584 * recvAtomic finishes in cases where the block we filled is in
585 * fact the tempBlock, and now needs to be written back.
586 */
587 void writebackTempBlockAtomic() {
588 assert(tempBlockWriteback != nullptr);
589 PacketList writebacks{tempBlockWriteback};
590 doWritebacksAtomic(writebacks);
591 tempBlockWriteback = nullptr;
592 }
593
594 /**
595 * An event to writeback the tempBlock after recvAtomic
596 * finishes. To avoid other calls to recvAtomic getting in
597 * between, we create this event with a higher priority.
598 */
599 EventFunctionWrapper writebackTempBlockAtomicEvent;
600
601 /**
602 * Perform any necessary updates to the block and perform any data
603 * exchange between the packet and the block. The flags of the
604 * packet are also set accordingly.
605 *
606 * @param pkt Request packet from upstream that hit a block
607 * @param blk Cache block that the packet hit
608 * @param deferred_response Whether this request originally missed
609 * @param pending_downgrade Whether the writable flag is to be removed
610 */
611 virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
612 bool deferred_response = false,
613 bool pending_downgrade = false);
614
615 /**
616 * Maintain the clusivity of this cache by potentially
617 * invalidating a block. This method works in conjunction with
618 * satisfyRequest, but is separate to allow us to handle all MSHR
619 * targets before potentially dropping a block.
620 *
621 * @param from_cache Whether we have dealt with a packet from a cache
622 * @param blk The block that should potentially be dropped
623 */
624 void maintainClusivity(bool from_cache, CacheBlk *blk);
625
626 /**
627 * Handle a fill operation caused by a received packet.
628 *
629 * Populates a cache block and handles all outstanding requests for the
630 * satisfied fill request. This version takes two memory requests. One
631 * contains the fill data, the other is an optional target to satisfy.
632 * Note that the reason we return a list of writebacks rather than
633 * inserting them directly in the write buffer is that this function
634 * is called by both atomic and timing-mode accesses, and in atomic
635 * mode we don't mess with the write buffer (we just perform the
636 * writebacks atomically once the original request is complete).
637 *
638 * @param pkt The memory request with the fill data.
639 * @param blk The cache block if it already exists.
640 * @param writebacks List for any writebacks that need to be performed.
641 * @param allocate Whether to allocate a block or use the temp block
642 * @return Pointer to the new cache block.
643 */
644 CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
645 PacketList &writebacks, bool allocate);
646
647 /**
648 * Allocate a new block and perform any necessary writebacks
649 *
650 * Find a victim block and if necessary prepare writebacks for any
651 * existing data. May return nullptr if there are no replaceable
652 * blocks.
653 *
654 * @param addr Physical address of the new block
655 * @param is_secure Set if the block should be secure
656 * @param writebacks A list of writeback packets for the evicted blocks
657 * @return the allocated block
658 */
659 CacheBlk *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks);
660 /**
661 * Evict a cache block.
662 *
663     * Performs a writeback if necessary and invalidates the block
664 *
665 * @param blk Block to invalidate
666 * @return A packet with the writeback, can be nullptr
667 */
668 M5_NODISCARD virtual PacketPtr evictBlock(CacheBlk *blk) = 0;
669
670 /**
671 * Evict a cache block.
672 *
673     * Performs a writeback if necessary and invalidates the block
674 *
675 * @param blk Block to invalidate
676 * @param writebacks Return a list of packets with writebacks
677 */
678 virtual void evictBlock(CacheBlk *blk, PacketList &writebacks) = 0;
679
680 /**
681 * Invalidate a cache block.
682 *
683 * @param blk Block to invalidate
684 */
685 void invalidateBlock(CacheBlk *blk);
686
687 /**
688 * Create a writeback request for the given block.
689 *
690 * @param blk The block to writeback.
691 * @return The writeback request for the block.
692 */
693 PacketPtr writebackBlk(CacheBlk *blk);
694
695 /**
696 * Create a writeclean request for the given block.
697 *
698 * Creates a request that writes the block to the cache below
699 * without evicting the block from the current cache.
700 *
701 * @param blk The block to write clean.
702 * @param dest The destination of the write clean operation.
703 * @param id Use the given packet id for the write clean operation.
704 * @return The generated write clean packet.
705 */
706 PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id);
707
708 /**
709 * Write back dirty blocks in the cache using functional accesses.
710 */
711 virtual void memWriteback() override;
712
713 /**
714 * Invalidates all blocks in the cache.
715 *
716 * @warn Dirty cache lines will not be written back to
717     * memory. Make sure to call memWriteback() first if you
718     * want to write them back to memory.
719 */
720 virtual void memInvalidate() override;
721
722 /**
723 * Determine if there are any dirty blocks in the cache.
724 *
725 * @return true if at least one block is dirty, false otherwise.
726 */
727 bool isDirty() const;
728
729 /**
730 * Determine if an address is in the ranges covered by this
731 * cache. This is useful to filter snoops.
732 *
733 * @param addr Address to check against
734 *
735 * @return If the address in question is in range
736 */
737 bool inRange(Addr addr) const;
738
739 /**
740 * Find next request ready time from among possible sources.
741 */
742 Tick nextQueueReadyTime() const;
743
744 /** Block size of this cache */
745 const unsigned blkSize;
746
747 /**
748 * The latency of tag lookup of a cache. It occurs when there is
749 * an access to the cache.
750 */
751 const Cycles lookupLatency;
752
753 /**
754 * The latency of data access of a cache. It occurs when there is
755 * an access to the cache.
756 */
757 const Cycles dataLatency;
758
759 /**
760 * This is the forward latency of the cache. It occurs when there
761 * is a cache miss and a request is forwarded downstream, in
762 * particular an outbound miss.
763 */
764 const Cycles forwardLatency;
765
766 /** The latency to fill a cache block */
767 const Cycles fillLatency;
768
769 /**
770     * The latency of sending a response to its upper level cache/core on
771 * a linefill. The responseLatency parameter captures this
772 * latency.
773 */
774 const Cycles responseLatency;
775
776 /** The number of targets for each MSHR. */
777 const int numTarget;
778
779 /** Do we forward snoops from mem side port through to cpu side port? */
780 bool forwardSnoops;
781
782 /**
783 * Clusivity with respect to the upstream cache, determining if we
784 * fill into both this cache and the cache above on a miss. Note
785 * that we currently do not support strict clusivity policies.
786 */
787 const Enums::Clusivity clusivity;
788
789 /**
790 * Is this cache read only, for example the instruction cache, or
791 * table-walker cache. A cache that is read only should never see
792 * any writes, and should never get any dirty data (and hence
793 * never have to do any writebacks).
794 */
795 const bool isReadOnly;
796
797 /**
798 * Bit vector of the blocking reasons for the access path.
799 * @sa #BlockedCause
800 */
801 uint8_t blocked;
802
803 /** Increasing order number assigned to each incoming request. */
804 uint64_t order;
805
806 /** Stores time the cache blocked for statistics. */
807 Cycles blockedCycle;
808
809 /** Pointer to the MSHR that has no targets. */
810 MSHR *noTargetMSHR;
811
812 /** The number of misses to trigger an exit event. */
813 Counter missCount;
814
815 /**
816 * The address range to which the cache responds on the CPU side.
817 * Normally this is all possible memory addresses. */
818 const AddrRangeList addrRanges;
819
820 public:
821 /** System we are currently operating in. */
822 System *system;
823
824 // Statistics
825 /**
826 * @addtogroup CacheStatistics
827 * @{
828 */
829
830 /** Number of hits per thread for each type of command.
831 @sa Packet::Command */
832 Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
833 /** Number of hits for demand accesses. */
834 Stats::Formula demandHits;
835     /** Number of hits for all accesses. */
836 Stats::Formula overallHits;
837
838 /** Number of misses per thread for each type of command.
839 @sa Packet::Command */
840 Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
841 /** Number of misses for demand accesses. */
842 Stats::Formula demandMisses;
843 /** Number of misses for all accesses. */
844 Stats::Formula overallMisses;
845
846 /**
847 * Total number of cycles per thread/command spent waiting for a miss.
848 * Used to calculate the average miss latency.
849 */
850 Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
851 /** Total number of cycles spent waiting for demand misses. */
852 Stats::Formula demandMissLatency;
853 /** Total number of cycles spent waiting for all misses. */
854 Stats::Formula overallMissLatency;
855
856 /** The number of accesses per command and thread. */
857 Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
858 /** The number of demand accesses. */
859 Stats::Formula demandAccesses;
860 /** The number of overall accesses. */
861 Stats::Formula overallAccesses;
862
863 /** The miss rate per command and thread. */
864 Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
865 /** The miss rate of all demand accesses. */
866 Stats::Formula demandMissRate;
867 /** The miss rate for all accesses. */
868 Stats::Formula overallMissRate;
869
870 /** The average miss latency per command and thread. */
871 Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
872 /** The average miss latency for demand misses. */
873 Stats::Formula demandAvgMissLatency;
874 /** The average miss latency for all misses. */
875 Stats::Formula overallAvgMissLatency;
876
877 /** The total number of cycles blocked for each blocked cause. */
878 Stats::Vector blocked_cycles;
879 /** The number of times this cache blocked for each blocked cause. */
880 Stats::Vector blocked_causes;
881
882 /** The average number of cycles blocked for each blocked cause. */
883 Stats::Formula avg_blocked;
884
885 /** The number of times a HW-prefetched block is evicted w/o reference. */
886 Stats::Scalar unusedPrefetches;
887
888 /** Number of blocks written back per thread. */
889 Stats::Vector writebacks;
890
891 /** Number of misses that hit in the MSHRs per command and thread. */
892 Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
893 /** Demand misses that hit in the MSHRs. */
894 Stats::Formula demandMshrHits;
895 /** Total number of misses that hit in the MSHRs. */
896 Stats::Formula overallMshrHits;
897
898 /** Number of misses that miss in the MSHRs, per command and thread. */
899 Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
900 /** Demand misses that miss in the MSHRs. */
901 Stats::Formula demandMshrMisses;
902 /** Total number of misses that miss in the MSHRs. */
903 Stats::Formula overallMshrMisses;
904
905     /** Number of uncacheable accesses, per command and thread. */
906     Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
907     /** Total number of uncacheable accesses. */
908 Stats::Formula overallMshrUncacheable;
909
910 /** Total cycle latency of each MSHR miss, per command and thread. */
911 Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
912 /** Total cycle latency of demand MSHR misses. */
913 Stats::Formula demandMshrMissLatency;
914 /** Total cycle latency of overall MSHR misses. */
915 Stats::Formula overallMshrMissLatency;
916
917     /** Total cycle latency of each uncacheable access, per command and thread. */
918     Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
919     /** Total cycle latency of all uncacheable accesses. */
920 Stats::Formula overallMshrUncacheableLatency;
921
922#if 0
923 /** The total number of MSHR accesses per command and thread. */
924 Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
925 /** The total number of demand MSHR accesses. */
926 Stats::Formula demandMshrAccesses;
927 /** The total number of MSHR accesses. */
928 Stats::Formula overallMshrAccesses;
929#endif
930
931     /** The miss rate in the MSHRs per command and thread. */
932 Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
933 /** The demand miss rate in the MSHRs. */
934 Stats::Formula demandMshrMissRate;
935 /** The overall miss rate in the MSHRs. */
936 Stats::Formula overallMshrMissRate;
937
938 /** The average latency of an MSHR miss, per command and thread. */
939 Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
940 /** The average latency of a demand MSHR miss. */
941 Stats::Formula demandAvgMshrMissLatency;
942 /** The average overall latency of an MSHR miss. */
943 Stats::Formula overallAvgMshrMissLatency;
944
945     /** The average latency of an uncacheable access, per command and thread. */
946     Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
947     /** The average overall latency of an uncacheable access. */
948 Stats::Formula overallAvgMshrUncacheableLatency;
949
950 /** Number of replacements of valid blocks. */
951 Stats::Scalar replacements;
952
953 /**
954 * @}
955 */
956
957 /**
958 * Register stats for this object.
959 */
960 void regStats() override;
961
962 public:
963 BaseCache(const BaseCacheParams *p, unsigned blk_size);
964 ~BaseCache();
965
966 void init() override;
967
968 BaseMasterPort &getMasterPort(const std::string &if_name,
969 PortID idx = InvalidPortID) override;
970 BaseSlavePort &getSlavePort(const std::string &if_name,
971 PortID idx = InvalidPortID) override;
972
973 /**
974 * Query block size of a cache.
975 * @return The block size
976 */
977 unsigned
978 getBlockSize() const
979 {
980 return blkSize;
981 }
982
983 const AddrRangeList &getAddrRanges() const { return addrRanges; }
984
985 MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
986 {
987 MSHR *mshr = mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize,
988 pkt, time, order++,
989 allocOnFill(pkt->cmd));
990
991 if (mshrQueue.isFull()) {
992 setBlocked((BlockedCause)MSHRQueue_MSHRs);
993 }
994
995 if (sched_send) {
996 // schedule the send
997 schedMemSideSendEvent(time);
998 }
999
1000 return mshr;
1001 }
1002
1003 void allocateWriteBuffer(PacketPtr pkt, Tick time)
1004 {
1005 // should only see writes or clean evicts here
1006 assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);
1007
1008 Addr blk_addr = pkt->getBlockAddr(blkSize);
1009
1010 WriteQueueEntry *wq_entry =
1011 writeBuffer.findMatch(blk_addr, pkt->isSecure());
1012 if (wq_entry && !wq_entry->inService) {
1013            DPRINTF(Cache, "Potential to merge writeback %s\n", pkt->print());
1014 }
1015
1016 writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);
1017
1018 if (writeBuffer.isFull()) {
1019 setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
1020 }
1021
1022 // schedule the send
1023 schedMemSideSendEvent(time);
1024 }
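    // Both allocation paths above apply backpressure: when the MSHR queue
    // or the write buffer fills up, setBlocked() stops the CPU-side port
    // from accepting new requests until a later markInService() frees an
    // entry and clearBlocked() reopens the access path.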
1025
1026 /**
1027 * Returns true if the cache is blocked for accesses.
1028 */
1029 bool isBlocked() const
1030 {
1031 return blocked != 0;
1032 }
1033
1034 /**
1035 * Marks the access path of the cache as blocked for the given cause. This
1036 * also sets the blocked flag in the slave interface.
1037 * @param cause The reason for the cache blocking.
1038 */
1039 void setBlocked(BlockedCause cause)
1040 {
1041 uint8_t flag = 1 << cause;
1042 if (blocked == 0) {
1043 blocked_causes[cause]++;
1044 blockedCycle = curCycle();
1045 cpuSidePort.setBlocked();
1046 }
1047 blocked |= flag;
1048 DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
1049 }
1050
1051 /**
1052 * Marks the cache as unblocked for the given cause. This also clears the
1053 * blocked flags in the appropriate interfaces.
1054 * @param cause The newly unblocked cause.
1055 * @warning Calling this function can cause a blocked request on the bus to
1056 * access the cache. The cache must be in a state to handle that request.
1057 */
1058 void clearBlocked(BlockedCause cause)
1059 {
1060 uint8_t flag = 1 << cause;
1061 blocked &= ~flag;
1062 DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
1063 if (blocked == 0) {
1064 blocked_cycles[cause] += curCycle() - blockedCycle;
1065 cpuSidePort.clearBlocked();
1066 }
1067 }
1068
1069 /**
1070 * Schedule a send event for the memory-side port. If already
1071 * scheduled, this may reschedule the event at an earlier
1072 * time. When the specified time is reached, the port is free to
1073 * send either a response, a request, or a prefetch request.
1074 *
1075 * @param time The time when to attempt sending a packet.
1076 */
1077 void schedMemSideSendEvent(Tick time)
1078 {
1079 memSidePort.schedSendEvent(time);
1080 }
1081
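    /**
     * Determine if the given address is present in this cache by
     * looking it up in the tag store.
     */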
1082 bool inCache(Addr addr, bool is_secure) const {
1083 return tags->findBlock(addr, is_secure);
1084 }
1085
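    /**
     * Determine if there is an outstanding MSHR for the given address,
     * i.e. if a miss for this block is already in flight.
     */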
1086 bool inMissQueue(Addr addr, bool is_secure) const {
1087 return mshrQueue.findMatch(addr, is_secure);
1088 }
1089
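    /**
     * Update the per-master miss statistics for this packet and, if a
     * maximum miss count is configured, end the simulation when it is
     * reached.
     */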
1090 void incMissCount(PacketPtr pkt)
1091 {
1092 assert(pkt->req->masterId() < system->maxMasters());
1093 misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
1094 pkt->req->incAccessDepth();
1095 if (missCount) {
1096 --missCount;
1097 if (missCount == 0)
1098 exitSimLoop("A cache reached the maximum miss count");
1099 }
1100 }
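    /** Update the per-master hit statistics for this packet. */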
1101 void incHitCount(PacketPtr pkt)
1102 {
1103 assert(pkt->req->masterId() < system->maxMasters());
1104 hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
1105
1106 }
1107
1108 /**
1109 * Cache block visitor that writes back dirty cache blocks using
1110 * functional writes.
1111 *
1112 * @return Always returns true.
1113 */
1114 bool writebackVisitor(CacheBlk &blk);
1115
1116 /**
1117 * Cache block visitor that invalidates all blocks in the cache.
1118 *
1119 * @warn Dirty cache lines will not be written back to memory.
1120 *
1121 * @return Always returns true.
1122 */
1123 bool invalidateVisitor(CacheBlk &blk);
1124
1125 /**
1126 * Take an MSHR, turn it into a suitable downstream packet, and
1127 * send it out. This construct allows a queue entry to choose a suitable
1128 * approach based on its type.
1129 *
1130 * @param mshr The MSHR to turn into a packet and send
1131 * @return True if the port is waiting for a retry
1132 */
1133 virtual bool sendMSHRQueuePacket(MSHR* mshr);
1134
1135 /**
1136     * Similar to sendMSHRQueuePacket, but for a write-queue entry
1137 * instead. Create the packet, and send it, and if successful also
1138 * mark the entry in service.
1139 *
1140 * @param wq_entry The write-queue entry to turn into a packet and send
1141 * @return True if the port is waiting for a retry
1142 */
1143 bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);
1144
1145 /**
1146 * Serialize the state of the caches
1147 *
1148 * We currently don't support checkpointing cache state, so this panics.
1149 */
1150 void serialize(CheckpointOut &cp) const override;
1151 void unserialize(CheckpointIn &cp) override;
1152
1153};
1154
1155/**
1156 * Wrap a method and present it as a cache block visitor.
1157 *
1158 * For example, the forEachBlk method in the tag arrays expects a
1159 * callable object/function as its parameter. This class wraps a
1160 * method in an object and presents a callable object that adheres to
1161 * the cache block visitor protocol.
1162 */
1163class CacheBlkVisitorWrapper : public CacheBlkVisitor
1164{
1165 public:
1166 typedef bool (BaseCache::*VisitorPtr)(CacheBlk &blk);
1167
1168 CacheBlkVisitorWrapper(BaseCache &_cache, VisitorPtr _visitor)
1169 : cache(_cache), visitor(_visitor) {}
1170
1171 bool operator()(CacheBlk &blk) override {
1172 return (cache.*visitor)(blk);
1173 }
1174
1175 private:
1176 BaseCache &cache;
1177 VisitorPtr visitor;
1178};
1179
1180/**
1181 * Cache block visitor that determines if there are dirty blocks in a
1182 * cache.
1183 *
1184 * Use with the forEachBlk method in the tag array to determine if the
1185 * array contains dirty blocks.
1186 */
1187class CacheBlkIsDirtyVisitor : public CacheBlkVisitor
1188{
1189 public:
1190 CacheBlkIsDirtyVisitor()
1191 : _isDirty(false) {}
1192
1193 bool operator()(CacheBlk &blk) override {
1194 if (blk.isDirty()) {
1195 _isDirty = true;
1196 return false;
1197 } else {
1198 return true;
1199 }
1200 }
1201
1202 /**
1203 * Does the array contain a dirty line?
1204 *
1205 * @return true if yes, false otherwise.
1206 */
1207 bool isDirty() const { return _isDirty; };
1208
1209 private:
1210 bool _isDirty;
1211};
1212
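// A minimal usage sketch (illustrative only, not part of this header):
// the visitors above are meant to be handed to the tag array via
// BaseTags::forEachBlk. Assuming that interface, isDirty() could be
// implemented roughly as
//
//     bool
//     BaseCache::isDirty() const
//     {
//         CacheBlkIsDirtyVisitor visitor;
//         tags->forEachBlk(visitor);
//         return visitor.isDirty();
//     }
//
// and memWriteback()/memInvalidate() could wrap their member-function
// visitors in the same way, e.g.
//
//     CacheBlkVisitorWrapper visitor(*this, &BaseCache::writebackVisitor);
//     tags->forEachBlk(visitor);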
1213#endif //__MEM_CACHE_BASE_HH__