base.hh (13945:a573bed35a8b) → base.hh (13947:4cf8087cab09)
/*
 * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Hansson
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __MEM_CACHE_BASE_HH__
#define __MEM_CACHE_BASE_HH__

#include <cassert>
#include <cstdint>
#include <string>

#include "base/addr_range.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/compressors/base.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/packet.hh"
#include "mem/packet_queue.hh"
#include "mem/qport.hh"
#include "mem/request.hh"
#include "params/WriteAllocator.hh"
#include "sim/clocked_object.hh"
#include "sim/eventq.hh"
#include "sim/probe/probe.hh"
#include "sim/serialize.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

class BaseMasterPort;
class BasePrefetcher;
class BaseSlavePort;
class MSHR;
class MasterPort;
class QueueEntry;
struct BaseCacheParams;

/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public ClockedObject
{
  protected:
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };

  public:
    /**
     * Reasons for caches to be blocked.
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };

  protected:

    /**
     * A cache master port is used for the memory-side port of the
     * cache, and in addition to the basic timing port that only sends
     * response packets through a transmit list, it also offers the
     * ability to schedule and send request packets (requests &
     * writebacks). The send event is scheduled through schedSendEvent,
     * and the sendDeferredPacket of the timing port is modified to
     * consider both the transmit list and the requests from the MSHR.
     */
    class CacheMasterPort : public QueuedMasterPort
    {

      public:

        /**
         * Schedule a send of a request packet (from the MSHR). Note
         * that we could already have a retry outstanding.
         */
        void schedSendEvent(Tick time)
        {
            DPRINTF(CachePort, "Scheduling send event at %llu\n", time);
            reqQueue.schedSendEvent(time);
        }

      protected:

        CacheMasterPort(const std::string &_name, BaseCache *_cache,
                        ReqPacketQueue &_reqQueue,
                        SnoopRespPacketQueue &_snoopRespQueue) :
            QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
        { }

        /**
         * Memory-side port always snoops.
         *
         * @return always true
         */
        virtual bool isSnooping() const { return true; }
    };

    /**
     * Override the default behaviour of sendDeferredPacket to enable
     * the memory-side cache port to also send requests based on the
     * current MSHR status. This queue has a pointer to our specific
     * cache implementation and is used by the MemSidePort.
     */
    class CacheReqPacketQueue : public ReqPacketQueue
    {

      protected:

        BaseCache &cache;
        SnoopRespPacketQueue &snoopRespQueue;

      public:

        CacheReqPacketQueue(BaseCache &cache, MasterPort &port,
                            SnoopRespPacketQueue &snoop_resp_queue,
                            const std::string &label) :
            ReqPacketQueue(cache, port, label), cache(cache),
            snoopRespQueue(snoop_resp_queue) { }

        /**
         * Override the normal sendDeferredPacket and consider not
         * only the transmit list (used for responses), but also
         * requests.
         */
        virtual void sendDeferredPacket();

        /**
         * Check if there is a conflicting snoop response about to be
         * sent out, and if so simply stall any requests, and schedule
         * a send event at the same time as the next snoop response is
         * being sent out.
         *
         * @param pkt The packet to check for conflicts against.
         */
        bool checkConflictingSnoop(const PacketPtr pkt)
        {
            if (snoopRespQueue.checkConflict(pkt, cache.blkSize)) {
                DPRINTF(CachePort, "Waiting for snoop response to be "
                        "sent\n");
                Tick when = snoopRespQueue.deferredPacketReadyTime();
                schedSendEvent(when);
                return true;
            }
            return false;
        }
    };

    /**
     * The memory-side port extends the base cache master port with
     * access functions for functional, atomic and timing snoops.
     */
    class MemSidePort : public CacheMasterPort
    {
      private:

        /** The cache-specific queue. */
        CacheReqPacketQueue _reqQueue;

        SnoopRespPacketQueue _snoopRespQueue;

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:

        virtual void recvTimingSnoopReq(PacketPtr pkt);

        virtual bool recvTimingResp(PacketPtr pkt);

        virtual Tick recvAtomicSnoop(PacketPtr pkt);

        virtual void recvFunctionalSnoop(PacketPtr pkt);

      public:

        MemSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);
    };

    /**
     * A cache slave port is used for the CPU-side port of the cache,
     * and it is basically a simple timing port that uses a transmit
     * list for responses to the CPU (or connected master). In
     * addition, it has the functionality to block the port for
     * incoming requests. If blocked, the port will issue a retry once
     * unblocked.
     */
    class CacheSlavePort : public QueuedSlavePort
    {

      public:

        /** Do not accept any new requests. */
        void setBlocked();

        /** Return to normal operation and accept new requests. */
        void clearBlocked();

        bool isBlocked() const { return blocked; }

      protected:

        CacheSlavePort(const std::string &_name, BaseCache *_cache,
                       const std::string &_label);

        /** A normal packet queue used to store responses. */
        RespPacketQueue queue;

        bool blocked;

        bool mustSendRetry;

      private:

        void processSendRetry();

        EventFunctionWrapper sendRetryEvent;

    };

    /**
     * The CPU-side port extends the base cache slave port with access
     * functions for functional, atomic and timing requests.
     */
    class CpuSidePort : public CacheSlavePort
    {
      private:

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:
        virtual bool recvTimingSnoopResp(PacketPtr pkt) override;

        virtual bool tryTiming(PacketPtr pkt) override;

        virtual bool recvTimingReq(PacketPtr pkt) override;

        virtual Tick recvAtomic(PacketPtr pkt) override;

        virtual void recvFunctional(PacketPtr pkt) override;

        virtual AddrRangeList getAddrRanges() const override;

      public:

        CpuSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);

    };

    CpuSidePort cpuSidePort;
    MemSidePort memSidePort;

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    WriteQueue writeBuffer;

    /** Tag and data storage */
    BaseTags *tags;

    /** Compression method being used. */
    BaseCacheCompressor* compressor;

    /** Prefetcher */
    BasePrefetcher *prefetcher;

    /** To probe when a cache hit occurs */
    ProbePointArg<PacketPtr> *ppHit;

    /** To probe when a cache miss occurs */
    ProbePointArg<PacketPtr> *ppMiss;

    /** To probe when a cache fill occurs */
    ProbePointArg<PacketPtr> *ppFill;

    /**
     * The writeAllocator drives optimizations for streaming writes.
     * It first determines whether a WriteReq MSHR should be delayed,
     * thus ensuring that we wait longer in cases when we are write
     * coalescing and allowing all the bytes of the line to be written
     * before the MSHR packet is sent downstream. This works in unison
     * with the tracking in the MSHR to check if the entire line is
     * written. The write mode also affects the behaviour on filling
     * any whole-line writes. Normally the cache allocates the line
     * when receiving the InvalidateResp, but after seeing enough
     * consecutive lines we switch to using the tempBlock, and thus
     * end up not allocating the line, and instead turning the
     * whole-line write into a writeback straight away.
     */
    WriteAllocator * const writeAllocator;
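
    /**
     * Illustrative only (not the actual call sites): a write miss
     * path could consult the allocator roughly as follows, assuming
     * the WriteAllocator query interface (delay() here) declared
     * elsewhere in gem5:
     *
     * @code
     * const Addr blk_addr = pkt->getBlockAddr(blkSize);
     * if (writeAllocator && pkt->isWrite() &&
     *     writeAllocator->delay(blk_addr)) {
     *     // Keep the MSHR around a little longer so that subsequent
     *     // writes can coalesce into a whole-line write.
     * }
     * @endcode
     */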

    /**
     * Temporary cache block for occasional transitory use. We use
     * the tempBlock to fill when allocation fails (e.g., when there
     * is an outstanding request that accesses the victim block) or
     * when we want to avoid allocation (e.g., exclusive caches)
     */
    TempCacheBlk *tempBlock;

    /**
     * Upstream caches need this packet until true is returned, so
     * hold it for deletion until a subsequent call
     */
    std::unique_ptr<Packet> pendingDelete;

    /**
     * Mark a request as in service (sent downstream in the memory
     * system), effectively making this MSHR the ordering point.
     */
    void markInService(MSHR *mshr, bool pending_modified_resp)
    {
        bool wasFull = mshrQueue.isFull();
        mshrQueue.markInService(mshr, pending_modified_resp);

        if (wasFull && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }
    }

    void markInService(WriteQueueEntry *entry)
    {
        bool wasFull = writeBuffer.isFull();
        writeBuffer.markInService(entry);

        if (wasFull && !writeBuffer.isFull()) {
            clearBlocked(Blocked_NoWBBuffers);
        }
    }

    /**
     * Determine whether we should allocate on a fill or not. If this
     * cache is mostly inclusive with regards to the upstream cache(s)
     * we always allocate (for any non-forwarded and cacheable
     * requests). In the case of a mostly exclusive cache, we allocate
     * on fill if the packet did not come from a cache, thus if we are
     * dealing with a whole-line write (which behaves much like a
     * writeback), the original target packet came from a non-caching
     * source, or we are performing a prefetch or LLSC.
     *
     * @param cmd Command of the incoming requesting packet
     * @return Whether we should allocate on the fill
     */
    inline bool allocOnFill(MemCmd cmd) const
    {
        return clusivity == Enums::mostly_incl ||
            cmd == MemCmd::WriteLineReq ||
            cmd == MemCmd::ReadReq ||
            cmd == MemCmd::WriteReq ||
            cmd.isPrefetch() ||
            cmd.isLLSC();
    }

    /**
     * Regenerate block address using tags.
     * Block address regeneration depends on whether we're using a temporary
     * block or not.
     *
     * @param blk The block whose address is to be regenerated.
     * @return The block's address.
     */
    Addr regenerateBlkAddr(CacheBlk* blk);

    /**
     * Calculate latency of accesses that only touch the tag array.
     * @sa calculateAccessLatency
     *
     * @param delay The delay until the packet's metadata is present.
     * @param lookup_lat Latency of the respective tag lookup.
     * @return The number of ticks that pass due to a tag-only access.
     */
    Cycles calculateTagOnlyLatency(const uint32_t delay,
                                   const Cycles lookup_lat) const;

    /**
     * Calculate access latency in ticks given a tag lookup latency, and
     * whether access was a hit or miss.
     *
     * @param blk The cache block that was accessed.
     * @param delay The delay until the packet's metadata is present.
     * @param lookup_lat Latency of the respective tag lookup.
     * @return The number of ticks that pass due to a block access.
     */
    Cycles calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
                                  const Cycles lookup_lat) const;
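
    /**
     * As a rough illustration only (an assumed composition, not
     * necessarily the exact implementation): a sequential-access
     * cache pays the tag lookup followed by the data access on a
     * hit, whereas a parallel-access cache overlaps the two.
     *
     * @code
     * Cycles lat = ticksToCycles(delay) + lookup_lat;
     * if (sequentialAccess)
     *     lat += dataLatency;
     * @endcode
     */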

    /**
     * Does all the processing necessary to perform the provided request.
     * @param pkt The memory request to perform.
     * @param blk The cache block to be updated.
     * @param lat The latency of the access.
     * @param writebacks List for any writebacks that need to be performed.
     * @return Boolean indicating whether the request was satisfied.
     */
    virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                        PacketList &writebacks);

    /*
     * Handle a timing request that hit in the cache
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk,
                                    Tick request_time);

    /*
     * Handle a timing request that missed in the cache
     *
     * Implementation specific handling for different cache
     * implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk,
                                     Tick forward_time,
                                     Tick request_time) = 0;

    /*
     * Handle a timing request that missed in the cache
     *
     * Common functionality across different cache implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param mshr Any existing mshr for the referenced cache block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    void handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                             Tick forward_time, Tick request_time);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     */
    virtual void recvTimingReq(PacketPtr pkt);

    /**
     * Handling the special case of uncacheable write responses to
     * make recvTimingResp less cluttered.
     */
    void handleUncacheableWriteResp(PacketPtr pkt);

    /**
     * Service non-deferred MSHR targets using the received response
     *
     * Iterates through the list of targets that can be serviced with
     * the current response.
     *
     * @param mshr The MSHR that corresponds to the response
     * @param pkt The response packet
     * @param blk The reference block
     */
    virtual void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt,
                                    CacheBlk *blk) = 0;

    /**
     * Handles a response (cache line fill/write ack) from the bus.
     * @param pkt The response packet
     */
    virtual void recvTimingResp(PacketPtr pkt);

    /**
     * Snoops bus transactions to maintain coherence.
     * @param pkt The current bus transaction.
     */
    virtual void recvTimingSnoopReq(PacketPtr pkt) = 0;

    /**
     * Handle a snoop response.
     * @param pkt Snoop response packet
     */
    virtual void recvTimingSnoopResp(PacketPtr pkt) = 0;

    /**
     * Handle a request in atomic mode that missed in this cache
     *
     * Creates a downstream request, sends it to the memory below and
     * handles the response. As we are in atomic mode all operations
     * are performed immediately.
     *
     * @param pkt The packet with the request
     * @param blk The referenced block
     * @param writebacks A list with packets for any performed writebacks
     * @return Cycles for handling the request
     */
    virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
                                       PacketList &writebacks) = 0;

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The number of ticks required for the access.
     */
    virtual Tick recvAtomic(PacketPtr pkt);

    /**
     * Snoop for the provided request in the cache and return the estimated
     * time taken.
     * @param pkt The memory request to snoop
     * @return The number of ticks required for the snoop.
     */
    virtual Tick recvAtomicSnoop(PacketPtr pkt) = 0;

    /**
     * Performs the access specified by the request.
     *
     * @param pkt The request to perform.
     * @param from_cpu_side True if the access came from the CPU-side
     * port, false if it came from the memory-side port.
     */
    virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side);

    /**
     * Handle doing the Compare and Swap function for SPARC.
     */
    void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);

    /**
     * Return the next queue entry to service, either a pending miss
     * from the MSHR queue, a buffered write from the write buffer, or
     * something from the prefetcher. This function is responsible
     * for prioritizing among those sources on the fly.
     */
    QueueEntry* getNextQueueEntry();

    /**
     * Insert writebacks into the write buffer
     */
    virtual void doWritebacks(PacketList& writebacks, Tick forward_time) = 0;

    /**
     * Send writebacks down the memory hierarchy in atomic mode
     */
    virtual void doWritebacksAtomic(PacketList& writebacks) = 0;

    /**
     * Create an appropriate downstream bus request packet.
     *
     * Creates a new packet with the request to be sent to the memory
     * below, or nullptr if the current request in cpu_pkt should just
     * be forwarded on.
     *
     * @param cpu_pkt The miss packet that needs to be satisfied.
     * @param blk The referenced block, can be nullptr.
     * @param needs_writable Indicates that the block must be writable
     * even if the request in cpu_pkt doesn't indicate that.
     * @param is_whole_line_write True if there are writes for the
     * whole line
     * @return A packet to be sent to the memory below
     */
    virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                                       bool needs_writable,
                                       bool is_whole_line_write) const = 0;

    /**
     * Determine if clean lines should be written back or not. In
     * cases where a downstream cache is mostly inclusive we likely
     * want it to act as a victim cache also for lines that have not
     * been modified. Hence, we cannot simply drop the line (or send a
     * clean evict), but rather need to send the actual data.
     */
    const bool writebackClean;

    /**
     * Writebacks from the tempBlock, arising on the response path
     * in atomic mode, must happen after the call to recvAtomic has
     * finished (for the right ordering of the packets). We therefore
     * need to hold on to the packets, and have a method and an event
     * to send them.
     */
    PacketPtr tempBlockWriteback;

    /**
     * Send the outstanding tempBlock writeback. To be called after
     * recvAtomic finishes in cases where the block we filled is in
     * fact the tempBlock, and now needs to be written back.
     */
    void writebackTempBlockAtomic() {
        assert(tempBlockWriteback != nullptr);
        PacketList writebacks{tempBlockWriteback};
        doWritebacksAtomic(writebacks);
        tempBlockWriteback = nullptr;
    }

    /**
     * An event to writeback the tempBlock after recvAtomic
     * finishes. To avoid other calls to recvAtomic getting in
     * between, we create this event with a higher priority.
     */
    EventFunctionWrapper writebackTempBlockAtomicEvent;
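
    /**
     * A sketch of the intended use from the atomic response path
     * (assumed usage, shown for illustration only):
     *
     * @code
     * // After filling into tempBlock in recvAtomic:
     * tempBlockWriteback = writebackBlk(blk);
     * schedule(writebackTempBlockAtomicEvent, curTick());
     * @endcode
     */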

    /**
     * When a block is overwritten, its compression information must be
     * updated, and it may need to be recompressed. If the compression
     * size changes, the block may either become smaller, in which case
     * there is no side effect, or bigger (data expansion; fat write), in
     * which case the block might not fit in its current location anymore.
     * If that happens, there are usually two options to be taken:
     *
     * - The co-allocated blocks must be evicted to make room for this
     *   block. Simpler, but ignores replacement data.
     * - The block itself is moved elsewhere (used in policies where the
     *   compression factor (CF) determines the location of the block).
     *
     * This implementation uses the first approach.
     *
     * Notice that this is only called for writebacks, which means that
     * L1 caches (which see regular Writes) do not support compression.
     * @sa CompressedTags
     *
     * @param blk The block to be overwritten.
     * @param data A pointer to the data to be compressed (blk's new data).
     * @param writebacks List for any writebacks that need to be performed.
     * @return Whether the operation was successful or not.
     */
    bool updateCompressionData(CacheBlk *blk, const uint64_t* data,
                               PacketList &writebacks);
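
    /**
     * A minimal sketch of the data-expansion decision described
     * above; the helpers compressedSizeOf() and evictCoAllocated()
     * are hypothetical and shown for illustration only:
     *
     * @code
     * const std::size_t old_size = compressedSizeOf(blk);  // hypothetical
     * const std::size_t new_size = compressedSizeOf(data); // hypothetical
     * if (new_size > old_size) {
     *     // Fat write: evict the co-allocated blocks to make room,
     *     // appending their writebacks to the list.
     *     evictCoAllocated(blk, writebacks); // hypothetical
     * }
     * @endcode
     */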

    /**
     * Perform any necessary updates to the block and perform any data
     * exchange between the packet and the block. The flags of the
     * packet are also set accordingly.
     *
     * @param pkt Request packet from upstream that hit a block
     * @param blk Cache block that the packet hit
     * @param deferred_response Whether this request originally missed
     * @param pending_downgrade Whether the writable flag is to be removed
     */
    virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                                bool deferred_response = false,
                                bool pending_downgrade = false);

    /**
     * Maintain the clusivity of this cache by potentially
     * invalidating a block. This method works in conjunction with
     * satisfyRequest, but is separate to allow us to handle all MSHR
     * targets before potentially dropping a block.
     *
     * @param from_cache Whether we have dealt with a packet from a cache
     * @param blk The block that should potentially be dropped
     */
    void maintainClusivity(bool from_cache, CacheBlk *blk);

    /**
     * Handle a fill operation caused by a received packet.
     *
     * Populates a cache block and handles all outstanding requests for the
     * satisfied fill request. This version takes two memory requests. One
     * contains the fill data, the other is an optional target to satisfy.
     * Note that the reason we return a list of writebacks rather than
     * inserting them directly in the write buffer is that this function
     * is called by both atomic and timing-mode accesses, and in atomic
     * mode we don't mess with the write buffer (we just perform the
     * writebacks atomically once the original request is complete).
     *
     * @param pkt The memory request with the fill data.
     * @param blk The cache block if it already exists.
     * @param writebacks List for any writebacks that need to be performed.
     * @param allocate Whether to allocate a block or use the temp block
     * @return Pointer to the new cache block.
     */
    CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
                         PacketList &writebacks, bool allocate);
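
    /**
     * Typical (sketched) use from a timing response path, assuming
     * the allocation decision comes from allocOnFill():
     *
     * @code
     * PacketList writebacks;
     * blk = handleFill(pkt, blk, writebacks, allocOnFill(pkt->cmd));
     * doWritebacks(writebacks, clockEdge(forwardLatency));
     * @endcode
     */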

    /**
     * Allocate a new block and perform any necessary writebacks
     *
     * Find a victim block and if necessary prepare writebacks for any
     * existing data. May return nullptr if there are no replaceable
     * blocks. If a replaceable block is found, it inserts the new block in
     * its place. The new block, however, is not set as valid yet.
     *
     * @param pkt Packet holding the address to update
     * @param writebacks A list of writeback packets for the evicted blocks
     * @return the allocated block
     */
    CacheBlk *allocateBlock(const PacketPtr pkt, PacketList &writebacks);

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @return A packet with the writeback, can be nullptr
     */
    M5_NODISCARD virtual PacketPtr evictBlock(CacheBlk *blk) = 0;

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @param writebacks Return a list of packets with writebacks
     */
    void evictBlock(CacheBlk *blk, PacketList &writebacks);

    /**
     * Invalidate a cache block.
     *
     * @param blk Block to invalidate
     */
    void invalidateBlock(CacheBlk *blk);

    /**
     * Create a writeback request for the given block.
     *
     * @param blk The block to writeback.
     * @return The writeback request for the block.
     */
    PacketPtr writebackBlk(CacheBlk *blk);

    /**
     * Create a writeclean request for the given block.
     *
     * Creates a request that writes the block to the cache below
     * without evicting the block from the current cache.
     *
     * @param blk The block to write clean.
     * @param dest The destination of the write clean operation.
     * @param id Use the given packet id for the write clean operation.
     * @return The generated write clean packet.
     */
    PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id);

    /**
     * Write back dirty blocks in the cache using functional accesses.
     */
    virtual void memWriteback() override;

    /**
     * Invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to
     * memory. Make sure to call memWriteback() first if you
     * want to write them to memory.
     */
    virtual void memInvalidate() override;

    /**
     * Determine if there are any dirty blocks in the cache.
     *
     * @return true if at least one block is dirty, false otherwise.
     */
    bool isDirty() const;

    /**
     * Determine if an address is in the ranges covered by this
     * cache. This is useful to filter snoops.
     *
     * @param addr Address to check against
     *
     * @return If the address in question is in range
     */
    bool inRange(Addr addr) const;

    /**
     * Find next request ready time from among possible sources.
     */
    Tick nextQueueReadyTime() const;

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of a tag lookup, incurred on every access to the
     * cache.
     */
    const Cycles lookupLatency;

    /**
     * The latency of a data array access, incurred on every access
     * to the cache.
     */
    const Cycles dataLatency;

    /**
     * This is the forward latency of the cache. It occurs when there
     * is a cache miss and a request is forwarded downstream, in
     * particular an outbound miss.
     */
    const Cycles forwardLatency;

    /** The latency to fill a cache block */
    const Cycles fillLatency;

    /**
     * The latency of sending a response to the upper-level cache/core
     * on a linefill. The responseLatency parameter captures this
     * latency.
     */
    const Cycles responseLatency;

    /**
     * Whether tags and data are accessed sequentially.
     */
    const bool sequentialAccess;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    bool forwardSnoops;

    /**
     * Clusivity with respect to the upstream cache, determining if we
     * fill into both this cache and the cache above on a miss. Note
     * that we currently do not support strict clusivity policies.
     */
    const Enums::Clusivity clusivity;

    /**
     * Is this cache read only, for example the instruction cache, or
     * table-walker cache. A cache that is read only should never see
     * any writes, and should never get any dirty data (and hence
     * never have to do any writebacks).
     */
    const bool isReadOnly;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;
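
    /**
     * Each BlockedCause occupies one bit; blocking on a full MSHR
     * queue would set and clear its bit roughly as follows (a sketch
     * of the encoding, for illustration):
     *
     * @code
     * blocked |= (1 << Blocked_NoMSHRs);   // block the cache
     * blocked &= ~(1 << Blocked_NoMSHRs);  // unblock
     * bool is_blocked = blocked != 0;
     * @endcode
     */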

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores the cycle at which the cache blocked, for statistics. */
    Cycles blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses.
     */
    const AddrRangeList addrRanges;

  public:
    /** System we are currently operating in. */
    System *system;

    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;
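
    /**
     * The rate formulas above are presumably registered as ratios of
     * the corresponding counters, e.g.:
     *
     * @code
     * demandMissRate = demandMisses / demandAccesses;
     * @endcode
     */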

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;
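
    /**
     * Presumably the ratio of the two vectors above:
     *
     * @code
     * avg_blocked = blocked_cycles / blocked_causes;
     * @endcode
     */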

    /** The number of times a HW-prefetched block is evicted w/o reference. */
    Stats::Scalar unusedPrefetches;

    /** Number of blocks written back per thread. */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of uncacheable accesses, per command and thread. */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable accesses. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of each uncacheable MSHR access, per command
        and thread. */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of overall uncacheable MSHR accesses. */
    Stats::Formula overallMshrUncacheableLatency;

#if 0
    /** The total number of MSHR accesses per command and thread. */
    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
    /** The total number of demand MSHR accesses. */
    Stats::Formula demandMshrAccesses;
    /** The total number of MSHR accesses. */
    Stats::Formula overallMshrAccesses;
#endif

    /** The miss rate in the MSHRs per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable MSHR access, per command
        and thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable MSHR access. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** Number of replacements of valid blocks. */
    Stats::Scalar replacements;
690 * Perform any necessary updates to the block and perform any data
691 * exchange between the packet and the block. The flags of the
692 * packet are also set accordingly.
693 *
694 * @param pkt Request packet from upstream that hit a block
695 * @param blk Cache block that the packet hit
696 * @param deferred_response Whether this request originally missed
697 * @param pending_downgrade Whether the writable flag is to be removed
698 */
699 virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
700 bool deferred_response = false,
701 bool pending_downgrade = false);
702
703 /**
704 * Maintain the clusivity of this cache by potentially
705 * invalidating a block. This method works in conjunction with
706 * satisfyRequest, but is separate to allow us to handle all MSHR
707 * targets before potentially dropping a block.
708 *
709 * @param from_cache Whether we have dealt with a packet from a cache
710 * @param blk The block that should potentially be dropped
711 */
712 void maintainClusivity(bool from_cache, CacheBlk *blk);
713
714 /**
715 * Handle a fill operation caused by a received packet.
716 *
717 * Populates a cache block and handles all outstanding requests for the
718 * satisfied fill request. This version takes two memory requests. One
719 * contains the fill data, the other is an optional target to satisfy.
720 * Note that the reason we return a list of writebacks rather than
721 * inserting them directly in the write buffer is that this function
722 * is called by both atomic and timing-mode accesses, and in atomic
723 * mode we don't mess with the write buffer (we just perform the
724 * writebacks atomically once the original request is complete).
725 *
726 * @param pkt The memory request with the fill data.
727 * @param blk The cache block if it already exists.
728 * @param writebacks List for any writebacks that need to be performed.
729 * @param allocate Whether to allocate a block or use the temp block
730 * @return Pointer to the new cache block.
731 */
732 CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
733 PacketList &writebacks, bool allocate);
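
    /*
     * Illustrative caller pattern (a sketch; the real timing- and
     * atomic-mode handling lives in the .cc implementations):
     *
     *   PacketList writebacks;
     *   CacheBlk *blk = handleFill(pkt, old_blk, writebacks, allocate);
     *   while (!writebacks.empty()) {
     *       PacketPtr wb_pkt = writebacks.front();
     *       writebacks.pop_front();
     *       // timing mode: queue wb_pkt in the write buffer;
     *       // atomic mode: perform the writeback immediately
     *   }
     */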
734
735 /**
736 * Allocate a new block and perform any necessary writebacks
737 *
738 * Find a victim block and if necessary prepare writebacks for any
739 * existing data. May return nullptr if there are no replaceable
740 * blocks. If a replaceable block is found, it inserts the new block in
741 * its place. The new block, however, is not set as valid yet.
742 *
743 * @param pkt Packet holding the address to update
744 * @param writebacks A list of writeback packets for the evicted blocks
745 * @return the allocated block
746 */
747 CacheBlk *allocateBlock(const PacketPtr pkt, PacketList &writebacks);
748 /**
749 * Evict a cache block.
750 *
751 * Performs a writeback if necessary and invalidates the block
752 *
753 * @param blk Block to invalidate
754 * @return A packet with the writeback, can be nullptr
755 */
756 M5_NODISCARD virtual PacketPtr evictBlock(CacheBlk *blk) = 0;
757
758 /**
759 * Evict a cache block.
760 *
761 * Performs a writeback if necessary and invalidates the block
762 *
763 * @param blk Block to invalidate
764 * @param writebacks Return a list of packets with writebacks
765 */
766 void evictBlock(CacheBlk *blk, PacketList &writebacks);
767
768 /**
769 * Invalidate a cache block.
770 *
771 * @param blk Block to invalidate
772 */
773 void invalidateBlock(CacheBlk *blk);
774
775 /**
776 * Create a writeback request for the given block.
777 *
778 * @param blk The block to writeback.
779 * @return The writeback request for the block.
780 */
781 PacketPtr writebackBlk(CacheBlk *blk);
782
783 /**
784 * Create a writeclean request for the given block.
785 *
786 * Creates a request that writes the block to the cache below
787 * without evicting the block from the current cache.
788 *
789 * @param blk The block to write clean.
790 * @param dest The destination of the write clean operation.
791 * @param id Use the given packet id for the write clean operation.
792 * @return The generated write clean packet.
793 */
794 PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id);
795
796 /**
797 * Write back dirty blocks in the cache using functional accesses.
798 */
799 virtual void memWriteback() override;
800
801 /**
802 * Invalidates all blocks in the cache.
803 *
804 * @warn Dirty cache lines will not be written back to
805 * memory. Make sure to call functionalWriteback() first if you
806 * want to write them to memory.
807 */
808 virtual void memInvalidate() override;
809
810 /**
811 * Determine if there are any dirty blocks in the cache.
812 *
813 * @return true if at least one block is dirty, false otherwise.
814 */
815 bool isDirty() const;
816
817 /**
818 * Determine if an address is in the ranges covered by this
819 * cache. This is useful to filter snoops.
820 *
821 * @param addr Address to check against
822 *
823 * @return If the address in question is in range
824 */
825 bool inRange(Addr addr) const;
826
827 /**
828 * Find next request ready time from among possible sources.
829 */
830 Tick nextQueueReadyTime() const;
831
832 /** Block size of this cache */
833 const unsigned blkSize;
834
835 /**
836 * The latency of a tag lookup in the cache. It is incurred
837 * whenever the cache is accessed.
838 */
839 const Cycles lookupLatency;
840
841 /**
842 * The latency of a data access in the cache. It is incurred
843 * when the cache's data array is accessed.
844 */
845 const Cycles dataLatency;
846
847 /**
848 * This is the forward latency of the cache. It occurs when there
849 * is a cache miss and a request is forwarded downstream, in
850 * particular an outbound miss.
851 */
852 const Cycles forwardLatency;
853
854 /** The latency to fill a cache block */
855 const Cycles fillLatency;
856
857 /**
858 * The latency of sending a response to the upper-level cache/core
859 * on a line fill. The responseLatency parameter captures this
860 * latency.
861 */
862 const Cycles responseLatency;
863
864 /**
865 * Whether tags and data are accessed sequentially.
866 */
867 const bool sequentialAccess;
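
    /*
     * How these latencies compose (an illustrative sketch; the exact
     * accounting is done in the cache implementation):
     *
     *   hit:  lookupLatency, plus dataLatency when sequentialAccess
     *         forces tags and data to be read back to back
     *   miss: forwardLatency to send the request downstream, then
     *         fillLatency to fill the block and responseLatency to
     *         respond to the upper level on the line fill
     */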
868
869 /** The number of targets for each MSHR. */
870 const int numTarget;
871
872 /** Do we forward snoops from mem side port through to cpu side port? */
873 bool forwardSnoops;
874
875 /**
876 * Clusivity with respect to the upstream cache, determining if we
877 * fill into both this cache and the cache above on a miss. Note
878 * that we currently do not support strict clusivity policies.
879 */
880 const Enums::Clusivity clusivity;
881
882 /**
883 * Is this cache read-only, for example an instruction cache or a
884 * table-walker cache? A read-only cache should never see any
885 * writes, and should never hold dirty data (and hence never needs
886 * to perform writebacks).
887 */
888 const bool isReadOnly;
889
890 /**
891 * Bit vector of the blocking reasons for the access path.
892 * @sa #BlockedCause
893 */
894 uint8_t blocked;
895
896 /** Increasing order number assigned to each incoming request. */
897 uint64_t order;
898
899 /** Stores time the cache blocked for statistics. */
900 Cycles blockedCycle;
901
902 /** Pointer to the MSHR that has no targets. */
903 MSHR *noTargetMSHR;
904
905 /** The number of misses to trigger an exit event. */
906 Counter missCount;
907
908 /**
909 * The address range to which the cache responds on the CPU side.
910 * Normally this is all possible memory addresses. */
911 const AddrRangeList addrRanges;
912
913 public:
914 /** System we are currently operating in. */
915 System *system;
916
917 // Statistics
918 /**
919 * @addtogroup CacheStatistics
920 * @{
921 */
922
923 /** Number of hits per thread for each type of command.
924 @sa Packet::Command */
925 Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
926 /** Number of hits for demand accesses. */
927 Stats::Formula demandHits;
928 /** Number of hits for all accesses. */
929 Stats::Formula overallHits;
930
931 /** Number of misses per thread for each type of command.
932 @sa Packet::Command */
933 Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
934 /** Number of misses for demand accesses. */
935 Stats::Formula demandMisses;
936 /** Number of misses for all accesses. */
937 Stats::Formula overallMisses;
938
939 /**
940 * Total number of cycles per thread/command spent waiting for a miss.
941 * Used to calculate the average miss latency.
942 */
943 Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
944 /** Total number of cycles spent waiting for demand misses. */
945 Stats::Formula demandMissLatency;
946 /** Total number of cycles spent waiting for all misses. */
947 Stats::Formula overallMissLatency;
948
949 /** The number of accesses per command and thread. */
950 Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
951 /** The number of demand accesses. */
952 Stats::Formula demandAccesses;
953 /** The number of overall accesses. */
954 Stats::Formula overallAccesses;
955
956 /** The miss rate per command and thread. */
957 Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
958 /** The miss rate of all demand accesses. */
959 Stats::Formula demandMissRate;
960 /** The miss rate for all accesses. */
961 Stats::Formula overallMissRate;
962
963 /** The average miss latency per command and thread. */
964 Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
965 /** The average miss latency for demand misses. */
966 Stats::Formula demandAvgMissLatency;
967 /** The average miss latency for all misses. */
968 Stats::Formula overallAvgMissLatency;
969
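    /*
     * These formulas are bound in regStats(); conceptually (a sketch
     * using the names declared above):
     *
     *   demandMissRate       = demandMisses / demandAccesses
     *   overallMissRate      = overallMisses / overallAccesses
     *   demandAvgMissLatency = demandMissLatency / demandMisses
     */
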
970 /** The total number of cycles blocked for each blocked cause. */
971 Stats::Vector blocked_cycles;
972 /** The number of times this cache blocked for each blocked cause. */
973 Stats::Vector blocked_causes;
974
975 /** The average number of cycles blocked for each blocked cause. */
976 Stats::Formula avg_blocked;
977
978 /** The number of times a HW-prefetched block is evicted w/o reference. */
979 Stats::Scalar unusedPrefetches;
980
981 /** Number of blocks written back per thread. */
982 Stats::Vector writebacks;
983
984 /** Number of misses that hit in the MSHRs per command and thread. */
985 Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
986 /** Demand misses that hit in the MSHRs. */
987 Stats::Formula demandMshrHits;
988 /** Total number of misses that hit in the MSHRs. */
989 Stats::Formula overallMshrHits;
990
991 /** Number of misses that miss in the MSHRs, per command and thread. */
992 Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
993 /** Demand misses that miss in the MSHRs. */
994 Stats::Formula demandMshrMisses;
995 /** Total number of misses that miss in the MSHRs. */
996 Stats::Formula overallMshrMisses;
997
998 /** Number of uncacheable misses in the MSHRs, per command and thread. */
999 Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
1000 /** Total number of uncacheable MSHR misses. */
1001 Stats::Formula overallMshrUncacheable;
1002
1003 /** Total cycle latency of each MSHR miss, per command and thread. */
1004 Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
1005 /** Total cycle latency of demand MSHR misses. */
1006 Stats::Formula demandMshrMissLatency;
1007 /** Total cycle latency of overall MSHR misses. */
1008 Stats::Formula overallMshrMissLatency;
1009
1010 /** Total cycle latency of each uncacheable MSHR miss, per command and thread. */
1011 Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
1012 /** Total cycle latency of overall uncacheable MSHR misses. */
1013 Stats::Formula overallMshrUncacheableLatency;
1014
1015#if 0
1016 /** The total number of MSHR accesses per command and thread. */
1017 Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
1018 /** The total number of demand MSHR accesses. */
1019 Stats::Formula demandMshrAccesses;
1020 /** The total number of MSHR accesses. */
1021 Stats::Formula overallMshrAccesses;
1022#endif
1023
1024 /** The miss rate in the MSHRs per command and thread. */
1025 Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
1026 /** The demand miss rate in the MSHRs. */
1027 Stats::Formula demandMshrMissRate;
1028 /** The overall miss rate in the MSHRs. */
1029 Stats::Formula overallMshrMissRate;
1030
1031 /** The average latency of an MSHR miss, per command and thread. */
1032 Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
1033 /** The average latency of a demand MSHR miss. */
1034 Stats::Formula demandAvgMshrMissLatency;
1035 /** The average overall latency of an MSHR miss. */
1036 Stats::Formula overallAvgMshrMissLatency;
1037
1038 /** The average latency of an uncacheable MSHR miss, per command and thread. */
1039 Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
1040 /** The average overall latency of an uncacheable MSHR miss. */
1041 Stats::Formula overallAvgMshrUncacheableLatency;
1042
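    /*
     * Likewise bound in regStats(); conceptually (a sketch):
     *
     *   demandMshrMissRate       = demandMshrMisses / demandAccesses
     *   demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses
     */
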
1043 /** Number of replacements of valid blocks. */
1044 Stats::Scalar replacements;
1045
1046 /** Number of data expansions. */
1047 Stats::Scalar dataExpansions;
1048
1049 /**
1050 * @}
1051 */
1052
1053 /**
1054 * Register stats for this object.
1055 */
1056 void regStats() override;
1057
1058 /** Registers probes. */
1059 void regProbePoints() override;
1060
1061 public:
1062 BaseCache(const BaseCacheParams *p, unsigned blk_size);
1063 ~BaseCache();
1064
1065 void init() override;
1066
1067 Port &getPort(const std::string &if_name,
1068 PortID idx=InvalidPortID) override;
1069
1070 /**
1071 * Query block size of a cache.
1072 * @return The block size
1073 */
1074 unsigned
1075 getBlockSize() const
1076 {
1077 return blkSize;
1078 }
1079
1080 const AddrRangeList &getAddrRanges() const { return addrRanges; }
1081
1082 MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
1083 {
1084 MSHR *mshr = mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize,
1085 pkt, time, order++,
1086 allocOnFill(pkt->cmd));
1087
1088 if (mshrQueue.isFull()) {
1089 setBlocked((BlockedCause)MSHRQueue_MSHRs);
1090 }
1091
1092 if (sched_send) {
1093 // schedule the send
1094 schedMemSideSendEvent(time);
1095 }
1096
1097 return mshr;
1098 }
1099
1100 void allocateWriteBuffer(PacketPtr pkt, Tick time)
1101 {
1102 // should only see writes or clean evicts here
1103 assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);
1104
1105 Addr blk_addr = pkt->getBlockAddr(blkSize);
1106
1107 // If using compression, on evictions the block is decompressed and
1108 // the operation's latency is added to the payload delay. Consume
1109 // that payload delay here, meaning that the data is always stored
1110 // uncompressed in the write buffer
1111 if (compressor) {
1112 time += pkt->payloadDelay;
1113 pkt->payloadDelay = 0;
1114 }
1115
1116 WriteQueueEntry *wq_entry =
1117 writeBuffer.findMatch(blk_addr, pkt->isSecure());
1118 if (wq_entry && !wq_entry->inService) {
1119 DPRINTF(Cache, "Potential to merge writeback %s\n", pkt->print());
1120 }
1121
1122 writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);
1123
1124 if (writeBuffer.isFull()) {
1125 setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
1126 }
1127
1128 // schedule the send
1129 schedMemSideSendEvent(time);
1130 }
1131
1132 /**
1133 * Returns true if the cache is blocked for accesses.
1134 */
1135 bool isBlocked() const
1136 {
1137 return blocked != 0;
1138 }
1139
1140 /**
1141 * Marks the access path of the cache as blocked for the given cause. This
1142 * also sets the blocked flag in the slave interface.
1143 * @param cause The reason for the cache blocking.
1144 */
1145 void setBlocked(BlockedCause cause)
1146 {
1147 uint8_t flag = 1 << cause;
1148 if (blocked == 0) {
1149 blocked_causes[cause]++;
1150 blockedCycle = curCycle();
1151 cpuSidePort.setBlocked();
1152 }
1153 blocked |= flag;
1154 DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
1155 }
1156
1157 /**
1158 * Marks the cache as unblocked for the given cause. This also clears the
1159 * blocked flags in the appropriate interfaces.
1160 * @param cause The newly unblocked cause.
1161 * @warning Calling this function can cause a blocked request on the bus to
1162 * access the cache. The cache must be in a state to handle that request.
1163 */
1164 void clearBlocked(BlockedCause cause)
1165 {
1166 uint8_t flag = 1 << cause;
1167 blocked &= ~flag;
1168 DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
1169 if (blocked == 0) {
1170 blocked_cycles[cause] += curCycle() - blockedCycle;
1171 cpuSidePort.clearBlocked();
1172 }
1173 }
1174
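    /*
     * Illustrative blocking lifecycle (a sketch): when a resource
     * such as the MSHR queue fills up, the access path blocks for
     * that cause, and unblocks once an entry is freed again:
     *
     *   setBlocked((BlockedCause)MSHRQueue_MSHRs);   // blocked |= 1 << cause
     *   ...
     *   clearBlocked((BlockedCause)MSHRQueue_MSHRs); // blocked &= ~(1 << cause)
     */
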
1175 /**
1176 * Schedule a send event for the memory-side port. If already
1177 * scheduled, this may reschedule the event at an earlier
1178 * time. When the specified time is reached, the port is free to
1179 * send either a response, a request, or a prefetch request.
1180 *
1181 * @param time The time when to attempt sending a packet.
1182 */
1183 void schedMemSideSendEvent(Tick time)
1184 {
1185 memSidePort.schedSendEvent(time);
1186 }
1187
1188 bool inCache(Addr addr, bool is_secure) const {
1189 return tags->findBlock(addr, is_secure);
1190 }
1191
1192 bool hasBeenPrefetched(Addr addr, bool is_secure) const {
1193 CacheBlk *block = tags->findBlock(addr, is_secure);
1194 if (block) {
1195 return block->wasPrefetched();
1196 } else {
1197 return false;
1198 }
1199 }
1200
1201 bool inMissQueue(Addr addr, bool is_secure) const {
1202 return mshrQueue.findMatch(addr, is_secure);
1203 }
1204
1205 void incMissCount(PacketPtr pkt)
1206 {
1207 assert(pkt->req->masterId() < system->maxMasters());
1208 misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
1209 pkt->req->incAccessDepth();
1210 if (missCount) {
1211 --missCount;
1212 if (missCount == 0)
1213 exitSimLoop("A cache reached the maximum miss count");
1214 }
1215 }
1216 void incHitCount(PacketPtr pkt)
1217 {
1218 assert(pkt->req->masterId() < system->maxMasters());
1219 hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
1220
1221 }
1222
1223 /**
1224 * Checks if the cache is coalescing writes
1225 *
1226 * @return True if the cache is coalescing writes
1227 */
1228 bool coalesce() const;
1229
1230
1231 /**
1232 * Cache block visitor that writes back dirty cache blocks using
1233 * functional writes.
1234 */
1235 void writebackVisitor(CacheBlk &blk);
1236
1237 /**
1238 * Cache block visitor that invalidates all blocks in the cache.
1239 *
1240 * @warn Dirty cache lines will not be written back to memory.
1241 */
1242 void invalidateVisitor(CacheBlk &blk);
1243
1244 /**
1245 * Take an MSHR, turn it into a suitable downstream packet, and
1246 * send it out. This construct allows a queue entry to choose a suitable
1247 * approach based on its type.
1248 *
1249 * @param mshr The MSHR to turn into a packet and send
1250 * @return True if the port is waiting for a retry
1251 */
1252 virtual bool sendMSHRQueuePacket(MSHR* mshr);
1253
1254 /**
1255 * Similar to sendMSHRQueuePacket, but for a write-queue entry
1256 * instead. Create the packet, and send it, and if successful also
1257 * mark the entry in service.
1258 *
1259 * @param wq_entry The write-queue entry to turn into a packet and send
1260 * @return True if the port is waiting for a retry
1261 */
1262 bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);
1263
1264 /**
1265 * Serialize the state of the caches
1266 *
1267 * We currently don't support checkpointing cache state, so this panics.
1268 */
1269 void serialize(CheckpointOut &cp) const override;
1270 void unserialize(CheckpointIn &cp) override;
1271};
1272
1273/**
1274 * The write allocator inspects write packets and detects streaming
1275 * patterns. It supports a single stream in which writes are
1276 * expected to access consecutive locations, and tracks the size of
1277 * the area covered by those consecutive writes in byteCount.
1278 *
1279 * 1) Once byteCount surpasses the coalesceLimit, the mode switches
1280 * from ALLOCATE to COALESCE, where writes are delayed until the
1281 * whole block has been written, at which point a single packet
1282 * (whole-line write) can service them.
1283 *
1284 * 2) Once byteCount also exceeds the noAllocateLimit (a whole
1285 * line), we switch to NO_ALLOCATE, where writes do not allocate in
1286 * the cache but instead send a whole-line write to the memory below.
1287 */
1288class WriteAllocator : public SimObject {
1289 public:
1290 WriteAllocator(const WriteAllocatorParams *p) :
1291 SimObject(p),
1292 coalesceLimit(p->coalesce_limit * p->block_size),
1293 noAllocateLimit(p->no_allocate_limit * p->block_size),
1294 delayThreshold(p->delay_threshold)
1295 {
1296 reset();
1297 }
1298
1299 /**
1300 * Should writes be coalesced? This is true whenever the mode is
1301 * COALESCE or NO_ALLOCATE, i.e., anything other than ALLOCATE.
1302 *
1303 * @return true if the cache should coalesce writes.
1304 */
1305 bool coalesce() const {
1306 return mode != WriteMode::ALLOCATE;
1307 }
1308
1309 /**
1310 * Should writes allocate?
1311 *
1312 * @return true if the cache should allocate for writes.
1313 */
1314 bool allocate() const {
1315 return mode != WriteMode::NO_ALLOCATE;
1316 }
1317
1318 /**
1319 * Reset the write allocator state, meaning that it allocates for
1320 * writes and has not recorded any information about qualifying
1321 * writes that might trigger a switch to coalescing and later no
1322 * allocation.
1323 */
1324 void reset() {
1325 mode = WriteMode::ALLOCATE;
1326 byteCount = 0;
1327 nextAddr = 0;
1328 }
1329
1330 /**
1331 * Check whether we need to delay the current write; each query decrements the block's remaining delay count.
1332 *
1333 * @param blk_addr The block address the packet writes to
1334 * @return true if the current packet should be delayed
1335 */
1336 bool delay(Addr blk_addr) {
1337 if (delayCtr[blk_addr] > 0) {
1338 --delayCtr[blk_addr];
1339 return true;
1340 } else {
1341 return false;
1342 }
1343 }
1344
1345 /**
1346 * Clear delay counter for the input block
1347 *
1348 * @param blk_addr The accessed cache block
1349 */
1350 void resetDelay(Addr blk_addr) {
1351 delayCtr.erase(blk_addr);
1352 }
1353
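    /*
     * Illustrative interplay of the delay machinery (a sketch; the
     * real call sites are in the cache's MSHR send path):
     *
     *   if (writeAllocator->delay(blk_addr)) {
     *       // hold this WriteReq MSHR back for now, giving later
     *       // writes a chance to complete the whole line
     *   }
     *   ...
     *   writeAllocator->resetDelay(blk_addr); // once the block is handled
     */
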
1354 /**
1355 * Update the write mode based on the current write
1356 * packet. This method compares the packet's address with any
1357 * current stream, and updates the tracking and the mode
1358 * accordingly.
1359 *
1360 * @param write_addr Start address of the write request
1361 * @param write_size Size of the write request
1362 * @param blk_addr The block address that this packet writes to
1363 */
1364 void updateMode(Addr write_addr, unsigned write_size, Addr blk_addr);
1365
1366 private:
1367 /**
1368 * The current mode for write coalescing and allocation, either
1369 * normal operation (ALLOCATE), write coalescing (COALESCE), or
1370 * write coalescing without allocation (NO_ALLOCATE).
1371 */
1372 enum class WriteMode : char {
1373 ALLOCATE,
1374 COALESCE,
1375 NO_ALLOCATE,
1376 };
1377 WriteMode mode;
1378
1379 /** Address to match writes against to detect streams. */
1380 Addr nextAddr;
1381
1382 /**
1383 * Bytes written contiguously. Saturating once we no longer
1384 * allocate.
1385 */
1386 uint32_t byteCount;
1387
1388 /**
1389 * Limits for when to switch between the different write modes.
1390 */
1391 const uint32_t coalesceLimit;
1392 const uint32_t noAllocateLimit;
1393 /**
1394 * The number of times the allocator will delay a WriteReq MSHR.
1395 */
1396 const uint32_t delayThreshold;
1397
1398 /**
1399 * Keep track, per block address, of the number of times the
1400 * allocator has delayed a WriteReq MSHR.
1401 */
1402 std::unordered_map<Addr, Counter> delayCtr;
1403};
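
/*
 * Minimal usage sketch (illustrative; a cache drives its optional
 * WriteAllocator roughly along these lines on each write):
 *
 *   if (writeAllocator && pkt->isWrite()) {
 *       writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
 *                                  pkt->getBlockAddr(blkSize));
 *       if (!writeAllocator->allocate()) {
 *           // stream exceeded noAllocateLimit: forward a whole-line
 *           // write downstream instead of allocating in this cache
 *       }
 *   }
 */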
1404
1405#endif //__MEM_CACHE_BASE_HH__