/*
 * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Hansson
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __MEM_CACHE_BASE_HH__
#define __MEM_CACHE_BASE_HH__

#include <cassert>
#include <cstdint>
#include <string>
#include <unordered_map>

#include "base/addr_range.hh"
#include "base/compiler.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/compressors/base.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/packet.hh"
#include "mem/packet_queue.hh"
#include "mem/qport.hh"
#include "mem/request.hh"
#include "params/WriteAllocator.hh"
#include "sim/clocked_object.hh"
#include "sim/eventq.hh"
#include "sim/probe/probe.hh"
#include "sim/serialize.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

class BaseMasterPort;
class BasePrefetcher;
class BaseSlavePort;
class MSHR;
class MasterPort;
class QueueEntry;
struct BaseCacheParams;

/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public ClockedObject
{
  protected:
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };

  public:
    /**
     * Reasons for caches to be blocked.
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };

  protected:

    /**
     * A cache master port is used for the memory-side port of the
     * cache, and in addition to the basic timing port that only sends
     * response packets through a transmit list, it also offers the
     * ability to schedule and send request packets (requests &
     * writebacks). The send event is scheduled through schedSendEvent,
     * and the sendDeferredPacket of the timing port is modified to
     * consider both the transmit list and the requests from the MSHR.
     */
    class CacheMasterPort : public QueuedMasterPort
    {

      public:

        /**
         * Schedule a send of a request packet (from the MSHR). Note
         * that we could already have a retry outstanding.
         */
        void schedSendEvent(Tick time)
        {
            DPRINTF(CachePort, "Scheduling send event at %llu\n", time);
            reqQueue.schedSendEvent(time);
        }

      protected:

        CacheMasterPort(const std::string &_name, BaseCache *_cache,
                        ReqPacketQueue &_reqQueue,
                        SnoopRespPacketQueue &_snoopRespQueue) :
            QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
        { }

        /**
         * Memory-side port always snoops.
         *
         * @return always true
         */
        virtual bool isSnooping() const { return true; }
    };

    /**
     * Override the default behaviour of sendDeferredPacket to enable
     * the memory-side cache port to also send requests based on the
     * current MSHR status. This queue has a pointer to our specific
     * cache implementation and is used by the MemSidePort.
     */
    class CacheReqPacketQueue : public ReqPacketQueue
    {

      protected:

        BaseCache &cache;
        SnoopRespPacketQueue &snoopRespQueue;

      public:

        CacheReqPacketQueue(BaseCache &cache, MasterPort &port,
                            SnoopRespPacketQueue &snoop_resp_queue,
                            const std::string &label) :
            ReqPacketQueue(cache, port, label), cache(cache),
            snoopRespQueue(snoop_resp_queue) { }

        /**
         * Override the normal sendDeferredPacket and consider not
         * only the transmit list (used for responses), but also
         * requests.
         */
        virtual void sendDeferredPacket();

        /**
         * Check if there is a conflicting snoop response about to be
         * sent out, and if so simply stall any requests, and schedule
         * a send event at the same time as the next snoop response is
         * being sent out.
         *
         * @param pkt The packet to check for conflicts against.
         */
        bool checkConflictingSnoop(const PacketPtr pkt)
        {
            if (snoopRespQueue.checkConflict(pkt, cache.blkSize)) {
                DPRINTF(CachePort, "Waiting for snoop response to be "
                        "sent\n");
                Tick when = snoopRespQueue.deferredPacketReadyTime();
                schedSendEvent(when);
                return true;
            }
            return false;
        }
    };


    /**
     * The memory-side port extends the base cache master port with
     * access functions for functional, atomic and timing snoops.
     */
    class MemSidePort : public CacheMasterPort
    {
      private:

        /** The cache-specific queue. */
        CacheReqPacketQueue _reqQueue;

        SnoopRespPacketQueue _snoopRespQueue;

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:

        virtual void recvTimingSnoopReq(PacketPtr pkt);

        virtual bool recvTimingResp(PacketPtr pkt);

        virtual Tick recvAtomicSnoop(PacketPtr pkt);

        virtual void recvFunctionalSnoop(PacketPtr pkt);

      public:

        MemSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);
    };

    /**
     * A cache slave port is used for the CPU-side port of the cache,
     * and it is basically a simple timing port that uses a transmit
     * list for responses to the CPU (or connected master). In
     * addition, it has the functionality to block the port for
     * incoming requests. If blocked, the port will issue a retry once
     * unblocked.
     */
    class CacheSlavePort : public QueuedSlavePort
    {

      public:

        /** Do not accept any new requests. */
        void setBlocked();

        /** Return to normal operation and accept new requests. */
        void clearBlocked();

        bool isBlocked() const { return blocked; }

      protected:

        CacheSlavePort(const std::string &_name, BaseCache *_cache,
                       const std::string &_label);

        /** A normal packet queue used to store responses. */
        RespPacketQueue queue;

        bool blocked;

        bool mustSendRetry;

      private:

        void processSendRetry();

        EventFunctionWrapper sendRetryEvent;

    };

    /**
     * The CPU-side port extends the base cache slave port with access
     * functions for functional, atomic and timing requests.
     */
    class CpuSidePort : public CacheSlavePort
    {
      private:

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:
        virtual bool recvTimingSnoopResp(PacketPtr pkt) override;

        virtual bool tryTiming(PacketPtr pkt) override;

        virtual bool recvTimingReq(PacketPtr pkt) override;

        virtual Tick recvAtomic(PacketPtr pkt) override;

        virtual void recvFunctional(PacketPtr pkt) override;

        virtual AddrRangeList getAddrRanges() const override;

      public:

        CpuSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);

    };

    CpuSidePort cpuSidePort;
    MemSidePort memSidePort;

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    WriteQueue writeBuffer;

    /** Tag and data storage */
    BaseTags *tags;

    /** Compression method being used. */
    BaseCacheCompressor* compressor;

    /** Prefetcher */
    BasePrefetcher *prefetcher;

    /** To probe when a cache hit occurs */
    ProbePointArg<PacketPtr> *ppHit;

    /** To probe when a cache miss occurs */
    ProbePointArg<PacketPtr> *ppMiss;

    /** To probe when a cache fill occurs */
    ProbePointArg<PacketPtr> *ppFill;

    /**
     * The writeAllocator drives optimizations for streaming writes.
     * It first determines whether a WriteReq MSHR should be delayed,
     * thus ensuring that we wait longer in cases when we are write
     * coalescing and allowing all the bytes of the line to be written
     * before the MSHR packet is sent downstream. This works in unison
     * with the tracking in the MSHR to check if the entire line is
     * written. The write mode also affects the behaviour on filling
     * any whole-line writes. Normally the cache allocates the line
     * when receiving the InvalidateResp, but after seeing enough
     * consecutive lines we switch to using the tempBlock, and thus
     * end up not allocating the line, and instead turning the
     * whole-line write into a writeback straight away.
     */
    WriteAllocator * const writeAllocator;

    /**
     * Temporary cache block for occasional transitory use. We use
     * the tempBlock to fill when allocation fails (e.g., when there
     * is an outstanding request that accesses the victim block) or
     * when we want to avoid allocation (e.g., exclusive caches)
     */
    TempCacheBlk *tempBlock;

    /**
     * Upstream caches need this packet until true is returned, so
     * hold it for deletion until a subsequent call
     */
    std::unique_ptr<Packet> pendingDelete;

    /**
     * Mark a request as in service (sent downstream in the memory
     * system), effectively making this MSHR the ordering point.
     */
    void markInService(MSHR *mshr, bool pending_modified_resp)
    {
        bool wasFull = mshrQueue.isFull();
        mshrQueue.markInService(mshr, pending_modified_resp);

        if (wasFull && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }
    }

    void markInService(WriteQueueEntry *entry)
    {
        bool wasFull = writeBuffer.isFull();
        writeBuffer.markInService(entry);

        if (wasFull && !writeBuffer.isFull()) {
            clearBlocked(Blocked_NoWBBuffers);
        }
    }

    /**
     * Determine whether we should allocate on a fill or not. If this
     * cache is mostly inclusive with regards to the upstream cache(s)
     * we always allocate (for any non-forwarded and cacheable
     * requests). In the case of a mostly exclusive cache, we allocate
     * on fill only if the packet did not come from a cache, that is if
     * we are dealing with a whole-line write (which behaves much like
     * a writeback), the original target packet came from a
     * non-caching source, or we are performing a prefetch or LLSC.
     *
     * @param cmd Command of the incoming requesting packet
     * @return Whether we should allocate on the fill
     */
    inline bool allocOnFill(MemCmd cmd) const
    {
        return clusivity == Enums::mostly_incl ||
            cmd == MemCmd::WriteLineReq ||
            cmd == MemCmd::ReadReq ||
            cmd == MemCmd::WriteReq ||
            cmd.isPrefetch() ||
            cmd.isLLSC();
    }
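
    // A hedged illustration of the decision above (not part of the
    // original interface): under a mostly-exclusive clusivity only the
    // listed commands allocate on fill, for example
    //
    //   allocOnFill(MemCmd::WriteLineReq); // true: whole-line write
    //   allocOnFill(MemCmd::ReadReq);      // true: non-caching source
    //   allocOnFill(MemCmd::ReadExReq);    // false: the request came
    //                                      // from an upstream cache
    //
    // whereas with Enums::mostly_incl the first disjunct makes every
    // cacheable fill allocate regardless of the command.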

    /**
     * Regenerate block address using tags.
     * Block address regeneration depends on whether we're using a temporary
     * block or not.
     *
     * @param blk The block whose address is to be regenerated.
     * @return The block's address.
     */
    Addr regenerateBlkAddr(CacheBlk* blk);

    /**
     * Calculate latency of accesses that only touch the tag array.
     * @sa calculateAccessLatency
     *
     * @param delay The delay until the packet's metadata is present.
     * @param lookup_lat Latency of the respective tag lookup.
     * @return The number of ticks that pass due to a tag-only access.
     */
    Cycles calculateTagOnlyLatency(const uint32_t delay,
                                   const Cycles lookup_lat) const;

    /**
     * Calculate access latency in ticks given a tag lookup latency, and
     * whether access was a hit or miss.
     *
     * @param blk The cache block that was accessed.
     * @param delay The delay until the packet's metadata is present.
     * @param lookup_lat Latency of the respective tag lookup.
     * @return The number of ticks that pass due to a block access.
     */
    Cycles calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
                                  const Cycles lookup_lat) const;
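
    // A hedged sketch of the latency math these declarations document
    // (the definitive code lives in base.cc): a tag-only access waits
    // for the packet's metadata and then performs the lookup,
    //
    //   tag_only = ticksToCycles(delay) + lookup_lat;
    //
    // while a hit additionally pays for the data array, either in
    // series with the tag lookup or in parallel with it:
    //
    //   hit = sequentialAccess
    //       ? ticksToCycles(delay) + lookup_lat + dataLatency
    //       : ticksToCycles(delay) + std::max(lookup_lat, dataLatency);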

    /**
     * Does all the processing necessary to perform the provided request.
     * @param pkt The memory request to perform.
     * @param blk The cache block to be updated.
     * @param lat The latency of the access.
     * @return Boolean indicating whether the request was satisfied.
     */
    virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat);

    /*
     * Handle a timing request that hit in the cache
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk,
                                    Tick request_time);

    /*
     * Handle a timing request that missed in the cache
     *
     * Implementation specific handling for different cache
     * implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk,
                                     Tick forward_time,
                                     Tick request_time) = 0;

    /*
     * Handle a timing request that missed in the cache
     *
     * Common functionality across different cache implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param mshr Any existing mshr for the referenced cache block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    void handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                             Tick forward_time, Tick request_time);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     */
    virtual void recvTimingReq(PacketPtr pkt);

    /**
     * Handling the special case of uncacheable write responses to
     * make recvTimingResp less cluttered.
     */
    void handleUncacheableWriteResp(PacketPtr pkt);

    /**
     * Service non-deferred MSHR targets using the received response
     *
     * Iterates through the list of targets that can be serviced with
     * the current response.
     *
     * @param mshr The MSHR that corresponds to the response
     * @param pkt The response packet
     * @param blk The reference block
     */
    virtual void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt,
                                    CacheBlk *blk) = 0;

    /**
     * Handles a response (cache line fill/write ack) from the bus.
     * @param pkt The response packet
     */
    virtual void recvTimingResp(PacketPtr pkt);

    /**
     * Snoops bus transactions to maintain coherence.
     * @param pkt The current bus transaction.
     */
    virtual void recvTimingSnoopReq(PacketPtr pkt) = 0;

    /**
     * Handle a snoop response.
     * @param pkt Snoop response packet
     */
    virtual void recvTimingSnoopResp(PacketPtr pkt) = 0;

    /**
     * Handle a request in atomic mode that missed in this cache
     *
     * Creates a downstream request, sends it to the memory below and
     * handles the response. As we are in atomic mode all operations
     * are performed immediately.
     *
     * @param pkt The packet with the requests
     * @param blk The referenced block
     * @return Cycles for handling the request
     */
    virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk) = 0;

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The number of ticks required for the access.
     */
    virtual Tick recvAtomic(PacketPtr pkt);

    /**
     * Snoop for the provided request in the cache and return the estimated
     * time taken.
     * @param pkt The memory request to snoop
     * @return The number of ticks required for the snoop.
     */
    virtual Tick recvAtomicSnoop(PacketPtr pkt) = 0;

    /**
     * Performs the access specified by the request.
     *
     * @param pkt The request to perform.
     * @param from_cpu_side True if the access comes from the CPU-side
     * port, false if it comes from the memory-side port.
     */
    virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side);

    /**
     * Handle doing the Compare and Swap function for SPARC.
     */
    void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);

    /**
     * Return the next queue entry to service, either a pending miss
     * from the MSHR queue, a buffered write from the write buffer, or
     * something from the prefetcher. This function is responsible
     * for prioritizing among those sources on the fly.
     */
    QueueEntry* getNextQueueEntry();

    /**
     * Insert writebacks into the write buffer
     *
     * @param pkt The writeback packet.
     * @param forward_time Tick to which the writeback should be scheduled.
     */
    virtual void doWritebacks(PacketPtr pkt, Tick forward_time) = 0;

    /**
     * Send writebacks down the memory hierarchy in atomic mode.
     *
     * @param pkt The writeback packet.
     */
    virtual void doWritebacksAtomic(PacketPtr pkt) = 0;

    /**
     * Create an appropriate downstream bus request packet.
     *
     * Creates a new packet with the request to be sent to the memory
     * below, or nullptr if the current request in cpu_pkt should just
     * be forwarded on.
     *
     * @param cpu_pkt The miss packet that needs to be satisfied.
     * @param blk The referenced block, can be nullptr.
     * @param needs_writable Indicates that the block must be writable
     * even if the request in cpu_pkt doesn't indicate that.
     * @param is_whole_line_write True if there are writes for the
     * whole line
     * @return A packet to send to the memory below
     */
    virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                                       bool needs_writable,
                                       bool is_whole_line_write) const = 0;

    /**
     * Determine if clean lines should be written back or not. In
     * cases where a downstream cache is mostly inclusive we likely
     * want it to act as a victim cache also for lines that have not
     * been modified. Hence, we cannot simply drop the line (or send a
     * clean evict), but rather need to send the actual data.
     */
    const bool writebackClean;

    /**
     * Writebacks from the tempBlock, resulting on the response path
     * in atomic mode, must happen after the call to recvAtomic has
     * finished (for the right ordering of the packets). We therefore
     * need to hold on to the packets, and have a method and an event
     * to send them.
     */
    PacketPtr tempBlockWriteback;

    /**
     * Send the outstanding tempBlock writeback. To be called after
     * recvAtomic finishes in cases where the block we filled is in
     * fact the tempBlock, and now needs to be written back.
     */
    void writebackTempBlockAtomic() {
        assert(tempBlockWriteback != nullptr);
        doWritebacksAtomic(tempBlockWriteback);
        tempBlockWriteback = nullptr;
    }

    /**
     * An event to writeback the tempBlock after recvAtomic
     * finishes. To avoid other calls to recvAtomic getting in
     * between, we create this event with a higher priority.
     */
    EventFunctionWrapper writebackTempBlockAtomicEvent;
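
    // A hedged sketch of how this event is plausibly constructed (the
    // actual member initializer lives in the constructor in base.cc):
    // wrapping writebackTempBlockAtomic in a lambda and using a
    // higher-than-default priority so the writeback runs before any
    // subsequent recvAtomic scheduled for the same tick.
    //
    //   writebackTempBlockAtomicEvent(
    //       [this]{ writebackTempBlockAtomic(); }, name(), false,
    //       EventBase::Delayed_Writeback_Pri)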

    /**
     * When a block is overwritten, its compression information must be
     * updated, and it may need to be recompressed. If the compression size
     * changes, the block may either become smaller, in which case there is
     * no side effect, or bigger (data expansion; fat write), in which case
     * the block might not fit in its current location anymore. If that
     * happens, there are usually two options to be taken:
     *
     * - The co-allocated blocks must be evicted to make room for this block.
     *   Simpler, but ignores replacement data.
     * - The block itself is moved elsewhere (used in policies where the CF
     *   determines the location of the block).
     *
     * This implementation uses the first approach.
     *
     * Notice that this is only called for writebacks, which means that L1
     * caches (which see regular Writes) do not support compression.
     * @sa CompressedTags
     *
     * @param blk The block to be overwritten.
     * @param data A pointer to the data to be compressed (blk's new data).
     * @param delay The delay until the packet's metadata is present.
     * @param tag_latency Latency to access the tags of the replacement victim.
     * @return Whether operation is successful or not.
     */
    bool updateCompressionData(CacheBlk *blk, const uint64_t* data,
                               uint32_t delay, Cycles tag_latency);

    /**
     * Perform any necessary updates to the block and perform any data
     * exchange between the packet and the block. The flags of the
     * packet are also set accordingly.
     *
     * @param pkt Request packet from upstream that hit a block
     * @param blk Cache block that the packet hit
     * @param deferred_response Whether this request originally missed
     * @param pending_downgrade Whether the writable flag is to be removed
     */
    virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                                bool deferred_response = false,
                                bool pending_downgrade = false);

    /**
     * Maintain the clusivity of this cache by potentially
     * invalidating a block. This method works in conjunction with
     * satisfyRequest, but is separate to allow us to handle all MSHR
     * targets before potentially dropping a block.
     *
     * @param from_cache Whether we have dealt with a packet from a cache
     * @param blk The block that should potentially be dropped
     */
    void maintainClusivity(bool from_cache, CacheBlk *blk);

    /**
     * Handle a fill operation caused by a received packet.
     *
     * Populates a cache block and handles all outstanding requests for the
     * satisfied fill request. The packet carries the fill data, and may
     * also be a target to satisfy.
     *
     * @param pkt The memory request with the fill data.
     * @param blk The cache block if it already exists.
     * @param allocate Whether to allocate a block or use the temp block
     * @return Pointer to the new cache block.
     */
    CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk, bool allocate);

    /**
     * Allocate a new block for the packet's data. The victim block might be
     * valid, and thus the necessary writebacks are done. May return nullptr
     * if there are no replaceable blocks. If a replaceable block is found,
     * it inserts the new block in its place. The new block, however, is not
     * set as valid yet.
     *
     * @param pkt Packet holding the address to update
     * @param tag_latency Latency to access the tags of the replacement victim.
     * @return the allocated block
     */
    CacheBlk *allocateBlock(const PacketPtr pkt, Cycles tag_latency);

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @return A packet with the writeback, can be nullptr
     */
    M5_NODISCARD virtual PacketPtr evictBlock(CacheBlk *blk) = 0;

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @param forward_time Tick to which the writeback should be scheduled if
     * in timing mode.
     */
    void evictBlock(CacheBlk *blk, Tick forward_time);

    /**
     * Invalidate a cache block.
     *
     * @param blk Block to invalidate
     */
    void invalidateBlock(CacheBlk *blk);

    /**
     * Create a writeback request for the given block.
     *
     * @param blk The block to writeback.
     * @return The writeback request for the block.
     */
    PacketPtr writebackBlk(CacheBlk *blk);

    /**
     * Create a writeclean request for the given block.
     *
     * Creates a request that writes the block to the cache below
     * without evicting the block from the current cache.
     *
     * @param blk The block to write clean.
     * @param dest The destination of the write clean operation.
     * @param id Use the given packet id for the write clean operation.
     * @return The generated write clean packet.
     */
    PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id);

    /**
     * Write back dirty blocks in the cache using functional accesses.
     */
    virtual void memWriteback() override;

    /**
     * Invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to
     * memory. Make sure to call functionalWriteback() first if you
     * want them written to memory.
     */
    virtual void memInvalidate() override;

    /**
     * Determine if there are any dirty blocks in the cache.
     *
     * @return true if at least one block is dirty, false otherwise.
     */
    bool isDirty() const;

    /**
     * Determine if an address is in the ranges covered by this
     * cache. This is useful to filter snoops.
     *
     * @param addr Address to check against
     *
     * @return If the address in question is in range
     */
    bool inRange(Addr addr) const;

    /**
     * Find next request ready time from among possible sources.
     */
    Tick nextQueueReadyTime() const;

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of tag lookup of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles lookupLatency;

    /**
     * The latency of data access of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles dataLatency;

    /**
     * This is the forward latency of the cache. It occurs when there
     * is a cache miss and a request is forwarded downstream, in
     * particular an outbound miss.
     */
    const Cycles forwardLatency;

    /** The latency to fill a cache block */
    const Cycles fillLatency;

    /**
     * The latency of sending a response to the upper-level cache/core
     * on a linefill. The responseLatency parameter captures this
     * latency.
     */
    const Cycles responseLatency;

    /**
     * Whether tags and data are accessed sequentially.
     */
    const bool sequentialAccess;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    bool forwardSnoops;

    /**
     * Clusivity with respect to the upstream cache, determining if we
     * fill into both this cache and the cache above on a miss. Note
     * that we currently do not support strict clusivity policies.
     */
    const Enums::Clusivity clusivity;

    /**
     * Is this cache read only, for example the instruction cache, or
     * table-walker cache. A cache that is read only should never see
     * any writes, and should never get any dirty data (and hence
     * never have to do any writebacks).
     */
    const bool isReadOnly;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores time the cache blocked for statistics. */
    Cycles blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses. */
    const AddrRangeList addrRanges;

  public:
    /** System we are currently operating in. */
    System *system;

    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of times a HW-prefetched block is evicted w/o reference. */
    Stats::Scalar unusedPrefetches;

    /** Number of blocks written back per thread. */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of uncacheable misses, per command and thread. */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable misses. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of each uncacheable MSHR miss, per command
        and thread. */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of overall uncacheable MSHR misses. */
    Stats::Formula overallMshrUncacheableLatency;

    /** The miss rate in the MSHRs per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable MSHR miss, per command
        and thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable MSHR miss. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** Number of replacements of valid blocks. */
    Stats::Scalar replacements;

    /** Number of data expansions. */
    Stats::Scalar dataExpansions;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    void regStats() override;

    /** Registers probes. */
    void regProbePoints() override;

  public:
    BaseCache(const BaseCacheParams *p, unsigned blk_size);
    ~BaseCache();

    void init() override;

    Port &getPort(const std::string &if_name,
                  PortID idx=InvalidPortID) override;

    /**
     * Query block size of a cache.
     * @return The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }

    const AddrRangeList &getAddrRanges() const { return addrRanges; }

    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
    {
        MSHR *mshr = mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize,
                                        pkt, time, order++,
                                        allocOnFill(pkt->cmd));

        if (mshrQueue.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_MSHRs);
        }

        if (sched_send) {
            // schedule the send
            schedMemSideSendEvent(time);
        }

        return mshr;
    }

    void allocateWriteBuffer(PacketPtr pkt, Tick time)
    {
        // should only see writes or clean evicts here
        assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);

        Addr blk_addr = pkt->getBlockAddr(blkSize);

        // If using compression, on evictions the block is decompressed and
        // the operation's latency is added to the payload delay. Consume
        // that payload delay here, meaning that the data is always stored
        // uncompressed in the writebuffer
        if (compressor) {
            time += pkt->payloadDelay;
            pkt->payloadDelay = 0;
        }

        WriteQueueEntry *wq_entry =
            writeBuffer.findMatch(blk_addr, pkt->isSecure());
        if (wq_entry && !wq_entry->inService) {
            DPRINTF(Cache, "Potential to merge writeback %s", pkt->print());
        }

        writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);

        if (writeBuffer.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
        }

        // schedule the send
        schedMemSideSendEvent(time);
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked() const
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curCycle();
            cpuSidePort.setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            blocked_cycles[cause] += curCycle() - blockedCycle;
            cpuSidePort.clearBlocked();
        }
    }
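
    // A hedged illustration of the bit-vector semantics above (not in
    // the original source): blocking for two causes keeps the CPU-side
    // port blocked until both causes have been cleared.
    //
    //   setBlocked(Blocked_NoMSHRs);     // blocked == 0b001, port blocks
    //   setBlocked(Blocked_NoTargets);   // blocked == 0b101
    //   clearBlocked(Blocked_NoMSHRs);   // blocked == 0b100, still blocked
    //   clearBlocked(Blocked_NoTargets); // blocked == 0, port retries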

    /**
     * Schedule a send event for the memory-side port. If already
     * scheduled, this may reschedule the event at an earlier
     * time. When the specified time is reached, the port is free to
     * send either a response, a request, or a prefetch request.
     *
     * @param time The time when to attempt sending a packet.
     */
    void schedMemSideSendEvent(Tick time)
    {
        memSidePort.schedSendEvent(time);
    }

    bool inCache(Addr addr, bool is_secure) const {
        return tags->findBlock(addr, is_secure);
    }

    bool hasBeenPrefetched(Addr addr, bool is_secure) const {
        CacheBlk *block = tags->findBlock(addr, is_secure);
        if (block) {
            return block->wasPrefetched();
        } else {
            return false;
        }
    }

    bool inMissQueue(Addr addr, bool is_secure) const {
        return mshrQueue.findMatch(addr, is_secure);
    }

    void incMissCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
        pkt->req->incAccessDepth();
        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }

    void incHitCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
    }

    /**
     * Checks if the cache is coalescing writes
     *
     * @return True if the cache is coalescing writes
     */
    bool coalesce() const;

    /**
     * Cache block visitor that writes back dirty cache blocks using
     * functional writes.
     */
    void writebackVisitor(CacheBlk &blk);

    /**
     * Cache block visitor that invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to memory.
     */
    void invalidateVisitor(CacheBlk &blk);

    /**
     * Take an MSHR, turn it into a suitable downstream packet, and
     * send it out. This construct allows a queue entry to choose a suitable
     * approach based on its type.
     *
     * @param mshr The MSHR to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    virtual bool sendMSHRQueuePacket(MSHR* mshr);

    /**
     * Similar to sendMSHRQueuePacket, but for a write-queue entry
     * instead. Create the packet, and send it, and if successful also
     * mark the entry in service.
     *
     * @param wq_entry The write-queue entry to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);

    /**
     * Serialize the state of the caches
     *
     * We currently don't support checkpointing cache state, so this panics.
     */
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};

/**
 * The write allocator inspects write packets and detects streaming
 * patterns. The write allocator supports a single stream where writes
 * are expected to access consecutive locations and keeps track of the
 * size of the area covered by the consecutive writes in byteCount.
 *
 * 1) When byteCount has surpassed the coalesceLimit the mode
 * switches from ALLOCATE to COALESCE where writes should be delayed
 * until the whole block is written at which point a single packet
 * (whole line write) can service them.
 *
 * 2) When byteCount has also exceeded the noAllocateLimit (whole
 * line) we switch to NO_ALLOCATE where writes should not allocate in
 * the cache but rather send a whole line write to the memory below.
 */
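// A hedged walk-through of the mode transitions described above,
// assuming 64-byte blocks and the hypothetical parameter values
// coalesce_limit = 2 and no_allocate_limit = 12:
//
//   byteCount <= 128 contiguous bytes -> ALLOCATE (normal operation)
//   byteCount >  128 contiguous bytes -> COALESCE (delay the WriteReq
//       MSHR until the whole line has been written)
//   byteCount >  768 contiguous bytes -> NO_ALLOCATE (fill via the
//       tempBlock and turn the whole-line write into a writeback)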
class WriteAllocator : public SimObject {
  public:
    WriteAllocator(const WriteAllocatorParams *p) :
        SimObject(p),
        coalesceLimit(p->coalesce_limit * p->block_size),
        noAllocateLimit(p->no_allocate_limit * p->block_size),
        delayThreshold(p->delay_threshold)
    {
        reset();
    }

    /**
     * Should writes be coalesced? This is true if the mode is not
     * ALLOCATE, i.e., it is set to either COALESCE or NO_ALLOCATE.
     *
     * @return true if the cache should coalesce writes.
     */
    bool coalesce() const {
        return mode != WriteMode::ALLOCATE;
    }

    /**
     * Should writes allocate?
     *
     * @return true if the cache should allocate for writes, i.e., the
     * mode is not NO_ALLOCATE.
     */
    bool allocate() const {
        return mode != WriteMode::NO_ALLOCATE;
    }

    /**
     * Reset the write allocator state, meaning that it allocates for
     * writes and has not recorded any information about qualifying
     * writes that might trigger a switch to coalescing and later no
     * allocation.
     */
    void reset() {
        mode = WriteMode::ALLOCATE;
        byteCount = 0;
        nextAddr = 0;
    }

    /**
     * Check whether the current write needs to be delayed, counting
     * down the block's delay counter if so.
     *
     * @param blk_addr The block address the packet writes to
     * @return true if the current packet should be delayed
     */
    bool delay(Addr blk_addr) {
        if (delayCtr[blk_addr] > 0) {
            --delayCtr[blk_addr];
            return true;
        } else {
            return false;
        }
    }

    /**
     * Clear delay counter for the input block
     *
     * @param blk_addr The accessed cache block
     */
    void resetDelay(Addr blk_addr) {
        delayCtr.erase(blk_addr);
    }

    /**
     * Update the write mode based on the current write
     * packet. This method compares the packet's address with any
     * current stream, and updates the tracking and the mode
     * accordingly.
     *
     * @param write_addr Start address of the write request
     * @param write_size Size of the write request
     * @param blk_addr The block address that this packet writes to
     */
    void updateMode(Addr write_addr, unsigned write_size, Addr blk_addr);
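
    // A hedged sketch of the update logic the declaration above
    // documents; the definitive implementation lives in the .cc file.
    // A write continuing the current stream refreshes the block's
    // delay budget, grows byteCount and may promote the mode, while a
    // non-contiguous write restarts the tracking:
    //
    //   if (nextAddr == write_addr) {
    //       delayCtr[blk_addr] = delayThreshold;
    //       if (mode != WriteMode::NO_ALLOCATE) {
    //           byteCount += write_size;
    //           if (mode == WriteMode::ALLOCATE &&
    //               byteCount > coalesceLimit)
    //               mode = WriteMode::COALESCE;
    //           else if (mode == WriteMode::COALESCE &&
    //                    byteCount > noAllocateLimit)
    //               mode = WriteMode::NO_ALLOCATE;
    //       }
    //   } else {
    //       reset();
    //       byteCount = write_size;
    //       resetDelay(blk_addr);
    //   }
    //   nextAddr = write_addr + write_size;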

  private:
    /**
     * The current mode for write coalescing and allocation, either
     * normal operation (ALLOCATE), write coalescing (COALESCE), or
     * write coalescing without allocation (NO_ALLOCATE).
     */
    enum class WriteMode : char {
        ALLOCATE,
        COALESCE,
        NO_ALLOCATE,
    };
    WriteMode mode;

    /** Address to match writes against to detect streams. */
    Addr nextAddr;

    /**
     * Bytes written contiguously. Saturating once we no longer
     * allocate.
     */
    uint32_t byteCount;

    /**
     * Limits for when to switch between the different write modes.
     */
    const uint32_t coalesceLimit;
    const uint32_t noAllocateLimit;
    /**
     * The number of times the allocator will delay a WriteReq MSHR.
     */
    const uint32_t delayThreshold;

    /**
     * Keep track of the number of times the allocator has delayed a
     * WriteReq MSHR.
     */
    std::unordered_map<Addr, Counter> delayCtr;
};

#endif //__MEM_CACHE_BASE_HH__