base.hh: diff between revisions 12702:27cb33a96e0f (old) and 12724:4f6fac3191d2 (new)
/*
- * Copyright (c) 2012-2013, 2015-2016 ARM Limited
+ * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated

--- 24 unchanged lines hidden (view full) ---

 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
+ *          Andreas Hansson
+ *          Nikos Nikoleris
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __MEM_CACHE_BASE_HH__
#define __MEM_CACHE_BASE_HH__

-#include <algorithm>
-#include <list>
+#include <cassert>
+#include <cstdint>
#include <string>
-#include <vector>

-#include "base/logging.hh"
+#include "base/addr_range.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
+#include "enums/Clusivity.hh"
+#include "mem/cache/blk.hh"
#include "mem/cache/mshr_queue.hh"
+#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue.hh"
+#include "mem/cache/write_queue_entry.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
+#include "mem/packet_queue.hh"
#include "mem/qport.hh"
#include "mem/request.hh"
-#include "params/BaseCache.hh"
#include "sim/eventq.hh"
-#include "sim/full_system.hh"
+#include "sim/serialize.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

+class BaseMasterPort;
+class BasePrefetcher;
+class BaseSlavePort;
+class MSHR;
+class MasterPort;
+class QueueEntry;
+struct BaseCacheParams;

/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
  protected:
    /**
     * Indexes to enumerate the MSHR queues.

--- 52 unchanged lines hidden (view full) ---

         * Memory-side port always snoops.
         *
         * @return always true
         */
        virtual bool isSnooping() const { return true; }
    };

+    /**
+     * Override the default behaviour of sendDeferredPacket to enable
+     * the memory-side cache port to also send requests based on the
+     * current MSHR status. This queue has a pointer to our specific
+     * cache implementation and is used by the MemSidePort.
+     */
+    class CacheReqPacketQueue : public ReqPacketQueue
+    {
+
+      protected:
+
+        BaseCache &cache;
+        SnoopRespPacketQueue &snoopRespQueue;
+
+      public:
+
+        CacheReqPacketQueue(BaseCache &cache, MasterPort &port,
+                            SnoopRespPacketQueue &snoop_resp_queue,
+                            const std::string &label) :
+            ReqPacketQueue(cache, port, label), cache(cache),
+            snoopRespQueue(snoop_resp_queue) { }
+
+        /**
+         * Override the normal sendDeferredPacket and do not only
+         * consider the transmit list (used for responses), but also
+         * requests.
+         */
+        virtual void sendDeferredPacket();
+
+        /**
+         * Check if there is a conflicting snoop response about to be
+         * sent out, and if so simply stall any requests, and schedule
+         * a send event at the same time as the next snoop response is
+         * being sent out.
+         */
+        bool checkConflictingSnoop(Addr addr)
+        {
+            if (snoopRespQueue.hasAddr(addr)) {
+                DPRINTF(CachePort, "Waiting for snoop response to be "
+                        "sent\n");
+                Tick when = snoopRespQueue.deferredPacketReadyTime();
+                schedSendEvent(when);
+                return true;
+            }
+            return false;
+        }
+    };
+
+
+    /**
+     * The memory-side port extends the base cache master port with
+     * access functions for functional, atomic and timing snoops.
+     */
+    class MemSidePort : public CacheMasterPort
+    {
+      private:
+
+        /** The cache-specific queue. */
+        CacheReqPacketQueue _reqQueue;
+
+        SnoopRespPacketQueue _snoopRespQueue;
+
+        // a pointer to our specific cache implementation
+        BaseCache *cache;
+
+      protected:
+
+        virtual void recvTimingSnoopReq(PacketPtr pkt);
+
+        virtual bool recvTimingResp(PacketPtr pkt);
+
+        virtual Tick recvAtomicSnoop(PacketPtr pkt);
+
+        virtual void recvFunctionalSnoop(PacketPtr pkt);
+
+      public:
+
+        MemSidePort(const std::string &_name, BaseCache *_cache,
+                    const std::string &_label);
+    };

    /**
     * A cache slave port is used for the CPU-side port of the cache,
     * and it is basically a simple timing port that uses a transmit
     * list for responses to the CPU (or connected master). In
     * addition, it has the functionality to block the port for
     * incoming requests. If blocked, the port will issue a retry once
     * unblocked.
     */
    class CacheSlavePort : public QueuedSlavePort

--- 24 unchanged lines hidden (view full) ---

      private:

        void processSendRetry();

        EventFunctionWrapper sendRetryEvent;

    };

-    CacheSlavePort *cpuSidePort;
-    CacheMasterPort *memSidePort;
+    /**
+     * The CPU-side port extends the base cache slave port with access
+     * functions for functional, atomic and timing requests.
+     */
+    class CpuSidePort : public CacheSlavePort
+    {
+      private:
+
+        // a pointer to our specific cache implementation
+        BaseCache *cache;
+
+      protected:
+        virtual bool recvTimingSnoopResp(PacketPtr pkt) override;
+
+        virtual bool tryTiming(PacketPtr pkt) override;
+
+        virtual bool recvTimingReq(PacketPtr pkt) override;
+
+        virtual Tick recvAtomic(PacketPtr pkt) override;
+
+        virtual void recvFunctional(PacketPtr pkt) override;
+
+        virtual AddrRangeList getAddrRanges() const override;
+
+      public:
+
+        CpuSidePort(const std::string &_name, BaseCache *_cache,
+                    const std::string &_label);
+
+    };
+
+    CpuSidePort cpuSidePort;
+    MemSidePort memSidePort;

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    WriteQueue writeBuffer;

+    /** Tag and data Storage */
+    BaseTags *tags;
+
+    /** Prefetcher */
+    BasePrefetcher *prefetcher;
+
+    /**
+     * Notify the prefetcher on every access, not just misses.
+     */
+    const bool prefetchOnAccess;
+
+    /**
+     * Temporary cache block for occasional transitory use. We use
+     * the tempBlock to fill when allocation fails (e.g., when there
+     * is an outstanding request that accesses the victim block) or
+     * when we want to avoid allocation (e.g., exclusive caches)
+     */
+    CacheBlk *tempBlock;
+
+    /**
+     * Upstream caches need this packet until true is returned, so
+     * hold it for deletion until a subsequent call
+     */
+    std::unique_ptr<Packet> pendingDelete;
+
    /**
     * Mark a request as in service (sent downstream in the memory
     * system), effectively making this MSHR the ordering point.
     */
    void markInService(MSHR *mshr, bool pending_modified_resp)
    {
        bool wasFull = mshrQueue.isFull();
        mshrQueue.markInService(mshr, pending_modified_resp);

--- 8 unchanged lines hidden (view full) ---

        writeBuffer.markInService(entry);

        if (wasFull && !writeBuffer.isFull()) {
            clearBlocked(Blocked_NoWBBuffers);
        }
    }

    /**
-     * Determine if we should allocate on a fill or not.
+     * Determine whether we should allocate on a fill or not. If this
+     * cache is mostly inclusive with regards to the upstream cache(s)
+     * we always allocate (for any non-forwarded and cacheable
+     * requests). In the case of a mostly exclusive cache, we allocate
+     * on fill if the packet did not come from a cache, that is, if we
+     * are dealing with a whole-line write (the latter behaves much
+     * like a writeback), the original target packet came from a
+     * non-caching source, or we are performing a prefetch or LLSC.
     *
-     * @param cmd Packet command being added as an MSHR target
-     *
-     * @return Whether we should allocate on a fill or not
+     * @param cmd Command of the incoming requesting packet
+     * @return Whether we should allocate on the fill
     */
-    virtual bool allocOnFill(MemCmd cmd) const = 0;
+    inline bool allocOnFill(MemCmd cmd) const
+    {
+        return clusivity == Enums::mostly_incl ||
+            cmd == MemCmd::WriteLineReq ||
+            cmd == MemCmd::ReadReq ||
+            cmd == MemCmd::WriteReq ||
+            cmd.isPrefetch() ||
+            cmd.isLLSC();
+    }

+    /**
+     * Does all the processing necessary to perform the provided request.
+     * @param pkt The memory request to perform.
+     * @param blk The cache block to be updated.
+     * @param lat The latency of the access.
+     * @param writebacks List for any writebacks that need to be performed.
+     * @return Boolean indicating whether the request was satisfied.
+     */
+    virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
+                        PacketList &writebacks);

+    /*
+     * Handle a timing request that hit in the cache
+     *
+     * @param pkt The request packet
+     * @param blk The referenced block
+     * @param request_time The tick at which the block lookup is complete
+     */
+    virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk,
+                                    Tick request_time);

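The allocate-on-fill predicate above boils down to a small decision over the configured clusivity and the incoming command. The standalone sketch below mirrors that decision table; the enum and command names are hypothetical stand-ins (the real Enums::Clusivity and MemCmd types carry far more state), so treat it as an illustration rather than gem5 code.

#include <iostream>

// Hypothetical stand-ins for Enums::Clusivity and MemCmd.
enum class Clusivity { MostlyIncl, MostlyExcl };
enum class Cmd { ReadReq, WriteReq, WriteLineReq, HardPFReq, LoadLockedReq, ReadExReq };

bool isPrefetch(Cmd c) { return c == Cmd::HardPFReq; }
bool isLLSC(Cmd c) { return c == Cmd::LoadLockedReq; }

// Same shape as BaseCache::allocOnFill(): a mostly-inclusive cache always
// allocates; a mostly-exclusive cache only allocates for requests that did
// not come from an upstream cache (whole-line writes, plain reads/writes,
// prefetches and LLSC accesses).
bool allocOnFill(Clusivity clusivity, Cmd cmd)
{
    return clusivity == Clusivity::MostlyIncl ||
        cmd == Cmd::WriteLineReq ||
        cmd == Cmd::ReadReq ||
        cmd == Cmd::WriteReq ||
        isPrefetch(cmd) ||
        isLLSC(cmd);
}

int main()
{
    // A mostly-exclusive cache does not allocate for a ReadExReq issued by
    // an upstream cache, but does allocate for a CPU-side ReadReq.
    std::cout << allocOnFill(Clusivity::MostlyExcl, Cmd::ReadExReq) << '\n';  // 0
    std::cout << allocOnFill(Clusivity::MostlyExcl, Cmd::ReadReq) << '\n';    // 1
    std::cout << allocOnFill(Clusivity::MostlyIncl, Cmd::ReadExReq) << '\n';  // 1
}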
+    /*
+     * Handle a timing request that missed in the cache
+     *
+     * Implementation specific handling for different cache
+     * implementations
+     *
+     * @param pkt The request packet
+     * @param blk The referenced block
+     * @param forward_time The tick at which we can process dependent requests
+     * @param request_time The tick at which the block lookup is complete
+     */
+    virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk,
+                                     Tick forward_time,
+                                     Tick request_time) = 0;

+    /*
+     * Handle a timing request that missed in the cache
+     *
+     * Common functionality across different cache implementations
+     *
+     * @param pkt The request packet
+     * @param blk The referenced block
+     * @param mshr Any existing mshr for the referenced cache block
+     * @param forward_time The tick at which we can process dependent requests
+     * @param request_time The tick at which the block lookup is complete
+     */
+    void handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
+                             Tick forward_time, Tick request_time);

+    /**
+     * Performs the access specified by the request.
+     * @param pkt The request to perform.
+     */
+    virtual void recvTimingReq(PacketPtr pkt);

+    /**
+     * Handling the special case of uncacheable write responses to
+     * make recvTimingResp less cluttered.
+     */
+    void handleUncacheableWriteResp(PacketPtr pkt);

+    /**
+     * Service non-deferred MSHR targets using the received response
+     *
+     * Iterates through the list of targets that can be serviced with
+     * the current response. Any writebacks that need to be performed
+     * must be appended to the writebacks parameter.
+     *
+     * @param mshr The MSHR that corresponds to the response
+     * @param pkt The response packet
+     * @param blk The reference block
+     * @param writebacks List of writebacks that need to be performed
+     */
+    virtual void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt,
+                                    CacheBlk *blk, PacketList& writebacks) = 0;

+    /**
+     * Handles a response (cache line fill/write ack) from the bus.
+     * @param pkt The response packet
+     */
+    virtual void recvTimingResp(PacketPtr pkt);

+    /**
+     * Snoops bus transactions to maintain coherence.
+     * @param pkt The current bus transaction.
+     */
+    virtual void recvTimingSnoopReq(PacketPtr pkt) = 0;

+    /**
+     * Handle a snoop response.
+     * @param pkt Snoop response packet
+     */
+    virtual void recvTimingSnoopResp(PacketPtr pkt) = 0;

+    /**
+     * Handle a request in atomic mode that missed in this cache
+     *
+     * Creates a downstream request, sends it to the memory below and
+     * handles the response. As we are in atomic mode all operations
+     * are performed immediately.
+     *
+     * @param pkt The packet with the requests
+     * @param blk The referenced block
+     * @param writebacks A list with packets for any performed writebacks
+     * @return Cycles for handling the request
+     */
+    virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *blk,
+                                       PacketList &writebacks) = 0;

+    /**
+     * Performs the access specified by the request.
+     * @param pkt The request to perform.
+     * @return The number of ticks required for the access.
+     */
+    virtual Tick recvAtomic(PacketPtr pkt);

+    /**
+     * Snoop for the provided request in the cache and return the estimated
+     * time taken.
+     * @param pkt The memory request to snoop
+     * @return The number of ticks required for the snoop.
+     */
+    virtual Tick recvAtomicSnoop(PacketPtr pkt) = 0;

+    /**
+     * Performs the access specified by the request.
+     *
+     * @param pkt The request to perform.
+     * @param from_cpu_side from the CPU side port or the memory side port
+     */
+    virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side);

+    /**
+     * Handle doing the Compare and Swap function for SPARC.
+     */
+    void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);

+    /**
+     * Return the next queue entry to service, either a pending miss
+     * from the MSHR queue, a buffered write from the write buffer, or
+     * something from the prefetcher. This function is responsible
+     * for prioritizing among those sources on the fly.
+     */
+    QueueEntry* getNextQueueEntry();

+    /**
+     * Insert writebacks into the write buffer
+     */
+    virtual void doWritebacks(PacketList& writebacks, Tick forward_time) = 0;

+    /**
+     * Send writebacks down the memory hierarchy in atomic mode
+     */
+    virtual void doWritebacksAtomic(PacketList& writebacks) = 0;

+    /**
+     * Create an appropriate downstream bus request packet.
+     *
+     * Creates a new packet with the request to be sent to the memory
+     * below, or nullptr if the current request in cpu_pkt should just
+     * be forwarded on.
+     *
+     * @param cpu_pkt The miss packet that needs to be satisfied.
+     * @param blk The referenced block, can be nullptr.
+     * @param needs_writable Indicates that the block must be writable
+     * even if the request in cpu_pkt doesn't indicate that.
+     * @return A packet sent to the memory below
+     */
+    virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
+                                       bool needs_writable) const = 0;

+    /**
+     * Determine if clean lines should be written back or not. In
+     * cases where a downstream cache is mostly inclusive we likely
+     * want it to act as a victim cache also for lines that have not
+     * been modified. Hence, we cannot simply drop the line (or send a
+     * clean evict), but rather need to send the actual data.
+     */
+    const bool writebackClean;

+    /**
+     * Writebacks from the tempBlock, resulting on the response path
+     * in atomic mode, must happen after the call to recvAtomic has
+     * finished (for the right ordering of the packets). We therefore
+     * need to hold on to the packets, and have a method and an event
+     * to send them.
+     */
+    PacketPtr tempBlockWriteback;

+    /**
+     * Send the outstanding tempBlock writeback. To be called after
+     * recvAtomic finishes in cases where the block we filled is in
+     * fact the tempBlock, and now needs to be written back.
+     */
+    void writebackTempBlockAtomic() {
+        assert(tempBlockWriteback != nullptr);
+        PacketList writebacks{tempBlockWriteback};
+        doWritebacksAtomic(writebacks);
+        tempBlockWriteback = nullptr;
+    }

+    /**
+     * An event to writeback the tempBlock after recvAtomic
+     * finishes. To avoid other calls to recvAtomic getting in
+     * between, we create this event with a higher priority.
+     */
+    EventFunctionWrapper writebackTempBlockAtomicEvent;

+    /**
+     * Perform any necessary updates to the block and perform any data
+     * exchange between the packet and the block. The flags of the
+     * packet are also set accordingly.
+     *
+     * @param pkt Request packet from upstream that hit a block
+     * @param blk Cache block that the packet hit
+     * @param deferred_response Whether this request originally missed
+     * @param pending_downgrade Whether the writable flag is to be removed
+     */
+    virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
+                                bool deferred_response = false,
+                                bool pending_downgrade = false);

+    /**
+     * Maintain the clusivity of this cache by potentially
+     * invalidating a block. This method works in conjunction with
+     * satisfyRequest, but is separate to allow us to handle all MSHR
+     * targets before potentially dropping a block.
+     *
+     * @param from_cache Whether we have dealt with a packet from a cache
+     * @param blk The block that should potentially be dropped
+     */
+    void maintainClusivity(bool from_cache, CacheBlk *blk);

+    /**
+     * Handle a fill operation caused by a received packet.
+     *
+     * Populates a cache block and handles all outstanding requests for the
+     * satisfied fill request. This version takes two memory requests. One
+     * contains the fill data, the other is an optional target to satisfy.
+     * Note that the reason we return a list of writebacks rather than
+     * inserting them directly in the write buffer is that this function
+     * is called by both atomic and timing-mode accesses, and in atomic
+     * mode we don't mess with the write buffer (we just perform the
+     * writebacks atomically once the original request is complete).
+     *
+     * @param pkt The memory request with the fill data.
+     * @param blk The cache block if it already exists.
+     * @param writebacks List for any writebacks that need to be performed.
+     * @param allocate Whether to allocate a block or use the temp block
+     * @return Pointer to the new cache block.
+     */
+    CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
+                         PacketList &writebacks, bool allocate);

+    /**
+     * Allocate a new block and perform any necessary writebacks
+     *
+     * Find a victim block and if necessary prepare writebacks for any
+     * existing data. May return nullptr if there are no replaceable
+     * blocks.
+     *
+     * @param addr Physical address of the new block
+     * @param is_secure Set if the block should be secure
+     * @param writebacks A list of writeback packets for the evicted blocks
+     * @return the allocated block
+     */
+    CacheBlk *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks);
+    /**
+     * Evict a cache block.
+     *
+     * Performs a writeback if necessary and invalidates the block
+     *
+     * @param blk Block to invalidate
+     * @return A packet with the writeback, can be nullptr
+     */
+    M5_NODISCARD virtual PacketPtr evictBlock(CacheBlk *blk) = 0;

+    /**
+     * Evict a cache block.
+     *
+     * Performs a writeback if necessary and invalidates the block
+     *
+     * @param blk Block to invalidate
+     * @param writebacks Return a list of packets with writebacks
+     */
+    virtual void evictBlock(CacheBlk *blk, PacketList &writebacks) = 0;

+    /**
+     * Invalidate a cache block.
+     *
+     * @param blk Block to invalidate
+     */
+    void invalidateBlock(CacheBlk *blk);

+    /**
+     * Create a writeback request for the given block.
+     *
+     * @param blk The block to writeback.
+     * @return The writeback request for the block.
+     */
+    PacketPtr writebackBlk(CacheBlk *blk);

+    /**
+     * Create a writeclean request for the given block.
+     *
+     * Creates a request that writes the block to the cache below
+     * without evicting the block from the current cache.
+     *
+     * @param blk The block to write clean.
+     * @param dest The destination of the write clean operation.
+     * @param id Use the given packet id for the write clean operation.
+     * @return The generated write clean packet.
+     */
+    PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id);

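The tempBlockWriteback/writebackTempBlockAtomic pair above exists purely for ordering: a writeback produced while servicing an atomic access must not be sent until recvAtomic has returned. A minimal standalone sketch of that hand-off follows; it uses a plain string in place of the real packet and an explicit flush call in place of the high-priority event, both of which are simplifying assumptions rather than the gem5 API.

#include <cassert>
#include <iostream>
#include <list>
#include <string>

struct AtomicCacheSketch {
    std::string tempBlockWriteback;          // parked writeback, empty if none

    void doWritebacksAtomic(std::list<std::string> &wbs) {
        for (auto &wb : wbs)
            std::cout << "writeback sent: " << wb << '\n';
    }

    // Mirrors BaseCache::writebackTempBlockAtomic(): flush the parked
    // writeback and clear the slot.
    void writebackTempBlockAtomic() {
        assert(!tempBlockWriteback.empty());
        std::list<std::string> writebacks{tempBlockWriteback};
        doWritebacksAtomic(writebacks);
        tempBlockWriteback.clear();
    }

    int recvAtomic() {
        // Servicing the access evicts the victim held in the temp block,
        // but the packet must not be sent before this call returns.
        tempBlockWriteback = "victim line @0x80";
        return 10;  // latency in ticks (illustrative)
    }
};

int main()
{
    AtomicCacheSketch cache;
    int lat = cache.recvAtomic();
    cache.writebackTempBlockAtomic();   // flushed only after recvAtomic unwinds
    std::cout << "latency: " << lat << '\n';
}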
    /**
     * Write back dirty blocks in the cache using functional accesses.
     */
-    virtual void memWriteback() override = 0;
+    virtual void memWriteback() override;

    /**
     * Invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to
     * memory. Make sure to call functionalWriteback() first if you
     * want to write them to memory.
     */
-    virtual void memInvalidate() override = 0;
+    virtual void memInvalidate() override;

    /**
     * Determine if there are any dirty blocks in the cache.
     *
-     * \return true if at least one block is dirty, false otherwise.
+     * @return true if at least one block is dirty, false otherwise.
     */
-    virtual bool isDirty() const = 0;
+    bool isDirty() const;

    /**
     * Determine if an address is in the ranges covered by this
     * cache. This is useful to filter snoops.
     *
     * @param addr Address to check against
     *
     * @return If the address in question is in range
     */
    bool inRange(Addr addr) const;

+    /**
+     * Find next request ready time from among possible sources.
+     */
+    Tick nextQueueReadyTime() const;

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of tag lookup of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles lookupLatency;

--- 23 unchanged lines hidden (view full) ---

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    bool forwardSnoops;

+    /**
+     * Clusivity with respect to the upstream cache, determining if we
+     * fill into both this cache and the cache above on a miss. Note
+     * that we currently do not support strict clusivity policies.
+     */
+    const Enums::Clusivity clusivity;
+
    /**
     * Is this cache read only, for example the instruction cache, or
     * table-walker cache. A cache that is read only should never see
     * any writes, and should never get any dirty data (and hence
     * never have to do any writebacks).
     */
    const bool isReadOnly;

    /**

--- 154 unchanged lines hidden (view full) ---

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
-    virtual void regStats() override;
+    void regStats() override;

  public:
    BaseCache(const BaseCacheParams *p, unsigned blk_size);
-    ~BaseCache() {}
+    ~BaseCache();

-    virtual void init() override;
+    void init() override;

-    virtual BaseMasterPort &getMasterPort(const std::string &if_name,
-                                          PortID idx = InvalidPortID) override;
-    virtual BaseSlavePort &getSlavePort(const std::string &if_name,
-                                        PortID idx = InvalidPortID) override;
+    BaseMasterPort &getMasterPort(const std::string &if_name,
+                                  PortID idx = InvalidPortID) override;
+    BaseSlavePort &getSlavePort(const std::string &if_name,
+                                PortID idx = InvalidPortID) override;

    /**
     * Query block size of a cache.
     * @return The block size
     */
    unsigned
    getBlockSize() const
    {

--- 57 unchanged lines hidden (view full) ---

     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curCycle();
-            cpuSidePort->setBlocked();
+            cpuSidePort.setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            blocked_cycles[cause] += curCycle() - blockedCycle;
-            cpuSidePort->clearBlocked();
+            cpuSidePort.clearBlocked();
        }
    }

    /**
     * Schedule a send event for the memory-side port. If already
     * scheduled, this may reschedule the event at an earlier
     * time. When the specified time is reached, the port is free to
     * send either a response, a request, or a prefetch request.
     *
     * @param time The time when to attempt sending a packet.
     */
    void schedMemSideSendEvent(Tick time)
    {
-        memSidePort->schedSendEvent(time);
+        memSidePort.schedSendEvent(time);
    }

-    virtual bool inCache(Addr addr, bool is_secure) const = 0;
+    bool inCache(Addr addr, bool is_secure) const {
+        return tags->findBlock(addr, is_secure);
+    }

-    virtual bool inMissQueue(Addr addr, bool is_secure) const = 0;
+    bool inMissQueue(Addr addr, bool is_secure) const {
+        return mshrQueue.findMatch(addr, is_secure);
+    }

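setBlocked() and clearBlocked() above keep one bit per blocking cause, so the CPU-side port is only toggled when the mask transitions between zero and non-zero. The following standalone sketch reproduces just that bookkeeping; the cause names are taken from the header, but the print statements stand in for the port calls and the statistics updates.

#include <cassert>
#include <cstdint>
#include <iostream>

enum BlockedCause { Blocked_NoMSHRs = 0, Blocked_NoWBBuffers = 1 };

struct BlockTracker {
    uint8_t blocked = 0;            // one bit per cause, as in BaseCache::blocked

    void setBlocked(BlockedCause cause) {
        if (blocked == 0)
            std::cout << "blocking CPU-side port\n";    // cpuSidePort.setBlocked()
        blocked |= uint8_t(1 << cause);
    }

    void clearBlocked(BlockedCause cause) {
        blocked &= uint8_t(~(1 << cause));
        if (blocked == 0)
            std::cout << "unblocking CPU-side port\n";  // cpuSidePort.clearBlocked()
    }
};

int main()
{
    BlockTracker t;
    t.setBlocked(Blocked_NoMSHRs);       // first cause blocks the port
    t.setBlocked(Blocked_NoWBBuffers);   // already blocked, just sets another bit
    t.clearBlocked(Blocked_NoMSHRs);     // still blocked on write buffers
    t.clearBlocked(Blocked_NoWBBuffers); // last cause cleared, port unblocked
    assert(t.blocked == 0);
}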
    void incMissCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
        pkt->req->incAccessDepth();
        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }
    void incHitCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        hits[pkt->cmdToIndex()][pkt->req->masterId()]++;

    }

+    /**
+     * Cache block visitor that writes back dirty cache blocks using
+     * functional writes.
+     *
+     * @return Always returns true.
+     */
+    bool writebackVisitor(CacheBlk &blk);
+
+    /**
+     * Cache block visitor that invalidates all blocks in the cache.
+     *
+     * @warn Dirty cache lines will not be written back to memory.
+     *
+     * @return Always returns true.
+     */
+    bool invalidateVisitor(CacheBlk &blk);
+
+    /**
+     * Take an MSHR, turn it into a suitable downstream packet, and
+     * send it out. This construct allows a queue entry to choose a suitable
+     * approach based on its type.
+     *
+     * @param mshr The MSHR to turn into a packet and send
+     * @return True if the port is waiting for a retry
+     */
+    virtual bool sendMSHRQueuePacket(MSHR* mshr);
+
+    /**
+     * Similar to sendMSHR, but for a write-queue entry
+     * instead. Create the packet, and send it, and if successful also
+     * mark the entry in service.
+     *
+     * @param wq_entry The write-queue entry to turn into a packet and send
+     * @return True if the port is waiting for a retry
+     */
+    bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);
+
+    /**
+     * Serialize the state of the caches
+     *
+     * We currently don't support checkpointing cache state, so this panics.
+     */
+    void serialize(CheckpointOut &cp) const override;
+    void unserialize(CheckpointIn &cp) override;

};

+/**
+ * Wrap a method and present it as a cache block visitor.
+ *
+ * For example the forEachBlk method in the tag arrays expects a
+ * callable object/function as its parameter. This class wraps a
+ * method in an object and presents a callable object that adheres to
+ * the cache block visitor protocol.
+ */
+class CacheBlkVisitorWrapper : public CacheBlkVisitor
+{
+  public:
+    typedef bool (BaseCache::*VisitorPtr)(CacheBlk &blk);
+
+    CacheBlkVisitorWrapper(BaseCache &_cache, VisitorPtr _visitor)
+        : cache(_cache), visitor(_visitor) {}
+
+    bool operator()(CacheBlk &blk) override {
+        return (cache.*visitor)(blk);
+    }
+
+  private:
+    BaseCache &cache;
+    VisitorPtr visitor;
+};
+
+/**
+ * Cache block visitor that determines if there are dirty blocks in a
+ * cache.
+ *
+ * Use with the forEachBlk method in the tag array to determine if the
+ * array contains dirty blocks.
+ */
+class CacheBlkIsDirtyVisitor : public CacheBlkVisitor
+{
+  public:
+    CacheBlkIsDirtyVisitor()
+        : _isDirty(false) {}
+
+    bool operator()(CacheBlk &blk) override {
+        if (blk.isDirty()) {
+            _isDirty = true;
+            return false;
+        } else {
+            return true;
+        }
+    }
+
+    /**
+     * Does the array contain a dirty line?
+     *
+     * @return true if yes, false otherwise.
+     */
+    bool isDirty() const { return _isDirty; };
+
+  private:
+    bool _isDirty;
+};

#endif //__MEM_CACHE_BASE_HH__
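The two visitor classes added above are meant to be handed to the tag array's forEachBlk-style traversal, which stops as soon as a visitor returns false. The standalone sketch below shows that protocol with a simplified block type and traversal function; Blk and the free forEachBlk here are stand-ins assumed for illustration, not the gem5 classes.

#include <functional>
#include <iostream>
#include <vector>

struct Blk { bool dirty = false; };

// Simplified stand-in for a forEachBlk traversal: call the visitor on every
// block and stop as soon as it returns false.
void forEachBlk(std::vector<Blk> &blks, const std::function<bool(Blk &)> &visitor)
{
    for (auto &blk : blks)
        if (!visitor(blk))
            break;
}

int main()
{
    std::vector<Blk> blks(8);
    blks[3].dirty = true;

    // Equivalent of CacheBlkIsDirtyVisitor: remember whether any block is
    // dirty and abort the walk at the first dirty block found.
    bool is_dirty = false;
    forEachBlk(blks, [&](Blk &blk) {
        if (blk.dirty) { is_dirty = true; return false; }
        return true;
    });
    std::cout << "dirty: " << is_dirty << '\n';  // dirty: 1
}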