base.hh revision 13624:3d8220c2d41d
/*
 * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Hansson
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __MEM_CACHE_BASE_HH__
#define __MEM_CACHE_BASE_HH__

#include <cassert>
#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>

#include "base/addr_range.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/packet_queue.hh"
#include "mem/qport.hh"
#include "mem/request.hh"
#include "params/WriteAllocator.hh"
#include "sim/eventq.hh"
#include "sim/probe/probe.hh"
#include "sim/serialize.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

class BaseMasterPort;
class BasePrefetcher;
class BaseSlavePort;
class MSHR;
class MasterPort;
class QueueEntry;
struct BaseCacheParams;

/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
  protected:
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };

  public:
    /**
     * Reasons for caches to be blocked.
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };

  protected:

    /**
     * A cache master port is used for the memory-side port of the
     * cache, and in addition to the basic timing port that only sends
     * response packets through a transmit list, it also offers the
     * ability to schedule and send request packets (requests &
     * writebacks). The send event is scheduled through schedSendEvent,
     * and the sendDeferredPacket of the timing port is modified to
     * consider both the transmit list and the requests from the MSHR.
     */
    class CacheMasterPort : public QueuedMasterPort
    {

      public:

        /**
         * Schedule a send of a request packet (from the MSHR). Note
         * that we could already have a retry outstanding.
         */
        void schedSendEvent(Tick time)
        {
            DPRINTF(CachePort, "Scheduling send event at %llu\n", time);
            reqQueue.schedSendEvent(time);
        }

      protected:

        CacheMasterPort(const std::string &_name, BaseCache *_cache,
                        ReqPacketQueue &_reqQueue,
                        SnoopRespPacketQueue &_snoopRespQueue) :
            QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
        { }

        /**
         * Memory-side port always snoops.
         *
         * @return always true
         */
        virtual bool isSnooping() const { return true; }
    };

    /**
     * Override the default behaviour of sendDeferredPacket to enable
     * the memory-side cache port to also send requests based on the
     * current MSHR status. This queue has a pointer to our specific
     * cache implementation and is used by the MemSidePort.
     */
    class CacheReqPacketQueue : public ReqPacketQueue
    {

      protected:

        BaseCache &cache;
        SnoopRespPacketQueue &snoopRespQueue;

      public:

        CacheReqPacketQueue(BaseCache &cache, MasterPort &port,
                            SnoopRespPacketQueue &snoop_resp_queue,
                            const std::string &label) :
            ReqPacketQueue(cache, port, label), cache(cache),
            snoopRespQueue(snoop_resp_queue) { }

        /**
         * Override the normal sendDeferredPacket and do not only
         * consider the transmit list (used for responses), but also
         * requests.
         */
        virtual void sendDeferredPacket();

        /**
         * Check if there is a conflicting snoop response about to be
         * sent out, and if so simply stall any requests, and schedule
         * a send event at the same time as the next snoop response is
         * being sent out.
         */
        bool checkConflictingSnoop(Addr addr)
        {
            if (snoopRespQueue.hasAddr(addr)) {
                DPRINTF(CachePort, "Waiting for snoop response to be "
                        "sent\n");
                Tick when = snoopRespQueue.deferredPacketReadyTime();
                schedSendEvent(when);
                return true;
            }
            return false;
        }
    };


    /**
     * The memory-side port extends the base cache master port with
     * access functions for functional, atomic and timing snoops.
     */
    class MemSidePort : public CacheMasterPort
    {
      private:

        /** The cache-specific queue. */
        CacheReqPacketQueue _reqQueue;

        SnoopRespPacketQueue _snoopRespQueue;

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:

        virtual void recvTimingSnoopReq(PacketPtr pkt);

        virtual bool recvTimingResp(PacketPtr pkt);

        virtual Tick recvAtomicSnoop(PacketPtr pkt);

        virtual void recvFunctionalSnoop(PacketPtr pkt);

      public:

        MemSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);
    };

    /**
     * A cache slave port is used for the CPU-side port of the cache,
     * and it is basically a simple timing port that uses a transmit
     * list for responses to the CPU (or connected master). In
     * addition, it has the functionality to block the port for
     * incoming requests. If blocked, the port will issue a retry once
     * unblocked.
     */
    class CacheSlavePort : public QueuedSlavePort
    {

      public:

        /** Do not accept any new requests. */
        void setBlocked();

        /** Return to normal operation and accept new requests. */
        void clearBlocked();

        bool isBlocked() const { return blocked; }

      protected:

        CacheSlavePort(const std::string &_name, BaseCache *_cache,
                       const std::string &_label);

        /** A normal packet queue used to store responses. */
        RespPacketQueue queue;

        bool blocked;

        bool mustSendRetry;

      private:

        void processSendRetry();

        EventFunctionWrapper sendRetryEvent;

    };

    /**
     * The CPU-side port extends the base cache slave port with access
     * functions for functional, atomic and timing requests.
     */
    class CpuSidePort : public CacheSlavePort
    {
      private:

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:
        virtual bool recvTimingSnoopResp(PacketPtr pkt) override;

        virtual bool tryTiming(PacketPtr pkt) override;

        virtual bool recvTimingReq(PacketPtr pkt) override;

        virtual Tick recvAtomic(PacketPtr pkt) override;

        virtual void recvFunctional(PacketPtr pkt) override;

        virtual AddrRangeList getAddrRanges() const override;

      public:

        CpuSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);

    };

    CpuSidePort cpuSidePort;
    MemSidePort memSidePort;

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    WriteQueue writeBuffer;

    /** Tag and data storage */
    BaseTags *tags;

    /** Prefetcher */
    BasePrefetcher *prefetcher;

    /** To probe when a cache hit occurs */
    ProbePointArg<PacketPtr> *ppHit;

    /** To probe when a cache miss occurs */
    ProbePointArg<PacketPtr> *ppMiss;

    /**
     * The writeAllocator drives optimizations for streaming writes.
     * It first determines whether a WriteReq MSHR should be delayed,
     * thus ensuring that we wait longer in cases when we are write
     * coalescing and allowing all the bytes of the line to be written
     * before the MSHR packet is sent downstream. This works in unison
     * with the tracking in the MSHR to check if the entire line is
     * written. The write mode also affects the behaviour on filling
     * any whole-line writes. Normally the cache allocates the line
     * when receiving the InvalidateResp, but after seeing enough
     * consecutive lines we switch to using the tempBlock, and thus
     * end up not allocating the line, and instead turning the
     * whole-line write into a writeback straight away.
     */
    WriteAllocator * const writeAllocator;

    /**
     * Temporary cache block for occasional transitory use.  We use
     * the tempBlock to fill when allocation fails (e.g., when there
     * is an outstanding request that accesses the victim block) or
     * when we want to avoid allocation (e.g., exclusive caches)
     */
    TempCacheBlk *tempBlock;

    /**
     * Upstream caches need this packet until true is returned, so
     * hold it for deletion until a subsequent call
     */
    std::unique_ptr<Packet> pendingDelete;

    /**
     * Mark a request as in service (sent downstream in the memory
     * system), effectively making this MSHR the ordering point.
     */
    void markInService(MSHR *mshr, bool pending_modified_resp)
    {
        bool wasFull = mshrQueue.isFull();
        mshrQueue.markInService(mshr, pending_modified_resp);

        if (wasFull && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }
    }

    void markInService(WriteQueueEntry *entry)
    {
        bool wasFull = writeBuffer.isFull();
        writeBuffer.markInService(entry);

        if (wasFull && !writeBuffer.isFull()) {
            clearBlocked(Blocked_NoWBBuffers);
        }
    }

    /**
     * Determine whether we should allocate on a fill or not. If this
     * cache is mostly inclusive with regards to the upstream cache(s)
     * we always allocate (for any non-forwarded and cacheable
     * requests). In the case of a mostly exclusive cache, we allocate
     * on fill if the packet did not come from a cache: if we are
     * dealing with a whole-line write (which behaves much like a
     * writeback), if the original target packet came from a
     * non-caching source (a plain read or write), or if we are
     * performing a prefetch or LLSC.
     *
     * @param cmd Command of the incoming requesting packet
     * @return Whether we should allocate on the fill
     */
    inline bool allocOnFill(MemCmd cmd) const
    {
        return clusivity == Enums::mostly_incl ||
            cmd == MemCmd::WriteLineReq ||
            cmd == MemCmd::ReadReq ||
            cmd == MemCmd::WriteReq ||
            cmd.isPrefetch() ||
            cmd.isLLSC();
    }

    /**
     * Regenerate block address using tags.
     * Block address regeneration depends on whether we're using a temporary
     * block or not.
     *
     * @param blk The block whose address is to be regenerated.
     * @return The block's address.
     */
    Addr regenerateBlkAddr(CacheBlk* blk);

    /**
     * Calculate access latency in ticks given a tag lookup latency, and
     * whether access was a hit or miss.
     *
     * @param blk The cache block that was accessed.
     * @param lookup_lat Latency of the respective tag lookup.
     * @return The number of ticks that pass due to a block access.
     */
    Cycles calculateAccessLatency(const CacheBlk* blk,
                                  const Cycles lookup_lat) const;
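
    /*
     * Illustrative sketch only, not the definitive implementation
     * (which lives in the .cc file): with sequential tag and data
     * access the two latencies add up, with parallel access the
     * slower of the two dominates, and a miss pays only the tag
     * lookup:
     *
     * @code
     * Cycles lat = lookup_lat;
     * if (blk != nullptr) {
     *     lat = sequentialAccess ? Cycles(lookup_lat + dataLatency)
     *                            : std::max(lookup_lat, dataLatency);
     * }
     * return lat;
     * @endcode
     */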

    /**
     * Does all the processing necessary to perform the provided request.
     * @param pkt The memory request to perform.
     * @param blk The cache block to be updated.
     * @param lat The latency of the access.
     * @param writebacks List for any writebacks that need to be performed.
     * @return Boolean indicating whether the request was satisfied.
     */
    virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                        PacketList &writebacks);

    /*
     * Handle a timing request that hit in the cache
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk,
                                    Tick request_time);

    /*
     * Handle a timing request that missed in the cache
     *
     * Implementation specific handling for different cache
     * implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk,
                                     Tick forward_time,
                                     Tick request_time) = 0;

    /*
     * Handle a timing request that missed in the cache
     *
     * Common functionality across different cache implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param mshr Any existing mshr for the referenced cache block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    void handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                             Tick forward_time, Tick request_time);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     */
    virtual void recvTimingReq(PacketPtr pkt);

    /**
     * Handling the special case of uncacheable write responses to
     * make recvTimingResp less cluttered.
     */
    void handleUncacheableWriteResp(PacketPtr pkt);

    /**
     * Service non-deferred MSHR targets using the received response
     *
     * Iterates through the list of targets that can be serviced with
     * the current response.
     *
     * @param mshr The MSHR that corresponds to the response
     * @param pkt The response packet
     * @param blk The reference block
     */
    virtual void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt,
                                    CacheBlk *blk) = 0;

    /**
     * Handles a response (cache line fill/write ack) from the bus.
     * @param pkt The response packet
     */
    virtual void recvTimingResp(PacketPtr pkt);

    /**
     * Snoops bus transactions to maintain coherence.
     * @param pkt The current bus transaction.
     */
    virtual void recvTimingSnoopReq(PacketPtr pkt) = 0;

    /**
     * Handle a snoop response.
     * @param pkt Snoop response packet
     */
    virtual void recvTimingSnoopResp(PacketPtr pkt) = 0;

    /**
     * Handle a request in atomic mode that missed in this cache
     *
     * Creates a downstream request, sends it to the memory below and
     * handles the response. As we are in atomic mode all operations
     * are performed immediately.
     *
     * @param pkt The packet with the requests
     * @param blk The referenced block
     * @param writebacks A list with packets for any performed writebacks
     * @return Cycles for handling the request
     */
    virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
                                       PacketList &writebacks) = 0;

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The number of ticks required for the access.
     */
    virtual Tick recvAtomic(PacketPtr pkt);

    /**
     * Snoop for the provided request in the cache and return the estimated
     * time taken.
     * @param pkt The memory request to snoop
     * @return The number of ticks required for the snoop.
     */
    virtual Tick recvAtomicSnoop(PacketPtr pkt) = 0;

    /**
     * Performs the access specified by the request.
     *
     * @param pkt The request to perform.
     * @param from_cpu_side Whether the request came from the CPU side
     * port rather than the memory side port
     */
    virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side);

    /**
     * Handle doing the Compare and Swap function for SPARC.
     */
    void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);

    /**
     * Return the next queue entry to service, either a pending miss
     * from the MSHR queue, a buffered write from the write buffer, or
     * something from the prefetcher. This function is responsible
     * for prioritizing among those sources on the fly.
     */
    QueueEntry* getNextQueueEntry();

    /**
     * Insert writebacks into the write buffer
     */
    virtual void doWritebacks(PacketList& writebacks, Tick forward_time) = 0;

    /**
     * Send writebacks down the memory hierarchy in atomic mode
     */
    virtual void doWritebacksAtomic(PacketList& writebacks) = 0;

    /**
     * Create an appropriate downstream bus request packet.
     *
     * Creates a new packet with the request to be sent to the memory
     * below, or nullptr if the current request in cpu_pkt should just
     * be forwarded on.
     *
     * @param cpu_pkt The miss packet that needs to be satisfied.
     * @param blk The referenced block, can be nullptr.
     * @param needs_writable Indicates that the block must be writable
     * even if the request in cpu_pkt doesn't indicate that.
     * @param is_whole_line_write True if there are writes for the
     * whole line
     * @return A packet to send to the memory below
     */
    virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                                       bool needs_writable,
                                       bool is_whole_line_write) const = 0;

    /**
     * Determine if clean lines should be written back or not. In
     * cases where a downstream cache is mostly inclusive we likely
     * want it to act as a victim cache also for lines that have not
     * been modified. Hence, we cannot simply drop the line (or send a
     * clean evict), but rather need to send the actual data.
     */
    const bool writebackClean;

    /**
     * Writebacks from the tempBlock, generated on the response path
     * in atomic mode, must happen after the call to recvAtomic has
     * finished (for the right ordering of the packets). We therefore
     * need to hold on to the packets, and have a method and an event
     * to send them.
     */
    PacketPtr tempBlockWriteback;

    /**
     * Send the outstanding tempBlock writeback. To be called after
     * recvAtomic finishes in cases where the block we filled is in
     * fact the tempBlock, and now needs to be written back.
     */
    void writebackTempBlockAtomic() {
        assert(tempBlockWriteback != nullptr);
        PacketList writebacks{tempBlockWriteback};
        doWritebacksAtomic(writebacks);
        tempBlockWriteback = nullptr;
    }

    /**
     * An event to writeback the tempBlock after recvAtomic
     * finishes. To avoid other calls to recvAtomic getting in
     * between, we create this event with a higher priority.
     */
    EventFunctionWrapper writebackTempBlockAtomicEvent;

    /**
     * Perform any necessary updates to the block and perform any data
     * exchange between the packet and the block. The flags of the
     * packet are also set accordingly.
     *
     * @param pkt Request packet from upstream that hit a block
     * @param blk Cache block that the packet hit
     * @param deferred_response Whether this request originally missed
     * @param pending_downgrade Whether the writable flag is to be removed
     */
    virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                                bool deferred_response = false,
                                bool pending_downgrade = false);

    /**
     * Maintain the clusivity of this cache by potentially
     * invalidating a block. This method works in conjunction with
     * satisfyRequest, but is separate to allow us to handle all MSHR
     * targets before potentially dropping a block.
     *
     * @param from_cache Whether we have dealt with a packet from a cache
     * @param blk The block that should potentially be dropped
     */
    void maintainClusivity(bool from_cache, CacheBlk *blk);

    /**
     * Handle a fill operation caused by a received packet.
     *
     * Populates a cache block and handles all outstanding requests for the
     * satisfied fill request. This version takes two memory requests. One
     * contains the fill data, the other is an optional target to satisfy.
     * Note that the reason we return a list of writebacks rather than
     * inserting them directly in the write buffer is that this function
     * is called by both atomic and timing-mode accesses, and in atomic
     * mode we don't mess with the write buffer (we just perform the
     * writebacks atomically once the original request is complete).
     *
     * @param pkt The memory request with the fill data.
     * @param blk The cache block if it already exists.
     * @param writebacks List for any writebacks that need to be performed.
     * @param allocate Whether to allocate a block or use the temp block
     * @return Pointer to the new cache block.
     */
    CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
                         PacketList &writebacks, bool allocate);

    /**
     * Allocate a new block and perform any necessary writebacks
     *
     * Find a victim block and if necessary prepare writebacks for any
     * existing data. May return nullptr if there are no replaceable
     * blocks. If a replaceable block is found, it inserts the new block in
     * its place. The new block, however, is not set as valid yet.
     *
     * @param pkt Packet holding the address to update
     * @param writebacks A list of writeback packets for the evicted blocks
     * @return the allocated block
     */
    CacheBlk *allocateBlock(const PacketPtr pkt, PacketList &writebacks);

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @return A packet with the writeback, can be nullptr
     */
    M5_NODISCARD virtual PacketPtr evictBlock(CacheBlk *blk) = 0;

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @param writebacks Return a list of packets with writebacks
     */
    void evictBlock(CacheBlk *blk, PacketList &writebacks);

    /**
     * Invalidate a cache block.
     *
     * @param blk Block to invalidate
     */
    void invalidateBlock(CacheBlk *blk);

    /**
     * Create a writeback request for the given block.
     *
     * @param blk The block to writeback.
     * @return The writeback request for the block.
     */
    PacketPtr writebackBlk(CacheBlk *blk);

    /**
     * Create a writeclean request for the given block.
     *
     * Creates a request that writes the block to the cache below
     * without evicting the block from the current cache.
     *
     * @param blk The block to write clean.
     * @param dest The destination of the write clean operation.
     * @param id Use the given packet id for the write clean operation.
     * @return The generated write clean packet.
     */
    PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id);

    /**
     * Write back dirty blocks in the cache using functional accesses.
     */
    virtual void memWriteback() override;

    /**
     * Invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to
     * memory. Make sure to call memWriteback() first if you
     * want to write them to memory.
     */
    virtual void memInvalidate() override;

    /**
     * Determine if there are any dirty blocks in the cache.
     *
     * @return true if at least one block is dirty, false otherwise.
     */
    bool isDirty() const;

    /**
     * Determine if an address is in the ranges covered by this
     * cache. This is useful to filter snoops.
     *
     * @param addr Address to check against
     *
     * @return If the address in question is in range
     */
    bool inRange(Addr addr) const;

    /**
     * Find next request ready time from among possible sources.
     */
    Tick nextQueueReadyTime() const;

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of tag lookup of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles lookupLatency;

    /**
     * The latency of data access of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles dataLatency;

    /**
     * This is the forward latency of the cache. It occurs when there
     * is a cache miss and a request is forwarded downstream, in
     * particular an outbound miss.
     */
    const Cycles forwardLatency;

    /** The latency to fill a cache block */
    const Cycles fillLatency;

    /**
     * The latency of sending a response to the upper level cache/core
     * on a linefill. The responseLatency parameter captures this
     * latency.
     */
    const Cycles responseLatency;

    /**
     * Whether tags and data are accessed sequentially.
     */
    const bool sequentialAccess;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    bool forwardSnoops;

    /**
     * Clusivity with respect to the upstream cache, determining if we
     * fill into both this cache and the cache above on a miss. Note
     * that we currently do not support strict clusivity policies.
     */
    const Enums::Clusivity clusivity;

    /**
     * Is this cache read only, for example the instruction cache, or
     * table-walker cache. A cache that is read only should never see
     * any writes, and should never get any dirty data (and hence
     * never have to do any writebacks).
     */
    const bool isReadOnly;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores time the cache blocked for statistics. */
    Cycles blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses.
     */
    const AddrRangeList addrRanges;

  public:
    /** System we are currently operating in. */
    System *system;

    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of times a HW-prefetched block is evicted w/o reference. */
    Stats::Scalar unusedPrefetches;

    /** Number of blocks written back per thread. */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of uncacheable accesses, per command and thread. */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable accesses. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of each uncacheable access, per command
        and thread. */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of overall uncacheable accesses. */
    Stats::Formula overallMshrUncacheableLatency;

#if 0
    /** The total number of MSHR accesses per command and thread. */
    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
    /** The total number of demand MSHR accesses. */
    Stats::Formula demandMshrAccesses;
    /** The total number of MSHR accesses. */
    Stats::Formula overallMshrAccesses;
#endif

    /** The miss rate in the MSHRs per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable access, per command and
        thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable access. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** Number of replacements of valid blocks. */
    Stats::Scalar replacements;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    void regStats() override;

    /** Registers probes. */
    void regProbePoints() override;

  public:
    BaseCache(const BaseCacheParams *p, unsigned blk_size);
    ~BaseCache();

    void init() override;

    BaseMasterPort &getMasterPort(const std::string &if_name,
                                  PortID idx = InvalidPortID) override;
    BaseSlavePort &getSlavePort(const std::string &if_name,
                                PortID idx = InvalidPortID) override;

    /**
     * Query block size of a cache.
     * @return The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }

    const AddrRangeList &getAddrRanges() const { return addrRanges; }

    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
    {
        MSHR *mshr = mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize,
                                        pkt, time, order++,
                                        allocOnFill(pkt->cmd));

        if (mshrQueue.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_MSHRs);
        }

        if (sched_send) {
            // schedule the send
            schedMemSideSendEvent(time);
        }

        return mshr;
    }

    void allocateWriteBuffer(PacketPtr pkt, Tick time)
    {
        // should only see writes or clean evicts here
        assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);

        Addr blk_addr = pkt->getBlockAddr(blkSize);

        WriteQueueEntry *wq_entry =
            writeBuffer.findMatch(blk_addr, pkt->isSecure());
        if (wq_entry && !wq_entry->inService) {
            DPRINTF(Cache, "Potential to merge writeback %s\n",
                    pkt->print());
        }

        writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);

        if (writeBuffer.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
        }

        // schedule the send
        schedMemSideSendEvent(time);
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked() const
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curCycle();
            cpuSidePort.setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache, "Blocking for cause %d, mask=%d\n", cause, blocked);
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache, "Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            blocked_cycles[cause] += curCycle() - blockedCycle;
            cpuSidePort.clearBlocked();
        }
    }
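
    /*
     * Illustrative example of the blocking bit vector (a usage
     * sketch, not code from this file): the CPU-side port only
     * unblocks once every cause has been cleared.
     *
     * @code
     * setBlocked(Blocked_NoMSHRs);     // blocked = 0b001, port blocks
     * setBlocked(Blocked_NoTargets);   // blocked = 0b101
     * clearBlocked(Blocked_NoMSHRs);   // blocked = 0b100, still blocked
     * clearBlocked(Blocked_NoTargets); // blocked = 0b000, port retries
     * @endcode
     */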

    /**
     * Schedule a send event for the memory-side port. If already
     * scheduled, this may reschedule the event at an earlier
     * time. When the specified time is reached, the port is free to
     * send either a response, a request, or a prefetch request.
     *
     * @param time The time when to attempt sending a packet.
     */
    void schedMemSideSendEvent(Tick time)
    {
        memSidePort.schedSendEvent(time);
    }

    bool inCache(Addr addr, bool is_secure) const {
        return tags->findBlock(addr, is_secure);
    }

    bool hasBeenPrefetched(Addr addr, bool is_secure) const {
        CacheBlk *block = tags->findBlock(addr, is_secure);
        if (block) {
            return block->wasPrefetched();
        } else {
            return false;
        }
    }

    bool inMissQueue(Addr addr, bool is_secure) const {
        return mshrQueue.findMatch(addr, is_secure);
    }

    void incMissCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
        pkt->req->incAccessDepth();
        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }

    void incHitCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
    }

    /**
     * Checks if the cache is coalescing writes
     *
     * @return True if the cache is coalescing writes
     */
    bool coalesce() const;

    /**
     * Cache block visitor that writes back dirty cache blocks using
     * functional writes.
     */
    void writebackVisitor(CacheBlk &blk);

    /**
     * Cache block visitor that invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to memory.
     */
    void invalidateVisitor(CacheBlk &blk);

    /**
     * Take an MSHR, turn it into a suitable downstream packet, and
     * send it out. This construct allows a queue entry to choose a suitable
     * approach based on its type.
     *
     * @param mshr The MSHR to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    virtual bool sendMSHRQueuePacket(MSHR* mshr);

    /**
     * Similar to sendMSHRQueuePacket, but for a write-queue entry
     * instead. Create the packet, and send it, and if successful also
     * mark the entry in service.
     *
     * @param wq_entry The write-queue entry to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);

    /**
     * Serialize the state of the caches
     *
     * We currently don't support checkpointing cache state, so this panics.
     */
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};
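
/*
 * Illustrative sketch of how a derived cache implementation might use
 * the MSHR interface above on a timing miss (MyCache is hypothetical;
 * the concrete caches live in the corresponding .cc files):
 *
 * @code
 * void
 * MyCache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk,
 *                              Tick forward_time, Tick request_time)
 * {
 *     // allocate an MSHR for this block and schedule the memory-side
 *     // send event; allocateMissBuffer blocks the cache when the
 *     // MSHR queue becomes full
 *     MSHR *mshr = allocateMissBuffer(pkt, forward_time);
 *     assert(mshr);
 * }
 * @endcode
 */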

/**
 * The write allocator inspects write packets and detects streaming
 * patterns. The write allocator supports a single stream where writes
 * are expected to access consecutive locations and keeps track of the
 * size of the area covered by the consecutive writes in byteCount.
 *
 * 1) When byteCount has surpassed the coalesceLimit the mode
 * switches from ALLOCATE to COALESCE where writes should be delayed
 * until the whole block is written at which point a single packet
 * (whole line write) can service them.
 *
 * 2) When byteCount has also exceeded the noAllocateLimit (whole
 * line) we switch to NO_ALLOCATE where writes should not allocate in
 * the cache but rather send a whole line write to the memory below.
 */
class WriteAllocator : public SimObject {
  public:
    WriteAllocator(const WriteAllocatorParams *p) :
        SimObject(p),
        coalesceLimit(p->coalesce_limit * p->block_size),
        noAllocateLimit(p->no_allocate_limit * p->block_size),
        delayThreshold(p->delay_threshold)
    {
        reset();
    }

    /**
     * Should writes be coalesced? This is true if the mode is set to
     * COALESCE or NO_ALLOCATE.
     *
     * @return True if the cache should coalesce writes.
     */
    bool coalesce() const {
        return mode != WriteMode::ALLOCATE;
    }

    /**
     * Should writes allocate?
     *
     * @return True if the cache should allocate for writes.
     */
    bool allocate() const {
        return mode != WriteMode::NO_ALLOCATE;
    }

    /**
     * Reset the write allocator state, meaning that it allocates for
     * writes and has not recorded any information about qualifying
     * writes that might trigger a switch to coalescing and later no
     * allocation.
     */
    void reset() {
        mode = WriteMode::ALLOCATE;
        byteCount = 0;
        nextAddr = 0;
    }

    /**
     * Check whether we need to delay the current write.
     *
     * @param blk_addr The block address the packet writes to
     * @return true if the current packet should be delayed
     */
    bool delay(Addr blk_addr) {
        if (delayCtr[blk_addr] > 0) {
            --delayCtr[blk_addr];
            return true;
        } else {
            return false;
        }
    }

    /**
     * Clear delay counter for the input block
     *
     * @param blk_addr The accessed cache block
     */
    void resetDelay(Addr blk_addr) {
        delayCtr.erase(blk_addr);
    }

    /**
     * Update the write mode based on the current write
     * packet. This method compares the packet's address with any
     * current stream, and updates the tracking and the mode
     * accordingly.
     *
     * @param write_addr Start address of the write request
     * @param write_size Size of the write request
     * @param blk_addr The block address that this packet writes to
     */
    void updateMode(Addr write_addr, unsigned write_size, Addr blk_addr);
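
    /*
     * A minimal sketch of the bookkeeping described above, under the
     * assumption of a single forward stream (the definitive
     * implementation lives in the .cc file): writes that extend the
     * current stream grow byteCount and may advance the mode, while
     * any other write resets the tracking.
     *
     * @code
     * if (write_addr == nextAddr) {
     *     byteCount += write_size;
     *     if (mode == WriteMode::ALLOCATE && byteCount > coalesceLimit)
     *         mode = WriteMode::COALESCE;
     *     else if (mode == WriteMode::COALESCE &&
     *              byteCount > noAllocateLimit)
     *         mode = WriteMode::NO_ALLOCATE;
     *     if (mode != WriteMode::ALLOCATE)
     *         delayCtr[blk_addr] = delayThreshold;
     * } else {
     *     reset();
     *     byteCount = write_size;
     * }
     * nextAddr = write_addr + write_size;
     * @endcode
     */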

  private:
    /**
     * The current mode for write coalescing and allocation, either
     * normal operation (ALLOCATE), write coalescing (COALESCE), or
     * write coalescing without allocation (NO_ALLOCATE).
     */
    enum class WriteMode : char {
        ALLOCATE,
        COALESCE,
        NO_ALLOCATE,
    };
    WriteMode mode;

    /** Address to match writes against to detect streams. */
    Addr nextAddr;

    /**
     * Bytes written contiguously. Saturating once we no longer
     * allocate.
     */
    uint32_t byteCount;

    /**
     * Limits for when to switch between the different write modes.
     */
    const uint32_t coalesceLimit;
    const uint32_t noAllocateLimit;
    /**
     * The number of times the allocator will delay a WriteReq MSHR.
     */
    const uint32_t delayThreshold;

    /**
     * Keep track of the number of times the allocator has delayed a
     * WriteReq MSHR.
     */
    std::unordered_map<Addr, Counter> delayCtr;
};
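
/*
 * Illustrative use from a cache on the write path (a sketch under
 * assumptions, not the exact gem5 control flow; handleWrite is
 * hypothetical):
 *
 * @code
 * void
 * MyCache::handleWrite(PacketPtr pkt)
 * {
 *     const Addr blk_addr = pkt->getBlockAddr(blkSize);
 *     writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
 *                                blk_addr);
 *     if (writeAllocator->coalesce() && writeAllocator->delay(blk_addr)) {
 *         // hold the WriteReq MSHR back so that later writes can
 *         // complete the line before it is sent downstream
 *         return;
 *     }
 *     // writeAllocator->allocate() then tells the fill path whether
 *     // to allocate the line, or to use the tempBlock and turn the
 *     // whole-line write into a writeback
 * }
 * @endcode
 */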

#endif //__MEM_CACHE_BASE_HH__