/*
 * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Hansson
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __MEM_CACHE_BASE_HH__
#define __MEM_CACHE_BASE_HH__

#include <cassert>
#include <cstdint>
#include <string>
#include <unordered_map>

#include "base/addr_range.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/compressors/base.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/packet.hh"
#include "mem/packet_queue.hh"
#include "mem/qport.hh"
#include "mem/request.hh"
#include "params/WriteAllocator.hh"
#include "sim/clocked_object.hh"
#include "sim/eventq.hh"
#include "sim/probe/probe.hh"
#include "sim/serialize.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

class BaseMasterPort;
class BasePrefetcher;
class BaseSlavePort;
class MSHR;
class MasterPort;
class QueueEntry;
struct BaseCacheParams;

/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public ClockedObject
{
  protected:
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };

  public:
    /**
     * Reasons for caches to be blocked.
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };

  protected:

    /**
     * A cache master port is used for the memory-side port of the
     * cache, and in addition to the basic timing port that only sends
     * response packets through a transmit list, it also offers the
     * ability to schedule and send request packets (requests &
     * writebacks). The send event is scheduled through schedSendEvent,
     * and the sendDeferredPacket of the timing port is modified to
     * consider both the transmit list and the requests from the MSHR.
     */
    class CacheMasterPort : public QueuedMasterPort
    {

      public:

        /**
         * Schedule a send of a request packet (from the MSHR). Note
         * that we could already have a retry outstanding.
         */
        void schedSendEvent(Tick time)
        {
            DPRINTF(CachePort, "Scheduling send event at %llu\n", time);
            reqQueue.schedSendEvent(time);
        }

      protected:

        CacheMasterPort(const std::string &_name, BaseCache *_cache,
                        ReqPacketQueue &_reqQueue,
                        SnoopRespPacketQueue &_snoopRespQueue) :
            QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
        { }

        /**
         * Memory-side port always snoops.
         *
         * @return always true
         */
        virtual bool isSnooping() const { return true; }
    };
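
    // Illustrative sketch (not part of the interface): when the cache
    // has new work queued for the memory side it wakes this port, e.g.
    //
    //     schedMemSideSendEvent(nextQueueReadyTime());
    //
    // and the queued port then picks between the transmit list and the
    // MSHR/write queues in sendDeferredPacket when the event fires.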

    /**
     * Override the default behaviour of sendDeferredPacket to enable
     * the memory-side cache port to also send requests based on the
     * current MSHR status. This queue has a pointer to our specific
     * cache implementation and is used by the MemSidePort.
     */
    class CacheReqPacketQueue : public ReqPacketQueue
    {

      protected:

        BaseCache &cache;
        SnoopRespPacketQueue &snoopRespQueue;

      public:

        CacheReqPacketQueue(BaseCache &cache, MasterPort &port,
                            SnoopRespPacketQueue &snoop_resp_queue,
                            const std::string &label) :
            ReqPacketQueue(cache, port, label), cache(cache),
            snoopRespQueue(snoop_resp_queue) { }

        /**
         * Override the normal sendDeferredPacket so that it considers
         * not only the transmit list (used for responses), but also
         * pending requests.
         */
        virtual void sendDeferredPacket();

        /**
         * Check if there is a conflicting snoop response about to be
         * sent out, and if so simply stall any requests, and schedule
         * a send event at the same time as the next snoop response is
         * being sent out.
         *
         * @param pkt The packet to check for conflicts against.
         * @return True if the request is stalled by a pending snoop
         * response.
         */
        bool checkConflictingSnoop(const PacketPtr pkt)
        {
            if (snoopRespQueue.checkConflict(pkt, cache.blkSize)) {
                DPRINTF(CachePort, "Waiting for snoop response to be "
                        "sent\n");
                Tick when = snoopRespQueue.deferredPacketReadyTime();
                schedSendEvent(when);
                return true;
            }
            return false;
        }
    };


    /**
     * The memory-side port extends the base cache master port with
     * access functions for functional, atomic and timing snoops.
     */
    class MemSidePort : public CacheMasterPort
    {
      private:

        /** The cache-specific queue. */
        CacheReqPacketQueue _reqQueue;

        SnoopRespPacketQueue _snoopRespQueue;

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:

        virtual void recvTimingSnoopReq(PacketPtr pkt);

        virtual bool recvTimingResp(PacketPtr pkt);

        virtual Tick recvAtomicSnoop(PacketPtr pkt);

        virtual void recvFunctionalSnoop(PacketPtr pkt);

      public:

        MemSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);
    };

    /**
     * A cache slave port is used for the CPU-side port of the cache,
     * and it is basically a simple timing port that uses a transmit
     * list for responses to the CPU (or connected master). In
     * addition, it has the functionality to block the port for
     * incoming requests. If blocked, the port will issue a retry once
     * unblocked.
     */
    class CacheSlavePort : public QueuedSlavePort
    {

      public:

        /** Do not accept any new requests. */
        void setBlocked();

        /** Return to normal operation and accept new requests. */
        void clearBlocked();

        bool isBlocked() const { return blocked; }

      protected:

        CacheSlavePort(const std::string &_name, BaseCache *_cache,
                       const std::string &_label);

        /** A normal packet queue used to store responses. */
        RespPacketQueue queue;

        bool blocked;

        bool mustSendRetry;

      private:

        void processSendRetry();

        EventFunctionWrapper sendRetryEvent;

    };

    /**
     * The CPU-side port extends the base cache slave port with access
     * functions for functional, atomic and timing requests.
     */
    class CpuSidePort : public CacheSlavePort
    {
      private:

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:
        virtual bool recvTimingSnoopResp(PacketPtr pkt) override;

        virtual bool tryTiming(PacketPtr pkt) override;

        virtual bool recvTimingReq(PacketPtr pkt) override;

        virtual Tick recvAtomic(PacketPtr pkt) override;

        virtual void recvFunctional(PacketPtr pkt) override;

        virtual AddrRangeList getAddrRanges() const override;

      public:

        CpuSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);

    };

    CpuSidePort cpuSidePort;
    MemSidePort memSidePort;

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    WriteQueue writeBuffer;

    /** Tag and data storage */
    BaseTags *tags;

    /** Compression method being used. */
    BaseCacheCompressor* compressor;

    /** Prefetcher */
    BasePrefetcher *prefetcher;

    /** To probe when a cache hit occurs */
    ProbePointArg<PacketPtr> *ppHit;

    /** To probe when a cache miss occurs */
    ProbePointArg<PacketPtr> *ppMiss;

    /** To probe when a cache fill occurs */
    ProbePointArg<PacketPtr> *ppFill;

    /**
     * The writeAllocator drives optimizations for streaming writes.
     * It first determines whether a WriteReq MSHR should be delayed,
     * thus ensuring that we wait longer in cases when we are write
     * coalescing and allowing all the bytes of the line to be written
     * before the MSHR packet is sent downstream. This works in unison
     * with the tracking in the MSHR to check if the entire line is
     * written. The write mode also affects the behaviour when filling
     * any whole-line writes. Normally the cache allocates the line
     * when receiving the InvalidateResp, but after seeing enough
     * consecutive lines we switch to using the tempBlock, and thus
     * end up not allocating the line, and instead turning the
     * whole-line write into a writeback straight away.
     */
    WriteAllocator * const writeAllocator;
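
    // A minimal usage sketch (assumed call site; the actual hooks live
    // in the .cc files): the timing request path feeds every cacheable
    // write into the allocator so it can track the stream:
    //
    //     if (writeAllocator && pkt->isWrite() &&
    //         !pkt->req->isUncacheable()) {
    //         writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
    //                                    pkt->getBlockAddr(blkSize));
    //     }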

    /**
     * Temporary cache block for occasional transitory use.  We use
     * the tempBlock to fill when allocation fails (e.g., when there
     * is an outstanding request that accesses the victim block) or
     * when we want to avoid allocation (e.g., exclusive caches)
     */
    TempCacheBlk *tempBlock;

    /**
     * Upstream caches need this packet until true is returned, so
     * hold it for deletion until a subsequent call
     */
    std::unique_ptr<Packet> pendingDelete;

    /**
     * Mark a request as in service (sent downstream in the memory
     * system), effectively making this MSHR the ordering point.
     */
    void markInService(MSHR *mshr, bool pending_modified_resp)
    {
        bool wasFull = mshrQueue.isFull();
        mshrQueue.markInService(mshr, pending_modified_resp);

        if (wasFull && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }
    }

    void markInService(WriteQueueEntry *entry)
    {
        bool wasFull = writeBuffer.isFull();
        writeBuffer.markInService(entry);

        if (wasFull && !writeBuffer.isFull()) {
            clearBlocked(Blocked_NoWBBuffers);
        }
    }

    /**
     * Determine whether we should allocate on a fill or not. If this
     * cache is mostly inclusive with regards to the upstream cache(s)
     * we always allocate (for any non-forwarded and cacheable
     * requests). In the case of a mostly exclusive cache, we allocate
     * on fill if the packet did not come from a cache: that is, if we
     * are dealing with a whole-line write (which behaves much like a
     * writeback), if the original target packet came from a
     * non-caching source, or if we are performing a prefetch or LLSC.
     *
     * @param cmd Command of the incoming requesting packet
     * @return Whether we should allocate on the fill
     */
    inline bool allocOnFill(MemCmd cmd) const
    {
        return clusivity == Enums::mostly_incl ||
            cmd == MemCmd::WriteLineReq ||
            cmd == MemCmd::ReadReq ||
            cmd == MemCmd::WriteReq ||
            cmd.isPrefetch() ||
            cmd.isLLSC();
    }
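
    // For example, allocateMissBuffer below passes the result straight
    // into the MSHR allocation, so the eventual fill knows whether to
    // claim a real block or fall back to the tempBlock:
    //
    //     mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize, pkt,
    //                        time, order++, allocOnFill(pkt->cmd));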

    /**
     * Regenerate block address using tags.
     * Block address regeneration depends on whether we're using a temporary
     * block or not.
     *
     * @param blk The block whose address is to be regenerated.
     * @return The block's address.
     */
    Addr regenerateBlkAddr(CacheBlk* blk);

    /**
     * Calculate latency of accesses that only touch the tag array.
     * @sa calculateAccessLatency
     *
     * @param delay The delay until the packet's metadata is present.
     * @param lookup_lat Latency of the respective tag lookup.
     * @return The number of ticks that pass due to a tag-only access.
     */
    Cycles calculateTagOnlyLatency(const uint32_t delay,
                                   const Cycles lookup_lat) const;
    /**
     * Calculate access latency in ticks given a tag lookup latency, and
     * whether access was a hit or miss.
     *
     * @param blk The cache block that was accessed.
     * @param delay The delay until the packet's metadata is present.
     * @param lookup_lat Latency of the respective tag lookup.
     * @return The number of ticks that pass due to a block access.
     */
    Cycles calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
                                  const Cycles lookup_lat) const;
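
    // A sketch of the hit-latency arithmetic (assumed from the
    // sequentialAccess and dataLatency members declared below; the
    // definitive code is in base.cc): with sequential tag/data arrays
    // the latencies add up, with parallel arrays the slower one wins:
    //
    //     Cycles lat = sequentialAccess
    //         ? ticksToCycles(delay) + lookup_lat + dataLatency
    //         : ticksToCycles(delay) + std::max(lookup_lat, dataLatency);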

    /**
     * Does all the processing necessary to perform the provided request.
     * @param pkt The memory request to perform.
     * @param blk The cache block to be updated.
     * @param lat The latency of the access.
     * @param writebacks List for any writebacks that need to be performed.
     * @return Boolean indicating whether the request was satisfied.
     */
    virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                        PacketList &writebacks);

    /*
     * Handle a timing request that hit in the cache
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk,
                                    Tick request_time);

    /*
     * Handle a timing request that missed in the cache
     *
     * Implementation specific handling for different cache
     * implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk,
                                     Tick forward_time,
                                     Tick request_time) = 0;

    /*
     * Handle a timing request that missed in the cache
     *
     * Common functionality across different cache implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param mshr Any existing mshr for the referenced cache block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    void handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                             Tick forward_time, Tick request_time);
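
    // The common miss path, roughly (a sketch of what base.cc does;
    // statistics, eviction handling and deferred targets are omitted):
    //
    //     if (mshr) {
    //         // hit an outstanding miss: coalesce into the MSHR
    //         mshr->allocateTarget(pkt, forward_time, order++,
    //                              allocOnFill(pkt->cmd));
    //         if (mshr->getNumTargets() == numTarget)
    //             setBlocked(Blocked_NoTargets);  // MSHR is now full
    //     } else {
    //         // first miss to this block: allocate a new MSHR
    //         allocateMissBuffer(pkt, forward_time);
    //     }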

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     */
    virtual void recvTimingReq(PacketPtr pkt);

    /**
     * Handle the special case of uncacheable write responses to
     * make recvTimingResp less cluttered.
     */
    void handleUncacheableWriteResp(PacketPtr pkt);

    /**
     * Service non-deferred MSHR targets using the received response
     *
     * Iterates through the list of targets that can be serviced with
     * the current response.
     *
     * @param mshr The MSHR that corresponds to the response
     * @param pkt The response packet
     * @param blk The referenced block
     */
    virtual void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt,
                                    CacheBlk *blk) = 0;

    /**
     * Handles a response (cache line fill/write ack) from the bus.
     * @param pkt The response packet
     */
    virtual void recvTimingResp(PacketPtr pkt);

    /**
     * Snoops bus transactions to maintain coherence.
     * @param pkt The current bus transaction.
     */
    virtual void recvTimingSnoopReq(PacketPtr pkt) = 0;

    /**
     * Handle a snoop response.
     * @param pkt Snoop response packet
     */
    virtual void recvTimingSnoopResp(PacketPtr pkt) = 0;

    /**
     * Handle a request in atomic mode that missed in this cache
     *
     * Creates a downstream request, sends it to the memory below and
     * handles the response. As we are in atomic mode all operations
     * are performed immediately.
     *
     * @param pkt The packet with the request
     * @param blk The referenced block
     * @param writebacks A list with packets for any performed writebacks
     * @return Cycles for handling the request
     */
    virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
                                       PacketList &writebacks) = 0;

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The number of ticks required for the access.
     */
    virtual Tick recvAtomic(PacketPtr pkt);

    /**
     * Snoop for the provided request in the cache and return the estimated
     * time taken.
     * @param pkt The memory request to snoop
     * @return The number of ticks required for the snoop.
     */
    virtual Tick recvAtomicSnoop(PacketPtr pkt) = 0;

    /**
     * Performs the access specified by the request.
     *
     * @param pkt The request to perform.
     * @param from_cpu_side True if the access comes from the CPU-side
     * port, false if it comes from the memory-side port.
     */
    virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side);

    /**
     * Handle doing the Compare and Swap function for SPARC.
     */
    void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);

    /**
     * Return the next queue entry to service, either a pending miss
     * from the MSHR queue, a buffered write from the write buffer, or
     * something from the prefetcher. This function is responsible
     * for prioritizing among those sources on the fly.
     */
    QueueEntry* getNextQueueEntry();
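
    // In outline (a sketch; the exact policy, including conflict checks
    // against pending writebacks, lives in base.cc):
    //
    //     1. a ready write-buffer entry, if the write buffer is full or
    //        no MSHR is ready,
    //     2. otherwise a ready MSHR,
    //     3. otherwise, if a free MSHR remains, a packet supplied by
    //        the prefetcher.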

    /**
     * Insert writebacks into the write buffer
     */
    virtual void doWritebacks(PacketList& writebacks, Tick forward_time) = 0;

    /**
     * Send writebacks down the memory hierarchy in atomic mode
     */
    virtual void doWritebacksAtomic(PacketList& writebacks) = 0;

    /**
     * Create an appropriate downstream bus request packet.
     *
     * Creates a new packet with the request to be sent to the memory
     * below, or nullptr if the current request in cpu_pkt should just
     * be forwarded on.
     *
     * @param cpu_pkt The miss packet that needs to be satisfied.
     * @param blk The referenced block, can be nullptr.
     * @param needs_writable Indicates that the block must be writable
     * even if the request in cpu_pkt doesn't indicate that.
     * @param is_whole_line_write True if there are writes for the
     * whole line
     * @return A packet to send to the memory below
     */
    virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                                       bool needs_writable,
                                       bool is_whole_line_write) const = 0;

    /**
     * Determine if clean lines should be written back or not. In
     * cases where a downstream cache is mostly inclusive we likely
     * want it to act as a victim cache also for lines that have not
     * been modified. Hence, we cannot simply drop the line (or send a
     * clean evict), but rather need to send the actual data.
     */
    const bool writebackClean;

    /**
     * Writebacks from the tempBlock, arising on the response path
     * in atomic mode, must happen after the call to recvAtomic has
     * finished (for the right ordering of the packets). We therefore
     * need to hold on to the packets, and have a method and an event
     * to send them.
     */
    PacketPtr tempBlockWriteback;

    /**
     * Send the outstanding tempBlock writeback. To be called after
     * recvAtomic finishes in cases where the block we filled is in
     * fact the tempBlock, and now needs to be written back.
     */
    void writebackTempBlockAtomic() {
        assert(tempBlockWriteback != nullptr);
        PacketList writebacks{tempBlockWriteback};
        doWritebacksAtomic(writebacks);
        tempBlockWriteback = nullptr;
    }

    /**
     * An event to writeback the tempBlock after recvAtomic
     * finishes. To avoid other calls to recvAtomic getting in
     * between, we create this event with a higher priority.
     */
    EventFunctionWrapper writebackTempBlockAtomicEvent;
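
    // A sketch of the intended use at the end of recvAtomic (assumed
    // shape; the definitive sequencing is in base.cc): stash the
    // writeback and schedule the high-priority event for the current
    // tick so it runs before the next recvAtomic:
    //
    //     tempBlockWriteback = evictBlock(blk);
    //     if (!writebackTempBlockAtomicEvent.scheduled())
    //         schedule(writebackTempBlockAtomicEvent, curTick());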

    /**
     * Perform any necessary updates to the block and perform any data
     * exchange between the packet and the block. The flags of the
     * packet are also set accordingly.
     *
     * @param pkt Request packet from upstream that hit a block
     * @param blk Cache block that the packet hit
     * @param deferred_response Whether this request originally missed
     * @param pending_downgrade Whether the writable flag is to be removed
     */
    virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                                bool deferred_response = false,
                                bool pending_downgrade = false);

    /**
     * Maintain the clusivity of this cache by potentially
     * invalidating a block. This method works in conjunction with
     * satisfyRequest, but is separate to allow us to handle all MSHR
     * targets before potentially dropping a block.
     *
     * @param from_cache Whether we have dealt with a packet from a cache
     * @param blk The block that should potentially be dropped
     */
    void maintainClusivity(bool from_cache, CacheBlk *blk);

    /**
     * Handle a fill operation caused by a received packet.
     *
     * Populates a cache block and handles all outstanding requests for the
     * satisfied fill request. This version takes two memory requests. One
     * contains the fill data, the other is an optional target to satisfy.
     * Note that the reason we return a list of writebacks rather than
     * inserting them directly in the write buffer is that this function
     * is called by both atomic and timing-mode accesses, and in atomic
     * mode we don't mess with the write buffer (we just perform the
     * writebacks atomically once the original request is complete).
     *
     * @param pkt The memory request with the fill data.
     * @param blk The cache block if it already exists.
     * @param writebacks List for any writebacks that need to be performed.
     * @param allocate Whether to allocate a block or use the temp block
     * @return Pointer to the new cache block.
     */
    CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
                         PacketList &writebacks, bool allocate);
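
    // Typical call shape on the response path (illustrative; the real
    // call in recvTimingResp also consults the write allocator):
    //
    //     blk = handleFill(pkt, blk, writebacks, allocOnFill(pkt->cmd));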

    /**
     * Allocate a new block and perform any necessary writebacks
     *
     * Find a victim block and if necessary prepare writebacks for any
     * existing data. May return nullptr if there are no replaceable
     * blocks. If a replaceable block is found, it inserts the new block in
     * its place. The new block, however, is not set as valid yet.
     *
     * @param pkt Packet holding the address to update
     * @param writebacks A list of writeback packets for the evicted blocks
     * @return the allocated block
     */
    CacheBlk *allocateBlock(const PacketPtr pkt, PacketList &writebacks);
    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @return A packet with the writeback, can be nullptr
     */
    M5_NODISCARD virtual PacketPtr evictBlock(CacheBlk *blk) = 0;

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @param writebacks Return a list of packets with writebacks
     */
    void evictBlock(CacheBlk *blk, PacketList &writebacks);

    /**
     * Invalidate a cache block.
     *
     * @param blk Block to invalidate
     */
    void invalidateBlock(CacheBlk *blk);

    /**
     * Create a writeback request for the given block.
     *
     * @param blk The block to writeback.
     * @return The writeback request for the block.
     */
    PacketPtr writebackBlk(CacheBlk *blk);

    /**
     * Create a writeclean request for the given block.
     *
     * Creates a request that writes the block to the cache below
     * without evicting the block from the current cache.
     *
     * @param blk The block to write clean.
     * @param dest The destination of the write clean operation.
     * @param id Use the given packet id for the write clean operation.
     * @return The generated write clean packet.
     */
    PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id);

    /**
     * Write back dirty blocks in the cache using functional accesses.
     */
    virtual void memWriteback() override;

    /**
     * Invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to
     * memory. Make sure to call memWriteback() first if you want
     * them written to memory.
     */
    virtual void memInvalidate() override;

    /**
     * Determine if there are any dirty blocks in the cache.
     *
     * @return true if at least one block is dirty, false otherwise.
     */
    bool isDirty() const;

    /**
     * Determine if an address is in the ranges covered by this
     * cache. This is useful to filter snoops.
     *
     * @param addr Address to check against
     *
     * @return If the address in question is in range
     */
    bool inRange(Addr addr) const;

    /**
     * Find next request ready time from among possible sources.
     */
    Tick nextQueueReadyTime() const;

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of tag lookup of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles lookupLatency;

    /**
     * The latency of data access of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles dataLatency;

    /**
     * This is the forward latency of the cache. It occurs when there
     * is a cache miss and a request is forwarded downstream, in
     * particular an outbound miss.
     */
    const Cycles forwardLatency;

    /** The latency to fill a cache block */
    const Cycles fillLatency;

    /**
     * The latency of sending a response to its upper level cache/core
     * on a linefill. The responseLatency parameter captures this
     * latency.
     */
    const Cycles responseLatency;

    /**
     * Whether tags and data are accessed sequentially.
     */
    const bool sequentialAccess;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    bool forwardSnoops;

    /**
     * Clusivity with respect to the upstream cache, determining if we
     * fill into both this cache and the cache above on a miss. Note
     * that we currently do not support strict clusivity policies.
     */
    const Enums::Clusivity clusivity;

    /**
     * Is this cache read only, for example the instruction cache, or
     * table-walker cache. A cache that is read only should never see
     * any writes, and should never get any dirty data (and hence
     * never have to do any writebacks).
     */
    const bool isReadOnly;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores time the cache blocked for statistics. */
    Cycles blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses. */
    const AddrRangeList addrRanges;

887    /** System we are currently operating in. */
888    System *system;
889
890    // Statistics
891    /**
892     * @addtogroup CacheStatistics
893     * @{
894     */
895
896    /** Number of hits per thread for each type of command.
897        @sa Packet::Command */
898    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
899    /** Number of hits for demand accesses. */
900    Stats::Formula demandHits;
901    /** Number of hit for all accesses. */
902    Stats::Formula overallHits;
903
904    /** Number of misses per thread for each type of command.
905        @sa Packet::Command */
906    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
907    /** Number of misses for demand accesses. */
908    Stats::Formula demandMisses;
909    /** Number of misses for all accesses. */
910    Stats::Formula overallMisses;
911
912    /**
913     * Total number of cycles per thread/command spent waiting for a miss.
914     * Used to calculate the average miss latency.
915     */
916    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
917    /** Total number of cycles spent waiting for demand misses. */
918    Stats::Formula demandMissLatency;
919    /** Total number of cycles spent waiting for all misses. */
920    Stats::Formula overallMissLatency;
921
922    /** The number of accesses per command and thread. */
923    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
924    /** The number of demand accesses. */
925    Stats::Formula demandAccesses;
926    /** The number of overall accesses. */
927    Stats::Formula overallAccesses;
928
929    /** The miss rate per command and thread. */
930    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
931    /** The miss rate of all demand accesses. */
932    Stats::Formula demandMissRate;
933    /** The miss rate for all accesses. */
934    Stats::Formula overallMissRate;
935
936    /** The average miss latency per command and thread. */
937    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
938    /** The average miss latency for demand misses. */
939    Stats::Formula demandAvgMissLatency;
940    /** The average miss latency for all misses. */
941    Stats::Formula overallAvgMissLatency;
942
943    /** The total number of cycles blocked for each blocked cause. */
944    Stats::Vector blocked_cycles;
945    /** The number of times this cache blocked for each blocked cause. */
946    Stats::Vector blocked_causes;
947
948    /** The average number of cycles blocked for each blocked cause. */
949    Stats::Formula avg_blocked;
950
951    /** The number of times a HW-prefetched block is evicted w/o reference. */
952    Stats::Scalar unusedPrefetches;
953
954    /** Number of blocks written back per thread. */
955    Stats::Vector writebacks;
956
957    /** Number of misses that hit in the MSHRs per command and thread. */
958    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
959    /** Demand misses that hit in the MSHRs. */
960    Stats::Formula demandMshrHits;
961    /** Total number of misses that hit in the MSHRs. */
962    Stats::Formula overallMshrHits;
963
964    /** Number of misses that miss in the MSHRs, per command and thread. */
965    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
966    /** Demand misses that miss in the MSHRs. */
967    Stats::Formula demandMshrMisses;
968    /** Total number of misses that miss in the MSHRs. */
969    Stats::Formula overallMshrMisses;
970
971    /** Number of misses that miss in the MSHRs, per command and thread. */
972    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
973    /** Total number of misses that miss in the MSHRs. */
974    Stats::Formula overallMshrUncacheable;
975
976    /** Total cycle latency of each MSHR miss, per command and thread. */
977    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
978    /** Total cycle latency of demand MSHR misses. */
979    Stats::Formula demandMshrMissLatency;
980    /** Total cycle latency of overall MSHR misses. */
981    Stats::Formula overallMshrMissLatency;
982
983    /** Total cycle latency of each MSHR miss, per command and thread. */
984    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
985    /** Total cycle latency of overall MSHR misses. */
986    Stats::Formula overallMshrUncacheableLatency;
987
988#if 0
989    /** The total number of MSHR accesses per command and thread. */
990    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
991    /** The total number of demand MSHR accesses. */
992    Stats::Formula demandMshrAccesses;
993    /** The total number of MSHR accesses. */
994    Stats::Formula overallMshrAccesses;
995#endif

    /** The miss rate in the MSHRs per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable MSHR access, per command
        and thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable MSHR access. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** Number of replacements of valid blocks. */
    Stats::Scalar replacements;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    void regStats() override;

    /** Registers probes. */
    void regProbePoints() override;

  public:
    BaseCache(const BaseCacheParams *p, unsigned blk_size);
    ~BaseCache();

    void init() override;

    Port &getPort(const std::string &if_name,
                  PortID idx=InvalidPortID) override;

    /**
     * Query block size of a cache.
     * @return  The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }

    const AddrRangeList &getAddrRanges() const { return addrRanges; }

    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
    {
        MSHR *mshr = mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize,
                                        pkt, time, order++,
                                        allocOnFill(pkt->cmd));

        if (mshrQueue.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_MSHRs);
        }

        if (sched_send) {
            // schedule the send
            schedMemSideSendEvent(time);
        }

        return mshr;
    }

    void allocateWriteBuffer(PacketPtr pkt, Tick time)
    {
        // should only see writes or clean evicts here
        assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);

        Addr blk_addr = pkt->getBlockAddr(blkSize);

        // If using compression, on evictions the block is decompressed and
        // the operation's latency is added to the payload delay. Consume
        // that payload delay here, meaning that the data is always stored
        // uncompressed in the write buffer
        if (compressor) {
            time += pkt->payloadDelay;
            pkt->payloadDelay = 0;
        }

        WriteQueueEntry *wq_entry =
            writeBuffer.findMatch(blk_addr, pkt->isSecure());
        if (wq_entry && !wq_entry->inService) {
            DPRINTF(Cache, "Potential to merge writeback %s\n", pkt->print());
        }

        writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);

        if (writeBuffer.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
        }

        // schedule the send
        schedMemSideSendEvent(time);
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked() const
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curCycle();
            cpuSidePort.setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache, "Blocking for cause %d, mask=%d\n", cause, blocked);
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache, "Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            blocked_cycles[cause] += curCycle() - blockedCycle;
            cpuSidePort.clearBlocked();
        }
    }
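
    // Worked example of the blocking bit vector (values follow directly
    // from BlockedCause and the shifts above):
    //
    //     setBlocked(Blocked_NoMSHRs);     // blocked == 0b001
    //     setBlocked(Blocked_NoTargets);   // blocked == 0b101
    //     clearBlocked(Blocked_NoMSHRs);   // blocked == 0b100, still blocked
    //     clearBlocked(Blocked_NoTargets); // blocked == 0, CPU side retried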

    /**
     * Schedule a send event for the memory-side port. If already
     * scheduled, this may reschedule the event at an earlier
     * time. When the specified time is reached, the port is free to
     * send either a response, a request, or a prefetch request.
     *
     * @param time The time when to attempt sending a packet.
     */
    void schedMemSideSendEvent(Tick time)
    {
        memSidePort.schedSendEvent(time);
    }

    bool inCache(Addr addr, bool is_secure) const {
        return tags->findBlock(addr, is_secure);
    }

    bool hasBeenPrefetched(Addr addr, bool is_secure) const {
        CacheBlk *block = tags->findBlock(addr, is_secure);
        if (block) {
            return block->wasPrefetched();
        } else {
            return false;
        }
    }

    bool inMissQueue(Addr addr, bool is_secure) const {
        return mshrQueue.findMatch(addr, is_secure);
    }
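
    // These helpers are what a prefetcher typically uses to filter
    // candidates, e.g. (illustrative):
    //
    //     if (!inCache(addr, is_secure) && !inMissQueue(addr, is_secure)) {
    //         // worth prefetching: not present and not already pending
    //     }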

    void incMissCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
        pkt->req->incAccessDepth();
        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }
    void incHitCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
    }

    /**
     * Checks if the cache is coalescing writes
     *
     * @return True if the cache is coalescing writes
     */
    bool coalesce() const;


    /**
     * Cache block visitor that writes back dirty cache blocks using
     * functional writes.
     */
    void writebackVisitor(CacheBlk &blk);

    /**
     * Cache block visitor that invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to memory.
     */
    void invalidateVisitor(CacheBlk &blk);

    /**
     * Take an MSHR, turn it into a suitable downstream packet, and
     * send it out. This construct allows a queue entry to choose a suitable
     * approach based on its type.
     *
     * @param mshr The MSHR to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    virtual bool sendMSHRQueuePacket(MSHR* mshr);

    /**
     * Similar to sendMSHRQueuePacket, but for a write-queue entry
     * instead. Create the packet, and send it, and if successful also
     * mark the entry in service.
     *
     * @param wq_entry The write-queue entry to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);

    /**
     * Serialize the state of the caches
     *
     * We currently don't support checkpointing cache state, so this panics.
     */
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};

/**
 * The write allocator inspects write packets and detects streaming
 * patterns. The write allocator supports a single stream where writes
 * are expected to access consecutive locations, and keeps track of the
 * size of the area covered by the consecutive writes in byteCount.
 *
 * 1) When byteCount has surpassed the coalesceLimit, the mode switches
 * from ALLOCATE to COALESCE, where writes should be delayed until the
 * whole block is written, at which point a single packet (whole-line
 * write) can service them.
 *
 * 2) When byteCount has also exceeded the noAllocateLimit (whole
 * line), we switch to NO_ALLOCATE, where writes should not allocate in
 * the cache but rather send a whole-line write to the memory below.
 */
class WriteAllocator : public SimObject {
  public:
    WriteAllocator(const WriteAllocatorParams *p) :
        SimObject(p),
        coalesceLimit(p->coalesce_limit * p->block_size),
        noAllocateLimit(p->no_allocate_limit * p->block_size),
        delayThreshold(p->delay_threshold)
    {
        reset();
    }
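
    // Worked example (illustrative parameter values, not defaults): with
    // block_size = 64, coalesce_limit = 2 and no_allocate_limit = 12, a
    // contiguous write stream switches ALLOCATE -> COALESCE once more
    // than 128 bytes have been written, and COALESCE -> NO_ALLOCATE once
    // more than 768 bytes have been written.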

    /**
     * Should writes be coalesced? This is true if the mode is set to
     * COALESCE or NO_ALLOCATE.
     *
     * @return True if the cache should coalesce writes.
     */
    bool coalesce() const {
        return mode != WriteMode::ALLOCATE;
    }

    /**
     * Should writes allocate?
     *
     * @return True if the cache should allocate for writes.
     */
    bool allocate() const {
        return mode != WriteMode::NO_ALLOCATE;
    }

    /**
     * Reset the write allocator state, meaning that it allocates for
     * writes and has not recorded any information about qualifying
     * writes that might trigger a switch to coalescing and later no
     * allocation.
     */
    void reset() {
        mode = WriteMode::ALLOCATE;
        byteCount = 0;
        nextAddr = 0;
    }

    /**
     * Check whether we need to delay the current write.
     *
     * @param blk_addr The block address the packet writes to
     * @return true if the current packet should be delayed
     */
    bool delay(Addr blk_addr) {
        if (delayCtr[blk_addr] > 0) {
            --delayCtr[blk_addr];
            return true;
        } else {
            return false;
        }
    }

    /**
     * Clear delay counter for the input block
     *
     * @param blk_addr The accessed cache block
     */
    void resetDelay(Addr blk_addr) {
        delayCtr.erase(blk_addr);
    }

    /**
     * Update the write mode based on the current write
     * packet. This method compares the packet's address with any
     * current stream, and updates the tracking and the mode
     * accordingly.
     *
     * @param write_addr Start address of the write request
     * @param write_size Size of the write request
     * @param blk_addr The block address that this packet writes to
     */
    void updateMode(Addr write_addr, unsigned write_size, Addr blk_addr);
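
    // A minimal sketch of the tracking described above (the definitive
    // logic, including how delayCtr is seeded from delayThreshold, is
    // in base.cc):
    //
    //     if (write_addr == nextAddr) {        // contiguous with stream
    //         byteCount += write_size;
    //         if (mode == WriteMode::ALLOCATE &&
    //             byteCount > coalesceLimit)
    //             mode = WriteMode::COALESCE;      // start coalescing
    //         else if (mode == WriteMode::COALESCE &&
    //                  byteCount > noAllocateLimit)
    //             mode = WriteMode::NO_ALLOCATE;   // stop allocating
    //     } else {
    //         reset();                         // stream broken
    //         byteCount = write_size;
    //     }
    //     nextAddr = write_addr + write_size;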

  private:
    /**
     * The current mode for write coalescing and allocation, either
     * normal operation (ALLOCATE), write coalescing (COALESCE), or
     * write coalescing without allocation (NO_ALLOCATE).
     */
    enum class WriteMode : char {
        ALLOCATE,
        COALESCE,
        NO_ALLOCATE,
    };
    WriteMode mode;

    /** Address to match writes against to detect streams. */
    Addr nextAddr;

    /**
     * Bytes written contiguously. Saturating once we no longer
     * allocate.
     */
    uint32_t byteCount;

    /**
     * Limits for when to switch between the different write modes.
     */
    const uint32_t coalesceLimit;
    const uint32_t noAllocateLimit;
    /**
     * The number of times the allocator will delay a WriteReq MSHR.
     */
    const uint32_t delayThreshold;

    /**
     * Keep track of the number of times the allocator has delayed a
     * WriteReq MSHR.
     */
    std::unordered_map<Addr, Counter> delayCtr;
};

#endif //__MEM_CACHE_BASE_HH__