/*
 * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Hansson
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __MEM_CACHE_BASE_HH__
#define __MEM_CACHE_BASE_HH__

#include <cassert>
#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>

#include "base/addr_range.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/compressors/base.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/packet.hh"
#include "mem/packet_queue.hh"
#include "mem/qport.hh"
#include "mem/request.hh"
#include "params/WriteAllocator.hh"
#include "sim/clocked_object.hh"
#include "sim/eventq.hh"
#include "sim/probe/probe.hh"
#include "sim/serialize.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

class BasePrefetcher;
class MSHR;
class MasterPort;
class QueueEntry;
struct BaseCacheParams;

/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public ClockedObject
{
  protected:
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };

  public:
    /**
     * Reasons for caches to be blocked.
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };

  protected:

    /**
     * A cache master port is used for the memory-side port of the
     * cache, and in addition to the basic timing port that only sends
     * response packets through a transmit list, it also offers the
     * ability to schedule and send request packets (requests &
     * writebacks). The send event is scheduled through schedSendEvent,
     * and the sendDeferredPacket of the timing port is modified to
     * consider both the transmit list and the requests from the MSHR.
     */
    class CacheMasterPort : public QueuedMasterPort
    {

      public:

        /**
         * Schedule a send of a request packet (from the MSHR). Note
         * that we could already have a retry outstanding.
         */
        void schedSendEvent(Tick time)
        {
            DPRINTF(CachePort, "Scheduling send event at %llu\n", time);
            reqQueue.schedSendEvent(time);
        }

      protected:

        CacheMasterPort(const std::string &_name, BaseCache *_cache,
                        ReqPacketQueue &_reqQueue,
                        SnoopRespPacketQueue &_snoopRespQueue) :
            QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
        { }

        /**
         * Memory-side port always snoops.
         *
         * @return always true
         */
        virtual bool isSnooping() const { return true; }
    };

    /**
     * Override the default behaviour of sendDeferredPacket to enable
     * the memory-side cache port to also send requests based on the
     * current MSHR status. This queue has a pointer to our specific
     * cache implementation and is used by the MemSidePort.
     */
    class CacheReqPacketQueue : public ReqPacketQueue
    {

      protected:

        BaseCache &cache;
        SnoopRespPacketQueue &snoopRespQueue;

      public:

        CacheReqPacketQueue(BaseCache &cache, MasterPort &port,
                            SnoopRespPacketQueue &snoop_resp_queue,
                            const std::string &label) :
            ReqPacketQueue(cache, port, label), cache(cache),
            snoopRespQueue(snoop_resp_queue) { }

        /**
         * Override the normal sendDeferredPacket so that it considers
         * not only the transmit list (used for responses) but also
         * requests.
         */
        virtual void sendDeferredPacket();

        /**
         * Check if there is a conflicting snoop response about to be
         * sent out, and if so simply stall any requests, and schedule
         * a send event at the same time as the next snoop response is
         * being sent out.
         *
         * @param pkt The packet to check for conflicts against.
         * @return true if the request is stalled behind a pending
         * snoop response.
         */
        bool checkConflictingSnoop(const PacketPtr pkt)
        {
            if (snoopRespQueue.checkConflict(pkt, cache.blkSize)) {
                DPRINTF(CachePort, "Waiting for snoop response to be "
                        "sent\n");
                Tick when = snoopRespQueue.deferredPacketReadyTime();
                schedSendEvent(when);
                return true;
            }
            return false;
        }
    };


    /**
     * The memory-side port extends the base cache master port with
     * access functions for functional, atomic and timing snoops.
     */
    class MemSidePort : public CacheMasterPort
    {
      private:

        /** The cache-specific queue. */
        CacheReqPacketQueue _reqQueue;

        SnoopRespPacketQueue _snoopRespQueue;

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:

        virtual void recvTimingSnoopReq(PacketPtr pkt);

        virtual bool recvTimingResp(PacketPtr pkt);

        virtual Tick recvAtomicSnoop(PacketPtr pkt);

        virtual void recvFunctionalSnoop(PacketPtr pkt);

      public:

        MemSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);
    };

    /**
     * A cache slave port is used for the CPU-side port of the cache,
     * and it is basically a simple timing port that uses a transmit
     * list for responses to the CPU (or connected master). In
     * addition, it has the functionality to block the port for
     * incoming requests. If blocked, the port will issue a retry once
     * unblocked.
     */
    class CacheSlavePort : public QueuedSlavePort
    {

      public:

        /** Do not accept any new requests. */
        void setBlocked();

        /** Return to normal operation and accept new requests. */
        void clearBlocked();

        bool isBlocked() const { return blocked; }

      protected:

        CacheSlavePort(const std::string &_name, BaseCache *_cache,
                       const std::string &_label);

        /** A normal packet queue used to store responses. */
        RespPacketQueue queue;

        bool blocked;

        bool mustSendRetry;

      private:

        void processSendRetry();

        EventFunctionWrapper sendRetryEvent;

    };

    /**
     * The CPU-side port extends the base cache slave port with access
     * functions for functional, atomic and timing requests.
     */
    class CpuSidePort : public CacheSlavePort
    {
      private:

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:
        virtual bool recvTimingSnoopResp(PacketPtr pkt) override;

        virtual bool tryTiming(PacketPtr pkt) override;

        virtual bool recvTimingReq(PacketPtr pkt) override;

        virtual Tick recvAtomic(PacketPtr pkt) override;

        virtual void recvFunctional(PacketPtr pkt) override;

        virtual AddrRangeList getAddrRanges() const override;

      public:

        CpuSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);

    };

    CpuSidePort cpuSidePort;
    MemSidePort memSidePort;

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    WriteQueue writeBuffer;

    /** Tag and data storage */
    BaseTags *tags;

    /** Compression method being used. */
    BaseCacheCompressor* compressor;

    /** Prefetcher */
    BasePrefetcher *prefetcher;

    /** To probe when a cache hit occurs */
    ProbePointArg<PacketPtr> *ppHit;

    /** To probe when a cache miss occurs */
    ProbePointArg<PacketPtr> *ppMiss;

    /** To probe when a cache fill occurs */
    ProbePointArg<PacketPtr> *ppFill;

    /**
     * The writeAllocator drives optimizations for streaming writes.
     * It first determines whether a WriteReq MSHR should be delayed,
     * thus ensuring that we wait longer in cases when we are write
     * coalescing and allowing all the bytes of the line to be written
     * before the MSHR packet is sent downstream. This works in unison
     * with the tracking in the MSHR to check if the entire line is
     * written. The write mode also affects the behaviour on filling
     * any whole-line writes. Normally the cache allocates the line
     * when receiving the InvalidateResp, but after seeing enough
     * consecutive lines we switch to using the tempBlock, and thus
     * end up not allocating the line, instead turning the whole-line
     * write into a writeback straight away.
     */
    WriteAllocator * const writeAllocator;

    /**
     * Temporary cache block for occasional transitory use. We use
     * the tempBlock to fill when allocation fails (e.g., when there
     * is an outstanding request that accesses the victim block) or
     * when we want to avoid allocation (e.g., exclusive caches).
     */
    TempCacheBlk *tempBlock;

    /**
     * Upstream caches need this packet until true is returned, so
     * hold it for deletion until a subsequent call.
     */
    std::unique_ptr<Packet> pendingDelete;

    /**
     * Mark a request as in service (sent downstream in the memory
     * system), effectively making this MSHR the ordering point.
     */
    void markInService(MSHR *mshr, bool pending_modified_resp)
    {
        bool wasFull = mshrQueue.isFull();
        mshrQueue.markInService(mshr, pending_modified_resp);

        if (wasFull && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }
    }

    void markInService(WriteQueueEntry *entry)
    {
        bool wasFull = writeBuffer.isFull();
        writeBuffer.markInService(entry);

        if (wasFull && !writeBuffer.isFull()) {
            clearBlocked(Blocked_NoWBBuffers);
        }
    }

    /**
     * Determine whether we should allocate on a fill or not. If this
     * cache is mostly inclusive with regards to the upstream cache(s)
     * we always allocate (for any non-forwarded and cacheable
     * requests). In the case of a mostly exclusive cache, we allocate
     * on fill if the packet did not come from a cache, thus if: we
     * are dealing with a whole-line write (which behaves much like a
     * writeback), the original target packet came from a non-caching
     * source (a plain read or write), or we are performing a prefetch
     * or an LLSC access.
     *
     * @param cmd Command of the incoming requesting packet
     * @return Whether we should allocate on the fill
     */
    inline bool allocOnFill(MemCmd cmd) const
    {
        return clusivity == Enums::mostly_incl ||
            cmd == MemCmd::WriteLineReq ||
            cmd == MemCmd::ReadReq ||
            cmd == MemCmd::WriteReq ||
            cmd.isPrefetch() ||
            cmd.isLLSC();
    }

    /**
     * Regenerate block address using tags.
     * Block address regeneration depends on whether we're using a temporary
     * block or not.
     *
     * @param blk The block whose address to regenerate.
     * @return The block's address.
     */
    Addr regenerateBlkAddr(CacheBlk* blk);

    /**
     * Calculate latency of accesses that only touch the tag array.
     * @sa calculateAccessLatency
     *
     * @param delay The delay until the packet's metadata is present.
     * @param lookup_lat Latency of the respective tag lookup.
     * @return The number of ticks that pass due to a tag-only access.
     */
    Cycles calculateTagOnlyLatency(const uint32_t delay,
                                   const Cycles lookup_lat) const;

    /**
     * Calculate access latency in ticks given a tag lookup latency, and
     * whether the access was a hit or a miss.
     *
     * @param blk The cache block that was accessed.
     * @param delay The delay until the packet's metadata is present.
     * @param lookup_lat Latency of the respective tag lookup.
     * @return The number of ticks that pass due to a block access.
     */
    Cycles calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
                                  const Cycles lookup_lat) const;

    /**
     * Does all the processing necessary to perform the provided request.
     * @param pkt The memory request to perform.
     * @param blk The cache block to be updated.
     * @param lat The latency of the access.
     * @param writebacks List for any writebacks that need to be performed.
     * @return Boolean indicating whether the request was satisfied.
     */
    virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                        PacketList &writebacks);

    /*
     * Handle a timing request that hit in the cache
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk,
                                    Tick request_time);

    /*
     * Handle a timing request that missed in the cache
     *
     * Implementation specific handling for different cache
     * implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk,
                                     Tick forward_time,
                                     Tick request_time) = 0;

    /*
     * Handle a timing request that missed in the cache
     *
     * Common functionality across different cache implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param mshr Any existing mshr for the referenced cache block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    void handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                             Tick forward_time, Tick request_time);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     */
    virtual void recvTimingReq(PacketPtr pkt);

    /**
     * Handling the special case of uncacheable write responses to
     * make recvTimingResp less cluttered.
     */
    void handleUncacheableWriteResp(PacketPtr pkt);

    /**
     * Service non-deferred MSHR targets using the received response
     *
     * Iterates through the list of targets that can be serviced with
     * the current response.
     *
     * @param mshr The MSHR that corresponds to the response
     * @param pkt The response packet
     * @param blk The reference block
     */
    virtual void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt,
                                    CacheBlk *blk) = 0;

    /**
     * Handles a response (cache line fill/write ack) from the bus.
     * @param pkt The response packet
     */
    virtual void recvTimingResp(PacketPtr pkt);

    /**
     * Snoops bus transactions to maintain coherence.
     * @param pkt The current bus transaction.
     */
    virtual void recvTimingSnoopReq(PacketPtr pkt) = 0;

    /**
     * Handle a snoop response.
     * @param pkt Snoop response packet
     */
    virtual void recvTimingSnoopResp(PacketPtr pkt) = 0;

    /**
     * Handle a request in atomic mode that missed in this cache
     *
     * Creates a downstream request, sends it to the memory below and
     * handles the response. As we are in atomic mode all operations
     * are performed immediately.
     *
     * @param pkt The packet with the request
     * @param blk The referenced block
     * @param writebacks A list with packets for any performed writebacks
     * @return Cycles for handling the request
     */
    virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
                                       PacketList &writebacks) = 0;

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The number of ticks required for the access.
     */
    virtual Tick recvAtomic(PacketPtr pkt);

    /**
     * Snoop for the provided request in the cache and return the estimated
     * time taken.
     * @param pkt The memory request to snoop
     * @return The number of ticks required for the snoop.
     */
    virtual Tick recvAtomicSnoop(PacketPtr pkt) = 0;

    /**
     * Performs the access specified by the request.
     *
     * @param pkt The request to perform.
     * @param from_cpu_side Whether the access comes from the CPU-side
     * port or the memory-side port.
     */
    virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side);

    /**
     * Handle doing the Compare and Swap function for SPARC.
     */
    void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);

    /**
     * Return the next queue entry to service, either a pending miss
     * from the MSHR queue, a buffered write from the write buffer, or
     * something from the prefetcher. This function is responsible
     * for prioritizing among those sources on the fly.
     */
    QueueEntry* getNextQueueEntry();

    /**
     * Insert writebacks into the write buffer
     */
    virtual void doWritebacks(PacketList& writebacks, Tick forward_time) = 0;

    /**
     * Send writebacks down the memory hierarchy in atomic mode
     */
    virtual void doWritebacksAtomic(PacketList& writebacks) = 0;

    /**
     * Create an appropriate downstream bus request packet.
     *
     * Creates a new packet with the request to be sent to the memory
     * below, or nullptr if the current request in cpu_pkt should just
     * be forwarded on.
     *
     * @param cpu_pkt The miss packet that needs to be satisfied.
     * @param blk The referenced block, can be nullptr.
     * @param needs_writable Indicates that the block must be writable
     * even if the request in cpu_pkt doesn't indicate that.
     * @param is_whole_line_write True if there are writes for the
     * whole line
     * @return A packet to send to the memory below
     */
    virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                                       bool needs_writable,
                                       bool is_whole_line_write) const = 0;

    /**
     * Determine if clean lines should be written back or not. In
     * cases where a downstream cache is mostly inclusive we likely
     * want it to act as a victim cache also for lines that have not
     * been modified. Hence, we cannot simply drop the line (or send a
     * clean evict), but rather need to send the actual data.
     */
    const bool writebackClean;

    /**
     * Writebacks from the tempBlock, arising on the response path in
     * atomic mode, must happen after the call to recvAtomic has
     * finished (for the right ordering of the packets). We therefore
     * need to hold on to the packets, and have a method and an event
     * to send them.
     */
    PacketPtr tempBlockWriteback;

    /**
     * Send the outstanding tempBlock writeback. To be called after
     * recvAtomic finishes in cases where the block we filled is in
     * fact the tempBlock, and now needs to be written back.
     */
    void writebackTempBlockAtomic() {
        assert(tempBlockWriteback != nullptr);
        PacketList writebacks{tempBlockWriteback};
        doWritebacksAtomic(writebacks);
        tempBlockWriteback = nullptr;
    }

    /**
     * An event to write back the tempBlock after recvAtomic
     * finishes. To avoid other calls to recvAtomic getting in
     * between, we create this event with a higher priority.
     */
    EventFunctionWrapper writebackTempBlockAtomicEvent;
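
    /**
     * Construction/scheduling sketch (hedged): the exact code lives in
     * base.cc, and the priority constant named below is an assumption,
     * not part of this header.
     *
     * @code
     * // constructor initializer list: give the event a priority that
     * // lets it run right after recvAtomic returns, but before any
     * // later call to recvAtomic
     * writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
     *                               name(), false,
     *                               EventBase::Delayed_Writeback_Pri)
     *
     * // on the atomic fill path, once the tempBlock holds the data:
     * tempBlockWriteback = evictBlock(blk);
     * schedule(writebackTempBlockAtomicEvent, curTick());
     * @endcode
     */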

    /**
     * When a block is overwritten, its compression information must be
     * updated, and it may need to be recompressed. If the compression
     * size changes, the block may either become smaller, in which case
     * there is no side effect, or bigger (data expansion; fat write),
     * in which case the block might not fit in its current location
     * anymore. If that happens, there are usually two options to be
     * taken:
     *
     * - The co-allocated blocks must be evicted to make room for this
     *   block. Simpler, but ignores replacement data.
     * - The block itself is moved elsewhere (used in policies where the
     *   compression factor (CF) determines the location of the block).
     *
     * This implementation uses the first approach.
     *
     * Notice that this is only called for writebacks, which means that
     * L1 caches (which see regular Writes) do not support compression.
     * @sa CompressedTags
     *
     * @param blk The block to be overwritten.
     * @param data A pointer to the data to be compressed (blk's new data).
     * @param writebacks List for any writebacks that need to be performed.
     * @return Whether the operation was successful.
     */
    bool updateCompressionData(CacheBlk *blk, const uint64_t* data,
                               PacketList &writebacks);

    /**
     * Perform any necessary updates to the block and perform any data
     * exchange between the packet and the block. The flags of the
     * packet are also set accordingly.
     *
     * @param pkt Request packet from upstream that hit a block
     * @param blk Cache block that the packet hit
     * @param deferred_response Whether this request originally missed
     * @param pending_downgrade Whether the writable flag is to be removed
     */
    virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                                bool deferred_response = false,
                                bool pending_downgrade = false);

    /**
     * Maintain the clusivity of this cache by potentially
     * invalidating a block. This method works in conjunction with
     * satisfyRequest, but is separate to allow us to handle all MSHR
     * targets before potentially dropping a block.
     *
     * @param from_cache Whether we have dealt with a packet from a cache
     * @param blk The block that should potentially be dropped
     */
    void maintainClusivity(bool from_cache, CacheBlk *blk);

    /**
     * Handle a fill operation caused by a received packet.
     *
     * Populates a cache block and handles all outstanding requests for
     * the satisfied fill request. The packet carries the fill data, and
     * an existing cache block may optionally be supplied as the target.
     * Note that the reason we return a list of writebacks rather than
     * inserting them directly in the write buffer is that this function
     * is called by both atomic and timing-mode accesses, and in atomic
     * mode we don't mess with the write buffer (we just perform the
     * writebacks atomically once the original request is complete).
     *
     * @param pkt The memory request with the fill data.
     * @param blk The cache block if it already exists.
     * @param writebacks List for any writebacks that need to be performed.
     * @param allocate Whether to allocate a block or use the temp block
     * @return Pointer to the new cache block.
     */
    CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
                         PacketList &writebacks, bool allocate);

    /**
     * Allocate a new block and perform any necessary writebacks
     *
     * Find a victim block and if necessary prepare writebacks for any
     * existing data. May return nullptr if there are no replaceable
     * blocks. If a replaceable block is found, it inserts the new block in
     * its place. The new block, however, is not set as valid yet.
     *
     * @param pkt Packet holding the address to update
     * @param writebacks A list of writeback packets for the evicted blocks
     * @return the allocated block
     */
    CacheBlk *allocateBlock(const PacketPtr pkt, PacketList &writebacks);

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @return A packet with the writeback, can be nullptr
     */
    M5_NODISCARD virtual PacketPtr evictBlock(CacheBlk *blk) = 0;

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @param writebacks Return a list of packets with writebacks
     */
    void evictBlock(CacheBlk *blk, PacketList &writebacks);

    /**
     * Invalidate a cache block.
     *
     * @param blk Block to invalidate
     */
    void invalidateBlock(CacheBlk *blk);

    /**
     * Create a writeback request for the given block.
     *
     * @param blk The block to writeback.
     * @return The writeback request for the block.
     */
    PacketPtr writebackBlk(CacheBlk *blk);

    /**
     * Create a writeclean request for the given block.
     *
     * Creates a request that writes the block to the cache below
     * without evicting the block from the current cache.
     *
     * @param blk The block to write clean.
     * @param dest The destination of the write clean operation.
     * @param id Use the given packet id for the write clean operation.
     * @return The generated write clean packet.
     */
    PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id);

    /**
     * Write back dirty blocks in the cache using functional accesses.
     */
    virtual void memWriteback() override;

    /**
     * Invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to
     * memory. Make sure to call memWriteback() first if you want
     * to write them to memory.
     */
    virtual void memInvalidate() override;

    /**
     * Determine if there are any dirty blocks in the cache.
     *
     * @return true if at least one block is dirty, false otherwise.
     */
    bool isDirty() const;

    /**
     * Determine if an address is in the ranges covered by this
     * cache. This is useful to filter snoops.
     *
     * @param addr Address to check against
     *
     * @return If the address in question is in range
     */
    bool inRange(Addr addr) const;

    /**
     * Find next request ready time from among possible sources.
     */
    Tick nextQueueReadyTime() const;

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of a tag lookup, incurred on every access to the
     * cache.
     */
    const Cycles lookupLatency;

    /**
     * The latency of a data access, incurred on every access to the
     * cache.
     */
    const Cycles dataLatency;

    /**
     * This is the forward latency of the cache. It occurs when there
     * is a cache miss and a request is forwarded downstream, in
     * particular an outbound miss.
     */
    const Cycles forwardLatency;

    /** The latency to fill a cache block */
    const Cycles fillLatency;

    /**
     * The latency of sending a response to the upper-level cache/core
     * on a line fill. The responseLatency parameter captures this
     * latency.
     */
    const Cycles responseLatency;

    /**
     * Whether tags and data are accessed sequentially.
     */
    const bool sequentialAccess;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    bool forwardSnoops;

    /**
     * Clusivity with respect to the upstream cache, determining if we
     * fill into both this cache and the cache above on a miss. Note
     * that we currently do not support strict clusivity policies.
     */
    const Enums::Clusivity clusivity;

    /**
     * Is this cache read only, for example the instruction cache, or
     * table-walker cache. A cache that is read only should never see
     * any writes, and should never get any dirty data (and hence
     * never have to do any writebacks).
     */
    const bool isReadOnly;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores the cycle at which the cache became blocked, for statistics. */
    Cycles blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses.
     */
    const AddrRangeList addrRanges;

  public:
    /** System we are currently operating in. */
    System *system;

    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of times a HW-prefetched block is evicted w/o reference. */
    Stats::Scalar unusedPrefetches;

    /** Number of blocks written back per thread. */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of uncacheable accesses, per command and thread. */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable accesses. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of uncacheable accesses, per command/thread. */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of overall uncacheable accesses. */
    Stats::Formula overallMshrUncacheableLatency;

    /** The miss rate in the MSHRs per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable access, per command/thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable access. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** Number of replacements of valid blocks. */
    Stats::Scalar replacements;

    /** Number of data expansions. */
    Stats::Scalar dataExpansions;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    void regStats() override;

    /** Registers probes. */
    void regProbePoints() override;

  public:
    BaseCache(const BaseCacheParams *p, unsigned blk_size);
    ~BaseCache();

    void init() override;

    Port &getPort(const std::string &if_name,
                  PortID idx=InvalidPortID) override;

    /**
     * Query block size of a cache.
     * @return  The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }

    const AddrRangeList &getAddrRanges() const { return addrRanges; }

    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
    {
        MSHR *mshr = mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize,
                                        pkt, time, order++,
                                        allocOnFill(pkt->cmd));

        if (mshrQueue.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_MSHRs);
        }

        if (sched_send) {
            // schedule the send
            schedMemSideSendEvent(time);
        }

        return mshr;
    }

    void allocateWriteBuffer(PacketPtr pkt, Tick time)
    {
        // should only see writes or clean evicts here
        assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);

        Addr blk_addr = pkt->getBlockAddr(blkSize);

        // If using compression, on evictions the block is decompressed and
        // the operation's latency is added to the payload delay. Consume
        // that payload delay here, meaning that the data is always stored
        // uncompressed in the write buffer.
        if (compressor) {
            time += pkt->payloadDelay;
            pkt->payloadDelay = 0;
        }

        WriteQueueEntry *wq_entry =
            writeBuffer.findMatch(blk_addr, pkt->isSecure());
        if (wq_entry && !wq_entry->inService) {
            DPRINTF(Cache, "Potential to merge writeback %s\n", pkt->print());
        }

        writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);

        if (writeBuffer.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
        }

        // schedule the send
        schedMemSideSendEvent(time);
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked() const
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curCycle();
            cpuSidePort.setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache, "Blocking for cause %d, mask=%d\n", cause, blocked);
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache, "Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            blocked_cycles[cause] += curCycle() - blockedCycle;
            cpuSidePort.clearBlocked();
        }
    }
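
    /**
     * Worked example (illustrative only): blocked is a bit vector, so
     * the CPU-side port stays blocked until every cause has been
     * cleared.
     *
     * @code
     * setBlocked(Blocked_NoMSHRs);     // blocked = 0b001, port blocks
     * setBlocked(Blocked_NoTargets);   // blocked = 0b101
     * clearBlocked(Blocked_NoMSHRs);   // blocked = 0b100, still blocked
     * clearBlocked(Blocked_NoTargets); // blocked = 0b000, port retries
     * @endcode
     */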

    /**
     * Schedule a send event for the memory-side port. If already
     * scheduled, this may reschedule the event at an earlier
     * time. When the specified time is reached, the port is free to
     * send either a response, a request, or a prefetch request.
     *
     * @param time The time when to attempt sending a packet.
     */
    void schedMemSideSendEvent(Tick time)
    {
        memSidePort.schedSendEvent(time);
    }

    bool inCache(Addr addr, bool is_secure) const {
        return tags->findBlock(addr, is_secure);
    }

    bool hasBeenPrefetched(Addr addr, bool is_secure) const {
        CacheBlk *block = tags->findBlock(addr, is_secure);
        if (block) {
            return block->wasPrefetched();
        } else {
            return false;
        }
    }

    bool inMissQueue(Addr addr, bool is_secure) const {
        return mshrQueue.findMatch(addr, is_secure);
    }

    void incMissCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
        pkt->req->incAccessDepth();
        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }

    void incHitCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
    }

    /**
     * Checks if the cache is coalescing writes
     *
     * @return True if the cache is coalescing writes
     */
    bool coalesce() const;


    /**
     * Cache block visitor that writes back dirty cache blocks using
     * functional writes.
     */
    void writebackVisitor(CacheBlk &blk);

    /**
     * Cache block visitor that invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to memory.
     */
    void invalidateVisitor(CacheBlk &blk);

    /**
     * Take an MSHR, turn it into a suitable downstream packet, and
     * send it out. This construct allows a queue entry to choose a suitable
     * approach based on its type.
     *
     * @param mshr The MSHR to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    virtual bool sendMSHRQueuePacket(MSHR* mshr);

    /**
     * Similar to sendMSHRQueuePacket, but for a write-queue entry
     * instead. Create the packet, and send it, and if successful also
     * mark the entry in service.
     *
     * @param wq_entry The write-queue entry to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);

    /**
     * Serialize the state of the caches
     *
     * We currently don't support checkpointing cache state, so this panics.
     */
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};

/**
 * The write allocator inspects write packets and detects streaming
 * patterns. The write allocator supports a single stream where writes
 * are expected to access consecutive locations, and it keeps track of
 * the size of the area covered by the consecutive writes in byteCount.
 *
 * 1) When byteCount has surpassed the coalesceLimit, the mode switches
 * from ALLOCATE to COALESCE, where writes should be delayed until the
 * whole block is written, at which point a single packet (whole-line
 * write) can service them.
 *
 * 2) When byteCount has also exceeded the noAllocateLimit (whole
 * line), we switch to NO_ALLOCATE, where writes should not allocate in
 * the cache but rather send a whole-line write to the memory below.
 *
 * For example (illustrative numbers, not defaults): with a 64-byte
 * block, coalesce_limit = 2 and no_allocate_limit = 12, the mode
 * switches to COALESCE after 128 contiguous bytes have been written
 * and to NO_ALLOCATE after 768 contiguous bytes.
 */
class WriteAllocator : public SimObject {
  public:
    WriteAllocator(const WriteAllocatorParams *p) :
        SimObject(p),
        coalesceLimit(p->coalesce_limit * p->block_size),
        noAllocateLimit(p->no_allocate_limit * p->block_size),
        delayThreshold(p->delay_threshold)
    {
        reset();
    }

    /**
     * Should writes be coalesced? This is true as soon as the mode is
     * no longer ALLOCATE, i.e., in both the COALESCE and NO_ALLOCATE
     * modes.
     *
     * @return true if the cache should coalesce writes.
     */
    bool coalesce() const {
        return mode != WriteMode::ALLOCATE;
    }

    /**
     * Should writes allocate? This is true unless the mode is
     * NO_ALLOCATE.
     *
     * @return true if the cache should allocate for writes.
     */
    bool allocate() const {
        return mode != WriteMode::NO_ALLOCATE;
    }

    /**
     * Reset the write allocator state, meaning that it allocates for
     * writes and has not recorded any information about qualifying
     * writes that might trigger a switch to coalescing and later no
     * allocation.
     */
    void reset() {
        mode = WriteMode::ALLOCATE;
        byteCount = 0;
        nextAddr = 0;
    }

    /**
     * Check whether we need to delay the current write.
     *
     * @param blk_addr The block address the packet writes to
     * @return true if the current packet should be delayed
     */
    bool delay(Addr blk_addr) {
        if (delayCtr[blk_addr] > 0) {
            --delayCtr[blk_addr];
            return true;
        } else {
            return false;
        }
    }

    /**
     * Clear the delay counter for the given block
     *
     * @param blk_addr The accessed cache block
     */
    void resetDelay(Addr blk_addr) {
        delayCtr.erase(blk_addr);
    }

    /**
     * Update the write mode based on the current write
     * packet. This method compares the packet's address with any
     * current stream, and updates the tracking and the mode
     * accordingly.
     *
     * @param write_addr Start address of the write request
     * @param write_size Size of the write request
     * @param blk_addr The block address that this packet writes to
     */
    void updateMode(Addr write_addr, unsigned write_size, Addr blk_addr);

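    /**
     * A minimal call-pattern sketch (illustrative, not part of this
     * interface; the real call sites live in the cache implementation,
     * and the packet accessors used here are assumptions):
     *
     * @code
     * // on handling a write miss to blk_addr:
     * writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(), blk_addr);
     * if (writeAllocator->delay(blk_addr)) {
     *     // hold the WriteReq MSHR back to give coalescing a chance
     * }
     * // once the MSHR packet is sent downstream:
     * writeAllocator->resetDelay(blk_addr);
     * @endcode
     */
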
  private:
    /**
     * The current mode for write coalescing and allocation, either
     * normal operation (ALLOCATE), write coalescing (COALESCE), or
     * write coalescing without allocation (NO_ALLOCATE).
     */
    enum class WriteMode : char {
        ALLOCATE,
        COALESCE,
        NO_ALLOCATE,
    };
    WriteMode mode;

    /** Address to match writes against to detect streams. */
    Addr nextAddr;

    /**
     * Bytes written contiguously. Saturating once we no longer
     * allocate.
     */
    uint32_t byteCount;

    /**
     * Limits for when to switch between the different write modes.
     */
    const uint32_t coalesceLimit;
    const uint32_t noAllocateLimit;
    /**
     * The number of times the allocator will delay a WriteReq MSHR.
     */
    const uint32_t delayThreshold;

    /**
     * Keep track of the number of times the allocator has delayed a
     * WriteReq MSHR.
     */
    std::unordered_map<Addr, Counter> delayCtr;
};

#endif //__MEM_CACHE_BASE_HH__
