/*
 * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Hansson
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __MEM_CACHE_BASE_HH__
#define __MEM_CACHE_BASE_HH__

#include <cassert>
#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>

#include "base/addr_range.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/packet_queue.hh"
#include "mem/qport.hh"
#include "mem/request.hh"
#include "params/WriteAllocator.hh"
#include "sim/eventq.hh"
#include "sim/probe/probe.hh"
#include "sim/serialize.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

class BaseMasterPort;
class BasePrefetcher;
class BaseSlavePort;
class MSHR;
class MasterPort;
class QueueEntry;
struct BaseCacheParams;

/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
  protected:
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };

  public:
    /**
     * Reasons for caches to be blocked.
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };

  protected:

    /**
     * A cache master port is used for the memory-side port of the
     * cache, and in addition to the basic timing port that only sends
     * response packets through a transmit list, it also offers the
     * ability to schedule and send request packets (requests &
     * writebacks). The send event is scheduled through schedSendEvent,
     * and the sendDeferredPacket of the timing port is modified to
     * consider both the transmit list and the requests from the MSHR.
     */
    class CacheMasterPort : public QueuedMasterPort
    {

      public:

        /**
         * Schedule a send of a request packet (from the MSHR). Note
         * that we could already have a retry outstanding.
         */
        void schedSendEvent(Tick time)
        {
            DPRINTF(CachePort, "Scheduling send event at %llu\n", time);
            reqQueue.schedSendEvent(time);
        }

      protected:

        CacheMasterPort(const std::string &_name, BaseCache *_cache,
                        ReqPacketQueue &_reqQueue,
                        SnoopRespPacketQueue &_snoopRespQueue) :
            QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
        { }

        /**
         * Memory-side port always snoops.
         *
         * @return always true
         */
        virtual bool isSnooping() const { return true; }
    };

    /**
     * Override the default behaviour of sendDeferredPacket to enable
     * the memory-side cache port to also send requests based on the
     * current MSHR status. This queue has a pointer to our specific
     * cache implementation and is used by the MemSidePort.
     */
    class CacheReqPacketQueue : public ReqPacketQueue
    {

      protected:

        BaseCache &cache;
        SnoopRespPacketQueue &snoopRespQueue;

      public:

        CacheReqPacketQueue(BaseCache &cache, MasterPort &port,
                            SnoopRespPacketQueue &snoop_resp_queue,
                            const std::string &label) :
            ReqPacketQueue(cache, port, label), cache(cache),
            snoopRespQueue(snoop_resp_queue) { }

        /**
         * Override the normal sendDeferredPacket so that it considers
         * not only the transmit list (used for responses), but also
         * requests waiting in the MSHR and write queues.
         */
        virtual void sendDeferredPacket();
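
        // A sketch of the intended behaviour of this override (the
        // actual definition lives in base.cc and may differ in
        // detail): rather than only draining deferred responses, it
        // asks the cache for the next ready queue entry and stalls if
        // a conflicting snoop response is pending, roughly:
        //
        //     QueueEntry* entry = cache.getNextQueueEntry();
        //     if (entry && !checkConflictingSnoop(entry->blkAddr))
        //         waitingOnRetry = entry->sendPacket(cache);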

        /**
         * Check if there is a conflicting snoop response about to be
         * sent out; if so, stall any requests, and schedule a send
         * event at the time the next snoop response is being sent
         * out.
         */
        bool checkConflictingSnoop(Addr addr)
        {
            if (snoopRespQueue.hasAddr(addr)) {
                DPRINTF(CachePort, "Waiting for snoop response to be "
                        "sent\n");
                Tick when = snoopRespQueue.deferredPacketReadyTime();
                schedSendEvent(when);
                return true;
            }
            return false;
        }
    };


    /**
     * The memory-side port extends the base cache master port with
     * access functions for functional, atomic and timing snoops.
     */
    class MemSidePort : public CacheMasterPort
    {
      private:

        /** The cache-specific queue. */
        CacheReqPacketQueue _reqQueue;

        SnoopRespPacketQueue _snoopRespQueue;

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:

        virtual void recvTimingSnoopReq(PacketPtr pkt);

        virtual bool recvTimingResp(PacketPtr pkt);

        virtual Tick recvAtomicSnoop(PacketPtr pkt);

        virtual void recvFunctionalSnoop(PacketPtr pkt);

      public:

        MemSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);
    };

    /**
     * A cache slave port is used for the CPU-side port of the cache,
     * and it is basically a simple timing port that uses a transmit
     * list for responses to the CPU (or connected master). In
     * addition, it has the functionality to block the port for
     * incoming requests. If blocked, the port will issue a retry once
     * unblocked.
     */
    class CacheSlavePort : public QueuedSlavePort
    {

      public:

        /** Do not accept any new requests. */
        void setBlocked();

        /** Return to normal operation and accept new requests. */
        void clearBlocked();

        bool isBlocked() const { return blocked; }

      protected:

        CacheSlavePort(const std::string &_name, BaseCache *_cache,
                       const std::string &_label);

        /** A normal packet queue used to store responses. */
        RespPacketQueue queue;

        bool blocked;

        bool mustSendRetry;

      private:

        void processSendRetry();

        EventFunctionWrapper sendRetryEvent;

    };

    /**
     * The CPU-side port extends the base cache slave port with access
     * functions for functional, atomic and timing requests.
     */
    class CpuSidePort : public CacheSlavePort
    {
      private:

        // a pointer to our specific cache implementation
        BaseCache *cache;

      protected:
        virtual bool recvTimingSnoopResp(PacketPtr pkt) override;

        virtual bool tryTiming(PacketPtr pkt) override;

        virtual bool recvTimingReq(PacketPtr pkt) override;

        virtual Tick recvAtomic(PacketPtr pkt) override;

        virtual void recvFunctional(PacketPtr pkt) override;

        virtual AddrRangeList getAddrRanges() const override;

      public:

        CpuSidePort(const std::string &_name, BaseCache *_cache,
                    const std::string &_label);

    };

    CpuSidePort cpuSidePort;
    MemSidePort memSidePort;

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    WriteQueue writeBuffer;

    /** Tag and data storage */
    BaseTags *tags;

    /** Prefetcher */
    BasePrefetcher *prefetcher;

    /** To probe when a cache hit occurs */
    ProbePointArg<PacketPtr> *ppHit;

    /** To probe when a cache miss occurs */
    ProbePointArg<PacketPtr> *ppMiss;

    /**
     * The writeAllocator drives optimizations for streaming writes.
     * It first determines whether a WriteReq MSHR should be delayed,
     * thus ensuring that we wait longer in cases when we are write
     * coalescing and allowing all the bytes of the line to be written
     * before the MSHR packet is sent downstream. This works in unison
     * with the tracking in the MSHR to check if the entire line is
     * written. The write mode also affects the behaviour on filling
     * any whole-line writes. Normally the cache allocates the line
     * when receiving the InvalidateResp, but after seeing enough
     * consecutive lines we switch to using the tempBlock, and thus
     * end up not allocating the line, and instead turning the
     * whole-line write into a writeback straight away.
     */
    WriteAllocator * const writeAllocator;
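
    // A minimal sketch of how a cache implementation might consult
    // the allocator on a timing write miss (the real usage lives in
    // the .cc files and may differ; 'pkt' and 'blk_addr' stand for
    // the incoming packet and its block-aligned address):
    //
    //     if (writeAllocator && pkt->isWrite()) {
    //         writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
    //                                    blk_addr);
    //         if (writeAllocator->delay(blk_addr)) {
    //             // hold the WriteReq MSHR back so that subsequent
    //             // writes can coalesce into the same line
    //         }
    //     }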

    /**
     * Temporary cache block for occasional transitory use.  We use
     * the tempBlock to fill when allocation fails (e.g., when there
     * is an outstanding request that accesses the victim block) or
     * when we want to avoid allocation (e.g., exclusive caches)
     */
    TempCacheBlk *tempBlock;

    /**
     * Upstream caches need this packet until true is returned, so
     * hold it for deletion until a subsequent call
     */
    std::unique_ptr<Packet> pendingDelete;

    /**
     * Mark a request as in service (sent downstream in the memory
     * system), effectively making this MSHR the ordering point.
     */
    void markInService(MSHR *mshr, bool pending_modified_resp)
    {
        bool wasFull = mshrQueue.isFull();
        mshrQueue.markInService(mshr, pending_modified_resp);

        if (wasFull && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }
    }

    void markInService(WriteQueueEntry *entry)
    {
        bool wasFull = writeBuffer.isFull();
        writeBuffer.markInService(entry);

        if (wasFull && !writeBuffer.isFull()) {
            clearBlocked(Blocked_NoWBBuffers);
        }
    }

    /**
     * Determine whether we should allocate on a fill or not. If this
     * cache is mostly inclusive with regards to the upstream cache(s)
     * we always allocate (for any non-forwarded and cacheable
     * requests). In the case of a mostly exclusive cache, we allocate
     * on fill if the packet did not come from a cache: that is, if we
     * are dealing with a whole-line write (which behaves much like a
     * writeback), if the original target packet came from a
     * non-caching source, or if we are performing a prefetch or LLSC.
     *
     * @param cmd Command of the incoming requesting packet
     * @return Whether we should allocate on the fill
     */
    inline bool allocOnFill(MemCmd cmd) const
    {
        return clusivity == Enums::mostly_incl ||
            cmd == MemCmd::WriteLineReq ||
            cmd == MemCmd::ReadReq ||
            cmd == MemCmd::WriteReq ||
            cmd.isPrefetch() ||
            cmd.isLLSC();
    }

    /**
     * Regenerate block address using tags.
     * Block address regeneration depends on whether we're using a temporary
     * block or not.
     *
     * @param blk The block whose address is to be regenerated.
     * @return The block's address.
     */
    Addr regenerateBlkAddr(CacheBlk* blk);

    /**
     * Calculate the access latency, in cycles, given a tag lookup
     * latency and whether the access was a hit or a miss.
     *
     * @param blk The cache block that was accessed.
     * @param lookup_lat Latency of the respective tag lookup.
     * @return The number of cycles that pass due to a block access.
     */
    Cycles calculateAccessLatency(const CacheBlk* blk,
                                  const Cycles lookup_lat) const;

    /**
     * Does all the processing necessary to perform the provided request.
     * @param pkt The memory request to perform.
     * @param blk The cache block to be updated.
     * @param lat The latency of the access.
     * @param writebacks List for any writebacks that need to be performed.
     * @return Boolean indicating whether the request was satisfied.
     */
    virtual bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                        PacketList &writebacks);

    /*
     * Handle a timing request that hit in the cache
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqHit(PacketPtr pkt, CacheBlk *blk,
                                    Tick request_time);

    /*
     * Handle a timing request that missed in the cache
     *
     * Implementation specific handling for different cache
     * implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    virtual void handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk,
                                     Tick forward_time,
                                     Tick request_time) = 0;

    /*
     * Handle a timing request that missed in the cache
     *
     * Common functionality across different cache implementations
     *
     * @param pkt The request packet
     * @param blk The referenced block
     * @param mshr Any existing mshr for the referenced cache block
     * @param forward_time The tick at which we can process dependent requests
     * @param request_time The tick at which the block lookup is complete
     */
    void handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                             Tick forward_time, Tick request_time);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     */
    virtual void recvTimingReq(PacketPtr pkt);

    /**
     * Handling the special case of uncacheable write responses to
     * make recvTimingResp less cluttered.
     */
    void handleUncacheableWriteResp(PacketPtr pkt);

    /**
     * Service non-deferred MSHR targets using the received response
     *
     * Iterates through the list of targets that can be serviced with
     * the current response. Any writebacks that need to be performed
     * must be appended to the writebacks parameter.
     *
     * @param mshr The MSHR that corresponds to the response
     * @param pkt The response packet
     * @param blk The reference block
     * @param writebacks List of writebacks that need to be performed
     */
    virtual void serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt,
                                    CacheBlk *blk, PacketList& writebacks) = 0;

    /**
     * Handles a response (cache line fill/write ack) from the bus.
     * @param pkt The response packet
     */
    virtual void recvTimingResp(PacketPtr pkt);

    /**
     * Snoops bus transactions to maintain coherence.
     * @param pkt The current bus transaction.
     */
    virtual void recvTimingSnoopReq(PacketPtr pkt) = 0;

    /**
     * Handle a snoop response.
     * @param pkt Snoop response packet
     */
    virtual void recvTimingSnoopResp(PacketPtr pkt) = 0;

    /**
     * Handle a request in atomic mode that missed in this cache
     *
     * Creates a downstream request, sends it to the memory below and
     * handles the response. As we are in atomic mode all operations
     * are performed immediately.
     *
     * @param pkt The packet with the requests
     * @param blk The referenced block
     * @param writebacks A list with packets for any performed writebacks
     * @return Cycles for handling the request
     */
    virtual Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
                                       PacketList &writebacks) = 0;

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The number of ticks required for the access.
     */
    virtual Tick recvAtomic(PacketPtr pkt);

    /**
     * Snoop for the provided request in the cache and return the estimated
     * time taken.
     * @param pkt The memory request to snoop
     * @return The number of ticks required for the snoop.
     */
    virtual Tick recvAtomicSnoop(PacketPtr pkt) = 0;

    /**
     * Performs the access specified by the request.
     *
     * @param pkt The request to perform.
     * @param from_cpu_side Whether the access comes from the CPU-side
     * port (true) or the memory-side port (false).
     */
    virtual void functionalAccess(PacketPtr pkt, bool from_cpu_side);

    /**
     * Handle doing the Compare and Swap function for SPARC.
     */
    void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);

    /**
     * Return the next queue entry to service, either a pending miss
     * from the MSHR queue, a buffered write from the write buffer, or
     * something from the prefetcher. This function is responsible
     * for prioritizing among those sources on the fly.
     */
    QueueEntry* getNextQueueEntry();

    /**
     * Insert writebacks into the write buffer
     */
    virtual void doWritebacks(PacketList& writebacks, Tick forward_time) = 0;

    /**
     * Send writebacks down the memory hierarchy in atomic mode
     */
    virtual void doWritebacksAtomic(PacketList& writebacks) = 0;

    /**
     * Create an appropriate downstream bus request packet.
     *
     * Creates a new packet with the request to be sent to the memory
     * below, or nullptr if the current request in cpu_pkt should just
     * be forwarded on.
     *
     * @param cpu_pkt The miss packet that needs to be satisfied.
     * @param blk The referenced block, can be nullptr.
     * @param needs_writable Indicates that the block must be writable
     * even if the request in cpu_pkt doesn't indicate that.
     * @param is_whole_line_write True if there are writes for the
     * whole line
     * @return A packet to send to the memory below
     */
    virtual PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                                       bool needs_writable,
                                       bool is_whole_line_write) const = 0;

    /**
     * Determine if clean lines should be written back or not. In
     * cases where a downstream cache is mostly inclusive we likely
     * want it to act as a victim cache also for lines that have not
     * been modified. Hence, we cannot simply drop the line (or send a
     * clean evict), but rather need to send the actual data.
     */
    const bool writebackClean;

    /**
     * Writebacks from the tempBlock, generated on the response path
     * in atomic mode, must happen after the call to recvAtomic has
     * finished (for the right ordering of the packets). We therefore
     * need to hold on to the packets, and have a method and an event
     * to send them.
     */
    PacketPtr tempBlockWriteback;

    /**
     * Send the outstanding tempBlock writeback. To be called after
     * recvAtomic finishes in cases where the block we filled is in
     * fact the tempBlock, and now needs to be written back.
     */
    void writebackTempBlockAtomic() {
        assert(tempBlockWriteback != nullptr);
        PacketList writebacks{tempBlockWriteback};
        doWritebacksAtomic(writebacks);
        tempBlockWriteback = nullptr;
    }

    /**
     * An event to writeback the tempBlock after recvAtomic
     * finishes. To avoid other calls to recvAtomic getting in
     * between, we create this event with a higher priority.
     */
    EventFunctionWrapper writebackTempBlockAtomicEvent;
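
    // As a sketch of the intent (the event is actually constructed in
    // the BaseCache constructor in the .cc file, which may differ in
    // detail), the event wraps writebackTempBlockAtomic() and is
    // given a priority above the default so that no other recvAtomic
    // call can slip in between, along the lines of:
    //
    //     writebackTempBlockAtomicEvent(
    //         [this]{ writebackTempBlockAtomic(); }, name(), false,
    //         EventBase::Delayed_Writeback_Pri)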

    /**
     * Perform any necessary updates to the block and perform any data
     * exchange between the packet and the block. The flags of the
     * packet are also set accordingly.
     *
     * @param pkt Request packet from upstream that hit a block
     * @param blk Cache block that the packet hit
     * @param deferred_response Whether this request originally missed
     * @param pending_downgrade Whether the writable flag is to be removed
     */
    virtual void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                                bool deferred_response = false,
                                bool pending_downgrade = false);

    /**
     * Maintain the clusivity of this cache by potentially
     * invalidating a block. This method works in conjunction with
     * satisfyRequest, but is separate to allow us to handle all MSHR
     * targets before potentially dropping a block.
     *
     * @param from_cache Whether we have dealt with a packet from a cache
     * @param blk The block that should potentially be dropped
     */
    void maintainClusivity(bool from_cache, CacheBlk *blk);

    /**
     * Handle a fill operation caused by a received packet.
     *
     * Populates a cache block and handles all outstanding requests for the
     * satisfied fill request. This version takes two memory requests. One
     * contains the fill data, the other is an optional target to satisfy.
     * Note that the reason we return a list of writebacks rather than
     * inserting them directly in the write buffer is that this function
     * is called by both atomic and timing-mode accesses, and in atomic
     * mode we don't mess with the write buffer (we just perform the
     * writebacks atomically once the original request is complete).
     *
     * @param pkt The memory request with the fill data.
     * @param blk The cache block if it already exists.
     * @param writebacks List for any writebacks that need to be performed.
     * @param allocate Whether to allocate a block or use the temp block
     * @return Pointer to the new cache block.
     */
    CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
                         PacketList &writebacks, bool allocate);

    /**
     * Allocate a new block and perform any necessary writebacks
     *
     * Find a victim block and if necessary prepare writebacks for any
     * existing data. May return nullptr if there are no replaceable
     * blocks. If a replaceable block is found, it inserts the new block in
     * its place. The new block, however, is not set as valid yet.
     *
     * @param pkt Packet holding the address to update
     * @param writebacks A list of writeback packets for the evicted blocks
     * @return the allocated block
     */
    CacheBlk *allocateBlock(const PacketPtr pkt, PacketList &writebacks);

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @return A packet with the writeback, can be nullptr
     */
    M5_NODISCARD virtual PacketPtr evictBlock(CacheBlk *blk) = 0;

    /**
     * Evict a cache block.
     *
     * Performs a writeback if necessary and invalidates the block
     *
     * @param blk Block to invalidate
     * @param writebacks Return a list of packets with writebacks
     */
    void evictBlock(CacheBlk *blk, PacketList &writebacks);

    /**
     * Invalidate a cache block.
     *
     * @param blk Block to invalidate
     */
    void invalidateBlock(CacheBlk *blk);

    /**
     * Create a writeback request for the given block.
     *
     * @param blk The block to writeback.
     * @return The writeback request for the block.
     */
    PacketPtr writebackBlk(CacheBlk *blk);

    /**
     * Create a writeclean request for the given block.
     *
     * Creates a request that writes the block to the cache below
     * without evicting the block from the current cache.
     *
     * @param blk The block to write clean.
     * @param dest The destination of the write clean operation.
     * @param id Use the given packet id for the write clean operation.
     * @return The generated write clean packet.
     */
    PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id);

    /**
     * Write back dirty blocks in the cache using functional accesses.
     */
    virtual void memWriteback() override;

    /**
     * Invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to
     * memory. Make sure to call memWriteback() first if you
     * want to write them to memory.
     */
    virtual void memInvalidate() override;

    /**
     * Determine if there are any dirty blocks in the cache.
     *
     * @return true if at least one block is dirty, false otherwise.
     */
    bool isDirty() const;

    /**
     * Determine if an address is in the ranges covered by this
     * cache. This is useful to filter snoops.
     *
     * @param addr Address to check against
     *
     * @return If the address in question is in range
     */
    bool inRange(Addr addr) const;

    /**
     * Find next request ready time from among possible sources.
     */
    Tick nextQueueReadyTime() const;

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of tag lookup of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles lookupLatency;

    /**
     * The latency of data access of a cache. It occurs when there is
     * an access to the cache.
     */
    const Cycles dataLatency;

    /**
     * This is the forward latency of the cache. It occurs when there
     * is a cache miss and a request is forwarded downstream, in
     * particular an outbound miss.
     */
    const Cycles forwardLatency;

    /** The latency to fill a cache block */
    const Cycles fillLatency;

    /**
     * The latency of sending a response to the upper-level cache/core
     * on a line fill. The responseLatency parameter captures this
     * latency.
     */
    const Cycles responseLatency;

    /**
     * Whether tags and data are accessed sequentially.
     */
    const bool sequentialAccess;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    bool forwardSnoops;

    /**
     * Clusivity with respect to the upstream cache, determining if we
     * fill into both this cache and the cache above on a miss. Note
     * that we currently do not support strict clusivity policies.
     */
    const Enums::Clusivity clusivity;

    /**
     * Is this cache read only, for example the instruction cache, or
     * table-walker cache. A cache that is read only should never see
     * any writes, and should never get any dirty data (and hence
     * never have to do any writebacks).
     */
    const bool isReadOnly;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores time the cache blocked for statistics. */
    Cycles blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses. */
    const AddrRangeList addrRanges;

  public:
    /** System we are currently operating in. */
    System *system;

    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command.
        @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of times a HW-prefetched block is evicted w/o reference. */
    Stats::Scalar unusedPrefetches;

    /** Number of blocks written back per thread. */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of uncacheable misses, per command and thread. */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable misses. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of each uncacheable MSHR miss, per command
        and thread. */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of overall uncacheable MSHR misses. */
    Stats::Formula overallMshrUncacheableLatency;

#if 0
    /** The total number of MSHR accesses per command and thread. */
    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
    /** The total number of demand MSHR accesses. */
    Stats::Formula demandMshrAccesses;
    /** The total number of MSHR accesses. */
    Stats::Formula overallMshrAccesses;
#endif

    /** The miss rate in the MSHRs, per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable MSHR miss, per command
        and thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable MSHR miss. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** Number of replacements of valid blocks. */
    Stats::Scalar replacements;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    void regStats() override;

    /** Registers probes. */
    void regProbePoints() override;

  public:
    BaseCache(const BaseCacheParams *p, unsigned blk_size);
    ~BaseCache();

    void init() override;

    BaseMasterPort &getMasterPort(const std::string &if_name,
                                  PortID idx = InvalidPortID) override;
    BaseSlavePort &getSlavePort(const std::string &if_name,
                                PortID idx = InvalidPortID) override;

    /**
     * Query block size of a cache.
     * @return  The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }

    const AddrRangeList &getAddrRanges() const { return addrRanges; }

    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
    {
        MSHR *mshr = mshrQueue.allocate(pkt->getBlockAddr(blkSize), blkSize,
                                        pkt, time, order++,
                                        allocOnFill(pkt->cmd));

        if (mshrQueue.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_MSHRs);
        }

        if (sched_send) {
            // schedule the send
            schedMemSideSendEvent(time);
        }

        return mshr;
    }

    void allocateWriteBuffer(PacketPtr pkt, Tick time)
    {
        // should only see writes or clean evicts here
        assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);

        Addr blk_addr = pkt->getBlockAddr(blkSize);

        WriteQueueEntry *wq_entry =
            writeBuffer.findMatch(blk_addr, pkt->isSecure());
        if (wq_entry && !wq_entry->inService) {
            DPRINTF(Cache, "Potential to merge writeback %s", pkt->print());
        }

        writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);

        if (writeBuffer.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
        }

        // schedule the send
        schedMemSideSendEvent(time);
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked() const
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curCycle();
            cpuSidePort.setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache, "Blocking for cause %d, mask=%d\n", cause, blocked);
    }
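
    // For example, if both the MSHRs and the write buffer fill up,
    // blocked becomes (1 << Blocked_NoMSHRs) | (1 << Blocked_NoWBBuffers)
    // == 0x3, and the CPU-side port only unblocks once clearBlocked()
    // has been called for both causes and the mask drops back to zero.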

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache, "Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            blocked_cycles[cause] += curCycle() - blockedCycle;
            cpuSidePort.clearBlocked();
        }
    }

    /**
     * Schedule a send event for the memory-side port. If already
     * scheduled, this may reschedule the event at an earlier
     * time. When the specified time is reached, the port is free to
     * send either a response, a request, or a prefetch request.
     *
     * @param time The time when to attempt sending a packet.
     */
    void schedMemSideSendEvent(Tick time)
    {
        memSidePort.schedSendEvent(time);
    }

    bool inCache(Addr addr, bool is_secure) const {
        return tags->findBlock(addr, is_secure);
    }

    bool inMissQueue(Addr addr, bool is_secure) const {
        return mshrQueue.findMatch(addr, is_secure);
    }

    void incMissCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
        pkt->req->incAccessDepth();
        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }

    void incHitCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
    }

    /**
     * Checks if the cache is coalescing writes
     *
     * @return True if the cache is coalescing writes
     */
    bool coalesce() const;

    /**
     * Cache block visitor that writes back dirty cache blocks using
     * functional writes.
     */
    void writebackVisitor(CacheBlk &blk);

    /**
     * Cache block visitor that invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to memory.
     */
    void invalidateVisitor(CacheBlk &blk);

    /**
     * Take an MSHR, turn it into a suitable downstream packet, and
     * send it out. This construct allows a queue entry to choose a suitable
     * approach based on its type.
     *
     * @param mshr The MSHR to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    virtual bool sendMSHRQueuePacket(MSHR* mshr);

    /**
     * Similar to sendMSHRQueuePacket, but for a write-queue entry
     * instead. Create the packet, and send it, and if successful also
     * mark the entry in service.
     *
     * @param wq_entry The write-queue entry to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);

    /**
     * Serialize the state of the caches
     *
     * We currently don't support checkpointing cache state, so this panics.
     */
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};

/**
 * The write allocator inspects write packets and detects streaming
 * patterns. The write allocator supports a single stream where writes
 * are expected to access consecutive locations, and keeps track of
 * the size of the area covered by the consecutive writes in byteCount.
 *
 * 1) When byteCount has surpassed the coalesceLimit the mode
 * switches from ALLOCATE to COALESCE, where writes should be delayed
 * until the whole block is written, at which point a single packet
 * (whole line write) can service them.
 *
 * 2) When byteCount has also exceeded the noAllocateLimit (whole
 * line) we switch to NO_ALLOCATE, where writes should not allocate in
 * the cache but rather send a whole line write to the memory below.
 */
class WriteAllocator : public SimObject {
  public:
    WriteAllocator(const WriteAllocatorParams *p) :
        SimObject(p),
        coalesceLimit(p->coalesce_limit * p->block_size),
        noAllocateLimit(p->no_allocate_limit * p->block_size),
        delayThreshold(p->delay_threshold)
    {
        reset();
    }

    /**
     * Should writes be coalesced? This is true if the mode is not
     * ALLOCATE, i.e. writes are being coalesced (COALESCE) or
     * coalesced without allocation (NO_ALLOCATE).
     *
     * @return true if the cache should coalesce writes.
     */
    bool coalesce() const {
        return mode != WriteMode::ALLOCATE;
    }

    /**
     * Should writes allocate?
     *
     * @return true if the cache should allocate for writes.
     */
    bool allocate() const {
        return mode != WriteMode::NO_ALLOCATE;
    }

    /**
     * Reset the write allocator state, meaning that it allocates for
     * writes and has not recorded any information about qualifying
     * writes that might trigger a switch to coalescing and later no
     * allocation.
     */
    void reset() {
        mode = WriteMode::ALLOCATE;
        byteCount = 0;
        nextAddr = 0;
    }

    /**
     * Check whether the current write needs to be delayed. Each call
     * for a block with a non-zero delay counter consumes one delay
     * credit.
     *
     * @param blk_addr The block address the packet writes to
     * @return true if the current packet should be delayed
     */
    bool delay(Addr blk_addr) {
        if (delayCtr[blk_addr] > 0) {
            --delayCtr[blk_addr];
            return true;
        } else {
            return false;
        }
    }

    /**
     * Clear the delay counter for the input block
     *
     * @param blk_addr The accessed cache block
     */
    void resetDelay(Addr blk_addr) {
        delayCtr.erase(blk_addr);
    }

    /**
     * Update the write mode based on the current write
     * packet. This method compares the packet's address with any
     * current stream, and updates the tracking and the mode
     * accordingly.
     *
     * @param write_addr Start address of the write request
     * @param write_size Size of the write request
     * @param blk_addr The block address that this packet writes to
     */
    void updateMode(Addr write_addr, unsigned write_size, Addr blk_addr);

  private:
    /**
     * The current mode for write coalescing and allocation, either
     * normal operation (ALLOCATE), write coalescing (COALESCE), or
     * write coalescing without allocation (NO_ALLOCATE).
     */
    enum class WriteMode : char {
        ALLOCATE,
        COALESCE,
        NO_ALLOCATE,
    };
    WriteMode mode;

    /** Address to match writes against to detect streams. */
    Addr nextAddr;

    /**
     * Bytes written contiguously. Saturating once we no longer
     * allocate.
     */
    uint32_t byteCount;

    /**
     * Limits for when to switch between the different write modes.
     */
    const uint32_t coalesceLimit;
    const uint32_t noAllocateLimit;
    /**
     * The number of times the allocator will delay a WriteReq MSHR.
     */
    const uint32_t delayThreshold;

    /**
     * Keep track of the number of times the allocator has delayed a
     * WriteReq MSHR.
     */
    std::unordered_map<Addr, Counter> delayCtr;
};
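
// An illustrative sketch of the mode transitions described in the
// class comment above. The real updateMode() is defined in the .cc
// file and may differ in detail; this pseudo-implementation only
// shows the intended state machine:
//
//     void WriteAllocator::updateMode(Addr write_addr, unsigned write_size,
//                                     Addr blk_addr)
//     {
//         if (write_addr == nextAddr) {
//             // the write continues the current stream
//             byteCount += write_size;
//             if (mode == WriteMode::ALLOCATE && byteCount > coalesceLimit)
//                 mode = WriteMode::COALESCE;           // step 1) above
//             else if (mode == WriteMode::COALESCE &&
//                      byteCount > noAllocateLimit)
//                 mode = WriteMode::NO_ALLOCATE;        // step 2) above
//             if (mode != WriteMode::ALLOCATE)
//                 delayCtr[blk_addr] = delayThreshold;  // arm delay()
//         } else {
//             // non-contiguous write: restart the stream tracking
//             reset();
//         }
//         nextAddr = write_addr + write_size;
//     }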

#endif //__MEM_CACHE_BASE_HH__