cache.hh revision 11859:76c36516e0ae
/*
 * Copyright (c) 2012-2016 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Hansson
 */

/**
 * @file
 * Describes a cache based on template policies.
 */

#ifndef __MEM_CACHE_CACHE_HH__
#define __MEM_CACHE_CACHE_HH__

#include <unordered_set>

#include "base/misc.hh" // fatal, panic, and warn
#include "enums/Clusivity.hh"
#include "mem/cache/base.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/tags/base.hh"
#include "params/Cache.hh"
#include "sim/eventq.hh"

// Forward declaration
class BasePrefetcher;

/**
 * A template-policy based cache. The behavior of the cache can be altered
 * by supplying different template policies. TagStore handles all tag and
 * data storage. @sa TagStore, \ref gem5MemorySystem "gem5 Memory System"
 */
class Cache : public BaseCache
{
  public:

    /** A typedef for a list of CacheBlk pointers. */
    typedef std::list<CacheBlk*> BlkList;

  protected:

    /**
     * The CPU-side port extends the base cache slave port with access
     * functions for functional, atomic and timing requests.
     */
    class CpuSidePort : public CacheSlavePort
    {
      private:

        // a pointer to our specific cache implementation
        Cache *cache;

      protected:

        virtual bool recvTimingSnoopResp(PacketPtr pkt);

        virtual bool recvTimingReq(PacketPtr pkt);

        virtual Tick recvAtomic(PacketPtr pkt);

        virtual void recvFunctional(PacketPtr pkt);

        virtual AddrRangeList getAddrRanges() const;

      public:

        CpuSidePort(const std::string &_name, Cache *_cache,
                    const std::string &_label);

    };

    /**
     * Override the default behaviour of sendDeferredPacket to enable
     * the memory-side cache port to also send requests based on the
     * current MSHR status. This queue has a pointer to our specific
     * cache implementation and is used by the MemSidePort.
     */
    class CacheReqPacketQueue : public ReqPacketQueue
    {

      protected:

        Cache &cache;
        SnoopRespPacketQueue &snoopRespQueue;

      public:

        CacheReqPacketQueue(Cache &cache, MasterPort &port,
                            SnoopRespPacketQueue &snoop_resp_queue,
                            const std::string &label) :
            ReqPacketQueue(cache, port, label), cache(cache),
            snoopRespQueue(snoop_resp_queue) { }

        /**
         * Override the normal sendDeferredPacket and consider not
         * only the transmit list (used for responses), but also
         * requests.
         */
        virtual void sendDeferredPacket();

        /**
         * Check if there is a conflicting snoop response about to be
         * sent out, and if so simply stall any requests, and schedule
         * a send event at the time the next snoop response is due to
         * be sent out.
         */
        bool checkConflictingSnoop(Addr addr)
        {
            if (snoopRespQueue.hasAddr(addr)) {
                DPRINTF(CachePort, "Waiting for snoop response to be "
                        "sent\n");
                Tick when = snoopRespQueue.deferredPacketReadyTime();
                schedSendEvent(when);
                return true;
            }
            return false;
        }
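
        // A sketch of the intended call site (an assumption; the
        // actual definition lives in the .cc file): sendDeferredPacket
        // is expected to consult checkConflictingSnoop before sending
        // an MSHR-based request, e.g.
        //
        //     if (!checkConflictingSnoop(entry->blkAddr))
        //         waitingOnRetry = entry->sendPacket(cache);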
    };

    /**
     * The memory-side port extends the base cache master port with
     * access functions for functional, atomic and timing snoops.
     */
    class MemSidePort : public CacheMasterPort
    {
      private:

        /** The cache-specific queue. */
        CacheReqPacketQueue _reqQueue;

        SnoopRespPacketQueue _snoopRespQueue;

        // a pointer to our specific cache implementation
        Cache *cache;

      protected:

        virtual void recvTimingSnoopReq(PacketPtr pkt);

        virtual bool recvTimingResp(PacketPtr pkt);

        virtual Tick recvAtomicSnoop(PacketPtr pkt);

        virtual void recvFunctionalSnoop(PacketPtr pkt);

      public:

        MemSidePort(const std::string &_name, Cache *_cache,
                    const std::string &_label);
    };

    /** Tag and data storage */
    BaseTags *tags;

    /** Prefetcher */
    BasePrefetcher *prefetcher;

    /** Temporary cache block for occasional transitory use */
    CacheBlk *tempBlock;

    /**
     * This cache should allocate a block on a line-sized write miss.
     */
    const bool doFastWrites;

    /**
     * Turn line-sized writes into WriteInvalidate transactions.
     */
    void promoteWholeLineWrites(PacketPtr pkt);

    /**
     * Notify the prefetcher on every access, not just misses.
     */
    const bool prefetchOnAccess;

    /**
     * Clusivity with respect to the upstream cache, determining if we
     * fill into both this cache and the cache above on a miss. Note
     * that we currently do not support strict clusivity policies.
     */
    const Enums::Clusivity clusivity;

    /**
     * Determine if clean lines should be written back or not. In
     * cases where a downstream cache is mostly inclusive we likely
     * want it to act as a victim cache also for lines that have not
     * been modified. Hence, we cannot simply drop the line (or send a
     * clean evict), but rather need to send the actual data.
     */
    const bool writebackClean;

    /**
     * Upstream caches need this packet until true is returned, so
     * hold it for deletion until a subsequent call.
     */
    std::unique_ptr<Packet> pendingDelete;

    /**
     * Writebacks from the tempBlock, which arise on the response path
     * in atomic mode, must happen after the call to recvAtomic has
     * finished (to preserve the correct ordering of the packets). We
     * therefore need to hold on to the packets, and have a method and
     * an event to send them.
     */
    PacketPtr tempBlockWriteback;

    /**
     * Send the outstanding tempBlock writeback. To be called after
     * recvAtomic finishes in cases where the block we filled is in
     * fact the tempBlock, and now needs to be written back.
     */
    void writebackTempBlockAtomic() {
        assert(tempBlockWriteback != nullptr);
        PacketList writebacks{tempBlockWriteback};
        doWritebacksAtomic(writebacks);
        tempBlockWriteback = nullptr;
    }

    /**
     * An event to write back the tempBlock after recvAtomic
     * finishes. To avoid other calls to recvAtomic getting in
     * between, we create this event with a higher priority.
     */
    EventWrapper<Cache, &Cache::writebackTempBlockAtomic>
        writebackTempBlockAtomicEvent;
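
    // A sketch of how this event is expected to be constructed (an
    // assumption; the actual initializer lives in the constructor in
    // the .cc file). EventWrapper takes an optional priority, and a
    // priority above the default makes the writeback fire before any
    // other activity gets in between:
    //
    //     writebackTempBlockAtomicEvent(this, false,
    //                                   EventBase::Delayed_Writeback_Pri)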

    /**
     * Store the outstanding requests that we are expecting snoop
     * responses from so we can determine which snoop responses we
     * generated and which ones were merely forwarded.
     */
    std::unordered_set<RequestPtr> outstandingSnoop;
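
    // A sketch of the intended bookkeeping (an assumption; the actual
    // logic lives in the .cc file): requests forwarded while expecting
    // a snoop response are recorded here, and a snoop response whose
    // request is not in the set is known to be merely forwarded:
    //
    //     outstandingSnoop.insert(pkt->req);  // when forwarding a request
    //     ...
    //     bool forwarded =                    // when the response arrives
    //         outstandingSnoop.find(pkt->req) == outstandingSnoop.end();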

    /**
     * Does all the processing necessary to perform the provided request.
     * @param pkt The memory request to perform.
     * @param blk The cache block to be updated.
     * @param lat The latency of the access.
     * @param writebacks List for any writebacks that need to be performed.
     * @return Boolean indicating whether the request was satisfied.
     */
    bool access(PacketPtr pkt, CacheBlk *&blk,
                Cycles &lat, PacketList &writebacks);

    /**
     * Handle the Compare and Swap operation for SPARC.
     */
    void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);

    /**
     * Find a block frame for a new block at address addr targeting the
     * given security space, assuming that the block is not currently
     * in the cache. Append any writebacks to the provided packet
     * list. Return the free block frame. May return nullptr if there
     * are no replaceable blocks at the moment.
     */
    CacheBlk *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks);

    /**
     * Invalidate a cache block.
     *
     * @param blk Block to invalidate
     */
    void invalidateBlock(CacheBlk *blk);

    /**
     * Maintain the clusivity of this cache by potentially
     * invalidating a block. This method works in conjunction with
     * satisfyRequest, but is separate to allow us to handle all MSHR
     * targets before potentially dropping a block.
     *
     * @param from_cache Whether we have dealt with a packet from a cache
     * @param blk The block that should potentially be dropped
     */
    void maintainClusivity(bool from_cache, CacheBlk *blk);

    /**
     * Populates a cache block and handles all outstanding requests for the
     * satisfied fill request.
     * @param pkt The memory request with the fill data.
     * @param blk The cache block if it already exists.
     * @param writebacks List for any writebacks that need to be performed.
     * @param allocate Whether to allocate a block or use the temp block
     * @return Pointer to the new cache block.
     */
    CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
                         PacketList &writebacks, bool allocate);

    /**
     * Determine whether we should allocate on a fill or not. If this
     * cache is mostly inclusive with regards to the upstream cache(s)
     * we always allocate (for any non-forwarded and cacheable
     * requests). In the case of a mostly exclusive cache, we allocate
     * on fill if the packet did not come from a cache: that is, if we
     * are dealing with a whole-line write (which behaves much like a
     * writeback), if the original target packet came from a
     * non-caching source, or if we are performing a prefetch or an
     * LLSC access.
     *
     * @param cmd Command of the incoming requesting packet
     * @return Whether we should allocate on the fill
     */
    inline bool allocOnFill(MemCmd cmd) const override
    {
        return clusivity == Enums::mostly_incl ||
            cmd == MemCmd::WriteLineReq ||
            cmd == MemCmd::ReadReq ||
            cmd == MemCmd::WriteReq ||
            cmd.isPrefetch() ||
            cmd.isLLSC();
    }

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The result of the access.
     */
    bool recvTimingReq(PacketPtr pkt);

    /**
     * Insert writebacks into the write buffer
     */
    void doWritebacks(PacketList& writebacks, Tick forward_time);

    /**
     * Send writebacks down the memory hierarchy in atomic mode
     */
    void doWritebacksAtomic(PacketList& writebacks);

    /**
     * Handling the special case of uncacheable write responses to
     * make recvTimingResp less cluttered.
     */
    void handleUncacheableWriteResp(PacketPtr pkt);

    /**
     * Handles a response (cache line fill/write ack) from the bus.
     * @param pkt The response packet
     */
    void recvTimingResp(PacketPtr pkt);

    /**
     * Snoops bus transactions to maintain coherence.
     * @param pkt The current bus transaction.
     */
    void recvTimingSnoopReq(PacketPtr pkt);

    /**
     * Handle a snoop response.
     * @param pkt Snoop response packet
     */
    void recvTimingSnoopResp(PacketPtr pkt);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The number of ticks required for the access.
     */
    Tick recvAtomic(PacketPtr pkt);

    /**
     * Snoop for the provided request in the cache and return the estimated
     * time taken.
     * @param pkt The memory request to snoop
     * @return The number of ticks required for the snoop.
     */
    Tick recvAtomicSnoop(PacketPtr pkt);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @param fromCpuSide Whether the access comes from the CPU-side
     *                    port (true) or the memory-side port (false)
     */
    void functionalAccess(PacketPtr pkt, bool fromCpuSide);

    /**
     * Perform any necessary updates to the block and perform any data
     * exchange between the packet and the block. The flags of the
     * packet are also set accordingly.
     *
     * @param pkt Request packet from upstream that hit a block
     * @param blk Cache block that the packet hit
     * @param deferred_response Whether this hit is to a block that
     *                          originally missed
     * @param pending_downgrade Whether the writable flag is to be removed
     */
    void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                        bool deferred_response = false,
                        bool pending_downgrade = false);

    void doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
                                bool already_copied, bool pending_inval);

    /**
     * Perform an upward snoop if needed, and update the block state
     * (possibly invalidating the block). Also create a response if required.
     *
     * @param pkt Snoop packet
     * @param blk Cache block being snooped
     * @param is_timing Timing or atomic for the response
     * @param is_deferred Is this a deferred snoop or not?
     * @param pending_inval Do we have a pending invalidation?
     *
     * @return The snoop delay incurred by the upwards snoop
     */
    uint32_t handleSnoop(PacketPtr pkt, CacheBlk *blk,
                         bool is_timing, bool is_deferred, bool pending_inval);

    /**
     * Create a writeback request for the given block.
     * @param blk The block to write back.
     * @return The writeback request for the block.
     */
    PacketPtr writebackBlk(CacheBlk *blk);

    /**
     * Create a CleanEvict request for the given block.
     * @param blk The block to evict.
     * @return The CleanEvict request for the block.
     */
    PacketPtr cleanEvictBlk(CacheBlk *blk);

    void memWriteback() override;
    void memInvalidate() override;
    bool isDirty() const override;

    /**
     * Cache block visitor that writes back dirty cache blocks using
     * functional writes.
     *
     * \return Always returns true.
     */
    bool writebackVisitor(CacheBlk &blk);
    /**
     * Cache block visitor that invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to memory.
     *
     * \return Always returns true.
     */
    bool invalidateVisitor(CacheBlk &blk);

    /**
     * Create an appropriate downstream bus request packet for the
     * given parameters.
     * @param cpu_pkt  The miss that needs to be satisfied.
     * @param blk The block currently in the cache corresponding to
     * cpu_pkt (nullptr if none).
     * @param needsWritable Indicates that the block must be writable
     * even if the request in cpu_pkt doesn't indicate that.
     * @return A new Packet containing the request, or nullptr if the
     * current request in cpu_pkt should just be forwarded on.
     */
    PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                               bool needsWritable) const;

    /**
     * Return the next queue entry to service, either a pending miss
     * from the MSHR queue, a buffered write from the write buffer, or
     * something from the prefetcher. This function is responsible
     * for prioritizing among those sources on the fly.
     */
    QueueEntry* getNextQueueEntry();

    /**
     * Send up a snoop request and find cached copies. If cached copies are
     * found, set the BLOCK_CACHED flag in pkt.
     */
    bool isCachedAbove(PacketPtr pkt, bool is_timing = true) const;

    /**
     * Return whether there are any outstanding misses.
     */
    bool outstandingMisses() const
    {
        return !mshrQueue.isEmpty();
    }

    CacheBlk *findBlock(Addr addr, bool is_secure) const {
        return tags->findBlock(addr, is_secure);
    }

    bool inCache(Addr addr, bool is_secure) const override {
        return (tags->findBlock(addr, is_secure) != nullptr);
    }

    bool inMissQueue(Addr addr, bool is_secure) const override {
        return (mshrQueue.findMatch(addr, is_secure) != nullptr);
    }

    /**
     * Find next request ready time from among possible sources.
     */
    Tick nextQueueReadyTime() const;

  public:
    /** Instantiates a basic cache object. */
    Cache(const CacheParams *p);

    /** Non-default destructor is needed to deallocate memory. */
    virtual ~Cache();

    void regStats() override;

    /**
     * Take an MSHR, turn it into a suitable downstream packet, and
     * send it out. This construct allows a queue entry to choose a suitable
     * approach based on its type.
     *
     * @param mshr The MSHR to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendMSHRQueuePacket(MSHR* mshr);

    /**
     * Similar to sendMSHRQueuePacket, but for a write-queue entry
     * instead. Create the packet and send it; if successful, also
     * mark the entry as in service.
     *
     * @param wq_entry The write-queue entry to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);

    /**
     * Serialize the state of the caches.
     * We currently don't support checkpointing cache state, so this panics.
     */
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};

/**
 * Wrap a method and present it as a cache block visitor.
 *
 * For example, the forEachBlk method in the tag arrays expects a
 * callable object/function as its parameter. This class wraps a
 * method in an object and presents a callable object that adheres to
 * the cache block visitor protocol.
 */
class CacheBlkVisitorWrapper : public CacheBlkVisitor
{
  public:
    typedef bool (Cache::*VisitorPtr)(CacheBlk &blk);

    CacheBlkVisitorWrapper(Cache &_cache, VisitorPtr _visitor)
        : cache(_cache), visitor(_visitor) {}

    bool operator()(CacheBlk &blk) override {
        return (cache.*visitor)(blk);
    }

  private:
    Cache &cache;
    VisitorPtr visitor;
};
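
// A sketch of the intended usage (an assumption based on the comment
// above; e.g. Cache::memWriteback could drain dirty blocks this way):
//
//     CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor);
//     tags->forEachBlk(visitor);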

/**
 * Cache block visitor that determines if there are dirty blocks in a
 * cache.
 *
 * Use with the forEachBlk method in the tag array to determine if the
 * array contains dirty blocks.
 */
class CacheBlkIsDirtyVisitor : public CacheBlkVisitor
{
  public:
    CacheBlkIsDirtyVisitor()
        : _isDirty(false) {}

    bool operator()(CacheBlk &blk) override {
        if (blk.isDirty()) {
            _isDirty = true;
            return false;
        } else {
            return true;
        }
    }

    /**
     * Does the array contain a dirty line?
     *
     * \return true if yes, false otherwise.
     */
    bool isDirty() const { return _isDirty; }

  private:
    bool _isDirty;
};
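
// A sketch of the intended usage (an assumption; e.g. Cache::isDirty
// could query the tag array this way):
//
//     CacheBlkIsDirtyVisitor visitor;
//     tags->forEachBlk(visitor);
//     bool dirty = visitor.isDirty();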

#endif // __MEM_CACHE_CACHE_HH__