cache.hh revision 11375:f98df9231cdd
/*
 * Copyright (c) 2012-2016 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Hansson
 */

/**
 * @file
 * Describes a cache based on template policies.
 */

#ifndef __MEM_CACHE_CACHE_HH__
#define __MEM_CACHE_CACHE_HH__

#include <list>
#include <memory>
#include <unordered_set>

#include "base/misc.hh" // fatal, panic, and warn
#include "enums/Clusivity.hh"
#include "mem/cache/base.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/tags/base.hh"
#include "params/Cache.hh"
#include "sim/eventq.hh"

// Forward declaration
class BasePrefetcher;

/**
 * A template-policy based cache. The behavior of the cache can be altered by
 * supplying different template policies. TagStore handles all tag and data
 * storage. @sa TagStore, \ref gem5MemorySystem "gem5 Memory System"
 */
class Cache : public BaseCache
{
  public:

    /** A typedef for a list of CacheBlk pointers. */
    typedef std::list<CacheBlk*> BlkList;

  protected:

    /**
     * The CPU-side port extends the base cache slave port with access
     * functions for functional, atomic and timing requests.
     */
    class CpuSidePort : public CacheSlavePort
    {
      private:

        // a pointer to our specific cache implementation
        Cache *cache;

      protected:

        virtual bool recvTimingSnoopResp(PacketPtr pkt);

        virtual bool recvTimingReq(PacketPtr pkt);

        virtual Tick recvAtomic(PacketPtr pkt);

        virtual void recvFunctional(PacketPtr pkt);

        virtual AddrRangeList getAddrRanges() const;

      public:

        CpuSidePort(const std::string &_name, Cache *_cache,
                    const std::string &_label);

    };

    /**
     * Override the default behaviour of sendDeferredPacket to enable
     * the memory-side cache port to also send requests based on the
     * current MSHR status. This queue has a pointer to our specific
     * cache implementation and is used by the MemSidePort.
     */
    class CacheReqPacketQueue : public ReqPacketQueue
    {

      protected:

        Cache &cache;
        SnoopRespPacketQueue &snoopRespQueue;

      public:

        CacheReqPacketQueue(Cache &cache, MasterPort &port,
                            SnoopRespPacketQueue &snoop_resp_queue,
                            const std::string &label) :
            ReqPacketQueue(cache, port, label), cache(cache),
            snoopRespQueue(snoop_resp_queue) { }

        /**
         * Override the normal sendDeferredPacket and consider not
         * only the transmit list (used for responses), but also
         * requests.
         */
        virtual void sendDeferredPacket();

        /**
         * Check if there is a conflicting snoop response about to be
         * sent out, and if so stall any requests and schedule a send
         * event for the time when the next snoop response is being
         * sent out.
         */
        bool checkConflictingSnoop(Addr addr)
        {
            if (snoopRespQueue.hasAddr(addr)) {
                DPRINTF(CachePort, "Waiting for snoop response to be "
                        "sent\n");
                Tick when = snoopRespQueue.deferredPacketReadyTime();
                schedSendEvent(when);
                return true;
            }
            return false;
        }
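
        // A minimal sketch of how sendDeferredPacket is expected to
        // use this check (illustrative only; the definitive logic
        // lives in cache.cc): ask the cache for the next request
        // packet, but let a conflicting snoop response go first.
        //
        //     QueueEntry* entry = cache.getNextQueueEntry();
        //     if (entry && !checkConflictingSnoop(entry->blkAddr))
        //         waitingOnRetry = entry->sendPacket(cache);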
    };

    /**
     * The memory-side port extends the base cache master port with
     * access functions for functional, atomic and timing snoops.
     */
    class MemSidePort : public CacheMasterPort
    {
      private:

        /** The cache-specific queue. */
        CacheReqPacketQueue _reqQueue;

        SnoopRespPacketQueue _snoopRespQueue;

        // a pointer to our specific cache implementation
        Cache *cache;

      protected:

        virtual void recvTimingSnoopReq(PacketPtr pkt);

        virtual bool recvTimingResp(PacketPtr pkt);

        virtual Tick recvAtomicSnoop(PacketPtr pkt);

        virtual void recvFunctionalSnoop(PacketPtr pkt);

      public:

        MemSidePort(const std::string &_name, Cache *_cache,
                    const std::string &_label);
    };

    /** Tag and data storage */
    BaseTags *tags;

    /** Prefetcher */
    BasePrefetcher *prefetcher;

    /** Temporary cache block for occasional transitory use */
    CacheBlk *tempBlock;

    /**
     * This cache should allocate a block on a line-sized write miss.
     */
    const bool doFastWrites;

    /**
     * Turn line-sized writes into WriteLineReq transactions.
     */
    void promoteWholeLineWrites(PacketPtr pkt);
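
    // Promotion sketch (illustrative; mirrors the pattern used by
    // promoteWholeLineWrites in cache.cc): a WriteReq that covers a
    // whole, aligned cache line is switched to a WriteLineReq.
    //
    //     if (doFastWrites && pkt->cmd == MemCmd::WriteReq &&
    //         pkt->getSize() == blkSize && pkt->getOffset(blkSize) == 0)
    //         pkt->cmd = MemCmd::WriteLineReq;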

    /**
     * Notify the prefetcher on every access, not just misses.
     */
    const bool prefetchOnAccess;

    /**
     * Clusivity with respect to the upstream cache, determining if we
     * fill into both this cache and the cache above on a miss. Note
     * that we currently do not support strict clusivity policies.
     */
    const Enums::Clusivity clusivity;

    /**
     * Determine if clean lines should be written back or not. In
     * cases where a downstream cache is mostly inclusive we likely
     * want it to act as a victim cache also for lines that have not
     * been modified. Hence, we cannot simply drop the line (or send a
     * clean evict), but rather need to send the actual data.
     */
    const bool writebackClean;
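
    // Illustrative consequence of writebackClean (a sketch of the
    // eviction pattern; the actual code lives in cache.cc): clean
    // blocks are turned into writebacks rather than CleanEvicts when
    // the flag is set.
    //
    //     if (blk->isDirty() || writebackClean)
    //         writebacks.push_back(writebackBlk(blk));
    //     else
    //         writebacks.push_back(cleanEvictBlk(blk));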

    /**
     * Upstream caches need this packet until true is returned, so
     * hold it for deletion until a subsequent call.
     */
    std::unique_ptr<Packet> pendingDelete;

    /**
     * Writebacks from the tempBlock, arising on the response path in
     * atomic mode, must happen after the call to recvAtomic has
     * finished (to preserve the ordering of the packets). We
     * therefore need to hold on to the packets, and have a method
     * and an event to send them.
     */
    PacketPtr tempBlockWriteback;

    /**
     * Send the outstanding tempBlock writeback. To be called after
     * recvAtomic finishes in cases where the block we filled is in
     * fact the tempBlock, and now needs to be written back.
     */
    void writebackTempBlockAtomic() {
        assert(tempBlockWriteback != nullptr);
        PacketList writebacks{tempBlockWriteback};
        doWritebacksAtomic(writebacks);
        tempBlockWriteback = nullptr;
    }

    /**
     * An event to writeback the tempBlock after recvAtomic
     * finishes. To avoid other calls to recvAtomic getting in
     * between, we create this event with a higher priority.
     */
    EventWrapper<Cache, &Cache::writebackTempBlockAtomic>
        writebackTempBlockAtomicEvent;
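
    // Scheduling sketch (illustrative; see recvAtomic in cache.cc):
    // when an atomic-mode fill lands in the tempBlock, the resulting
    // writeback is deferred and the event is scheduled at the current
    // tick, so it runs as soon as recvAtomic has returned.
    //
    //     tempBlockWriteback = wbPkt;
    //     schedule(writebackTempBlockAtomicEvent, curTick());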

    /**
     * Store the outstanding requests that we are expecting snoop
     * responses from so we can determine which snoop responses we
     * generated and which ones were merely forwarded.
     */
    std::unordered_set<RequestPtr> outstandingSnoop;
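
    // Sketch of the intended use (illustrative; see recvTimingReq and
    // recvTimingSnoopResp in cache.cc): recvTimingReq records
    // pkt->req when it initiates a snoop, and recvTimingSnoopResp
    // then checks membership to tell responses this cache generated
    // apart from ones it merely forwarded.
    //
    //     // response to a snoop we initiated iff the request is
    //     // found in outstandingSnoop
    //     bool ours = outstandingSnoop.find(pkt->req) !=
    //         outstandingSnoop.end();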

    /**
     * Does all the processing necessary to perform the provided request.
     * @param pkt The memory request to perform.
     * @param blk The cache block to be updated.
     * @param lat The latency of the access.
     * @param writebacks List for any writebacks that need to be performed.
     * @return Boolean indicating whether the request was satisfied.
     */
    bool access(PacketPtr pkt, CacheBlk *&blk,
                Cycles &lat, PacketList &writebacks);

    /**
     * Handle doing the Compare and Swap function for SPARC.
     */
    void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);

    /**
     * Find a block frame for the new block at address addr targeting
     * the given security space, assuming that the block is not
     * currently in the cache. Append writebacks, if any, to the
     * provided packet list. Return the free block frame. May return
     * NULL if there are no replaceable blocks at the moment.
     */
    CacheBlk *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks);

    /**
     * Invalidate a cache block.
     *
     * @param blk Block to invalidate
     */
    void invalidateBlock(CacheBlk *blk);

    /**
     * Populates a cache block and handles all outstanding requests for the
     * satisfied fill request. This version takes two memory requests. One
     * contains the fill data, the other is an optional target to satisfy.
     * @param pkt The memory request with the fill data.
     * @param blk The cache block if it already exists.
     * @param writebacks List for any writebacks that need to be performed.
     * @param allocate Whether to allocate a block or use the temp block
     * @return Pointer to the new cache block.
     */
    CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
                         PacketList &writebacks, bool allocate);

    /**
     * Determine whether we should allocate on a fill or not. If this
     * cache is mostly inclusive with regards to the upstream cache(s)
     * we always allocate (for any non-forwarded and cacheable
     * requests). In the case of a mostly exclusive cache, we allocate
     * on fill if the packet did not come from a cache: that is, if we
     * are dealing with a whole-line write (which behaves much like a
     * writeback), the original target packet came from a non-caching
     * source, or we are performing a prefetch or LLSC.
     *
     * @param cmd Command of the incoming requesting packet
     * @return Whether we should allocate on the fill
     */
    inline bool allocOnFill(MemCmd cmd) const override
    {
        return clusivity == Enums::mostly_incl ||
            cmd == MemCmd::WriteLineReq ||
            cmd == MemCmd::ReadReq ||
            cmd == MemCmd::WriteReq ||
            cmd.isPrefetch() ||
            cmd.isLLSC();
    }
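
    // Worked example (illustrative): in a mostly-exclusive cache, a
    // ReadExReq issued by an upstream cache matches none of the
    // conditions above and therefore does not allocate here on fill,
    // while a plain ReadReq or WriteReq from a non-caching master
    // (e.g. a DMA engine) does.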

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The result of the access.
     */
    bool recvTimingReq(PacketPtr pkt);

    /**
     * Insert writebacks into the write buffer
     */
    void doWritebacks(PacketList& writebacks, Tick forward_time);

    /**
     * Send writebacks down the memory hierarchy in atomic mode
     */
    void doWritebacksAtomic(PacketList& writebacks);

    /**
     * Handle the special case of uncacheable write responses to
     * make recvTimingResp less cluttered.
     */
    void handleUncacheableWriteResp(PacketPtr pkt);

    /**
     * Handles a response (cache line fill/write ack) from the bus.
     * @param pkt The response packet
     */
    void recvTimingResp(PacketPtr pkt);

    /**
     * Snoops bus transactions to maintain coherence.
     * @param pkt The current bus transaction.
     */
    void recvTimingSnoopReq(PacketPtr pkt);

    /**
     * Handle a snoop response.
     * @param pkt Snoop response packet
     */
    void recvTimingSnoopResp(PacketPtr pkt);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The number of ticks required for the access.
     */
    Tick recvAtomic(PacketPtr pkt);

    /**
     * Snoop for the provided request in the cache and return the estimated
     * time taken.
     * @param pkt The memory request to snoop
     * @return The number of ticks required for the snoop.
     */
    Tick recvAtomicSnoop(PacketPtr pkt);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @param fromCpuSide Whether the access comes from the CPU-side
     * port or the memory-side port
     */
    void functionalAccess(PacketPtr pkt, bool fromCpuSide);

    void satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
                               bool deferred_response = false,
                               bool pending_downgrade = false);
    bool satisfyMSHR(MSHR *mshr, PacketPtr pkt, CacheBlk *blk);

    void doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
                                bool already_copied, bool pending_inval);

    /**
     * Perform an upward snoop if needed, and update the block state
     * (possibly invalidating the block). Also create a response if required.
     *
     * @param pkt Snoop packet
     * @param blk Cache block being snooped
     * @param is_timing Timing or atomic for the response
     * @param is_deferred Is this a deferred snoop or not?
     * @param pending_inval Do we have a pending invalidation?
     *
     * @return The snoop delay incurred by the upwards snoop
     */
    uint32_t handleSnoop(PacketPtr pkt, CacheBlk *blk,
                         bool is_timing, bool is_deferred, bool pending_inval);

    /**
     * Create a writeback request for the given block.
     * @param blk The block to writeback.
     * @return The writeback request for the block.
     */
    PacketPtr writebackBlk(CacheBlk *blk);

    /**
     * Create a CleanEvict request for the given block.
     * @param blk The block to evict.
     * @return The CleanEvict request for the block.
     */
    PacketPtr cleanEvictBlk(CacheBlk *blk);

    void memWriteback() override;
    void memInvalidate() override;
    bool isDirty() const override;

    /**
     * Cache block visitor that writes back dirty cache blocks using
     * functional writes.
     *
     * @return Always returns true.
     */
    bool writebackVisitor(CacheBlk &blk);

    /**
     * Cache block visitor that invalidates all blocks in the cache.
     *
     * @warning Dirty cache lines will not be written back to memory.
     *
     * @return Always returns true.
     */
    bool invalidateVisitor(CacheBlk &blk);

    /**
     * Generate an appropriate downstream bus request packet for the
     * given parameters.
     * @param cpu_pkt  The upstream request that needs to be satisfied.
     * @param blk The block currently in the cache corresponding to
     * cpu_pkt (NULL if none).
     * @param needsExclusive  Indicates that an exclusive copy is required
     * even if the request in cpu_pkt doesn't indicate that.
     * @return A new Packet containing the request, or NULL if the
     * current request in cpu_pkt should just be forwarded on.
     */
    PacketPtr getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                           bool needsExclusive) const;
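
    // Illustrative mapping (a sketch; the definitive logic is in
    // getBusPacket in cache.cc): a miss that needs an exclusive copy
    // typically becomes a ReadExReq, a miss in a read-only cache a
    // ReadCleanReq, and any other read miss a ReadSharedReq, while a
    // valid block that merely lacks writability can use an UpgradeReq.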

    /**
     * Return the next queue entry to service, either a pending miss
     * from the MSHR queue, a buffered write from the write buffer, or
     * something from the prefetcher. This function is responsible
     * for prioritizing among those sources on the fly.
     */
    QueueEntry* getNextQueueEntry();

    /**
     * Send up a snoop request and find cached copies. If cached copies are
     * found, set the BLOCK_CACHED flag in pkt.
     */
    bool isCachedAbove(PacketPtr pkt, bool is_timing = true) const;

    /**
     * Return whether there are any outstanding misses.
     */
    bool outstandingMisses() const
    {
        return !mshrQueue.isEmpty();
    }

    CacheBlk *findBlock(Addr addr, bool is_secure) const {
        return tags->findBlock(addr, is_secure);
    }

    bool inCache(Addr addr, bool is_secure) const override {
        return tags->findBlock(addr, is_secure) != nullptr;
    }

    bool inMissQueue(Addr addr, bool is_secure) const override {
        return mshrQueue.findMatch(addr, is_secure) != nullptr;
    }

    /**
     * Find next request ready time from among possible sources.
     */
    Tick nextQueueReadyTime() const;

  public:
    /** Instantiates a basic cache object. */
    Cache(const CacheParams *p);

    /** Non-default destructor is needed to deallocate memory. */
    virtual ~Cache();

    void regStats() override;

    /**
     * Take an MSHR, turn it into a suitable downstream packet, and
     * send it out. This construct allows a queue entry to choose a suitable
     * approach based on its type.
     *
     * @param mshr The MSHR to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendMSHRQueuePacket(MSHR* mshr);

    /**
     * Similar to sendMSHRQueuePacket, but for a write-queue entry
     * instead. Create the packet, send it, and if successful also
     * mark the entry in service.
     *
     * @param wq_entry The write-queue entry to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);
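
    // Dispatch sketch (illustrative; the callback pattern lives in
    // the queue-entry implementations): an entry picks the right send
    // routine by calling back into the cache, roughly:
    //
    //     bool MSHR::sendPacket(Cache &cache)
    //     { return cache.sendMSHRQueuePacket(this); }
    //
    //     bool WriteQueueEntry::sendPacket(Cache &cache)
    //     { return cache.sendWriteQueuePacket(this); }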

    /**
     * Serialize the state of the caches.
     *
     * We currently don't support checkpointing cache state, so this
     * panics.
     */
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};

/**
 * Wrap a method and present it as a cache block visitor.
 *
 * For example, the forEachBlk method in the tag arrays expects a
 * callable object/function as its parameter. This class wraps a
 * method in an object and presents a callable object that adheres to
 * the cache block visitor protocol.
 */
class CacheBlkVisitorWrapper : public CacheBlkVisitor
{
  public:
    typedef bool (Cache::*VisitorPtr)(CacheBlk &blk);

    CacheBlkVisitorWrapper(Cache &_cache, VisitorPtr _visitor)
        : cache(_cache), visitor(_visitor) {}

    bool operator()(CacheBlk &blk) override {
        return (cache.*visitor)(blk);
    }

  private:
    Cache &cache;
    VisitorPtr visitor;
};
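
// A minimal usage sketch (illustrative; memWriteback in cache.cc
// follows this pattern): wrap a member function and hand it to the
// tag array's forEachBlk.
//
//     CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor);
//     tags->forEachBlk(visitor);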

/**
 * Cache block visitor that determines if there are dirty blocks in a
 * cache.
 *
 * Use with the forEachBlk method in the tag array to determine if the
 * array contains dirty blocks.
 */
class CacheBlkIsDirtyVisitor : public CacheBlkVisitor
{
  public:
    CacheBlkIsDirtyVisitor()
        : _isDirty(false) {}

    bool operator()(CacheBlk &blk) override {
        if (blk.isDirty()) {
            // found a dirty block, no need to visit the rest
            _isDirty = true;
            return false;
        } else {
            return true;
        }
    }

    /**
     * Does the array contain a dirty line?
     *
     * @return true if yes, false otherwise.
     */
    bool isDirty() const { return _isDirty; }

  private:
    bool _isDirty;
};
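
// Usage sketch (illustrative; Cache::isDirty can be implemented this
// way): run the visitor over all blocks and query the result.
//
//     CacheBlkIsDirtyVisitor visitor;
//     tags->forEachBlk(visitor);
//     return visitor.isDirty();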

#endif // __MEM_CACHE_CACHE_HH__