fa_lru.hh revision 12727:56c23b54bcb1
/*
 * Copyright (c) 2012-2013,2016,2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declaration of a fully associative LRU tag store.
 */

#ifndef __MEM_CACHE_TAGS_FA_LRU_HH__
#define __MEM_CACHE_TAGS_FA_LRU_HH__

#include <cstdint>
#include <string>
#include <unordered_map>

#include "base/bitfield.hh"
#include "base/intmath.hh"
#include "base/logging.hh"
#include "base/statistics.hh"
#include "base/types.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/tags/base.hh"
#include "mem/packet.hh"
#include "params/FALRU.hh"

// Uncomment to enable sanity checks for the FALRU cache and the
// CacheTracking class
//#define FALRU_DEBUG

// A bitmask of the caches we are keeping track of. The lowest bit
// corresponds to the smallest tracked cache, as specified by the
// corresponding parameter; the remaining bits correspond to
// exponentially growing cache sizes.
typedef uint32_t CachesMask;
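
// Illustrative example (the sizes here are assumed, not taken from this
// file): with a smallest tracked cache of 64 KiB and an actual cache of
// 1 MiB, bit 0 stands for the 64 KiB cache, bit 1 for 128 KiB, bit 2 for
// 256 KiB and bit 3 for 512 KiB. A block whose mask is
//
//     CachesMask m = (1 << 2) | (1 << 3);   // 0b1100
//
// would therefore count as a hit only for the 256 KiB and 512 KiB
// tracked caches.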

/**
 * A fully associative cache block.
 */
class FALRUBlk : public CacheBlk
{
  public:
    /** The previous block in LRU order. */
    FALRUBlk *prev;
    /** The next block in LRU order. */
    FALRUBlk *next;

    /** A bit mask of the tracked caches in which this block fits. */
    CachesMask inCachesMask;
};

/**
 * A fully associative LRU cache. Keeps statistics for accesses to a number of
 * cache sizes at once.
 */
class FALRU : public BaseTags
{
  public:
    /** Typedef the block type used in this class. */
    typedef FALRUBlk BlkType;

  protected:
    /** The cache blocks. */
    FALRUBlk *blks;

    /** The MRU block. */
    FALRUBlk *head;
    /** The LRU block. */
    FALRUBlk *tail;

    /** Hash table type mapping addresses to cache block pointers. */
    typedef std::unordered_map<Addr, FALRUBlk *, std::hash<Addr> > hash_t;
    /** Iterator into the address hash table. */
    typedef hash_t::const_iterator tagIterator;

    /** The address hash table. */
    hash_t tagHash;

    /**
     * Find the cache block for the given address.
     * @param addr The address to find.
     * @return The cache block for the address, if any.
     */
    FALRUBlk * hashLookup(Addr addr) const;

    /**
     * Move a cache block to the MRU position.
     *
     * @param blk The block to promote.
     */
    void moveToHead(FALRUBlk *blk);

    /**
     * Move a cache block to the LRU position.
     *
     * @param blk The block to demote.
     */
    void moveToTail(FALRUBlk *blk);

  public:
    typedef FALRUParams Params;

    /**
     * Construct and initialize this cache tag store.
     */
    FALRU(const Params *p);
    ~FALRU();

    /**
     * Register the stats for this object.
     */
    void regStats() override;

    /**
     * Invalidate a cache block.
     * @param blk The block to invalidate.
     */
    void invalidate(CacheBlk *blk) override;

    /**
     * Access block and update replacement data. May not succeed, in which
     * case a null pointer is returned. This has all the implications of a
     * cache access and should only be used as such.
     * Returns the access latency and the inCachesMask flags as side effects.
     * @param addr The address to look for.
     * @param is_secure True if the target memory space is secure.
     * @param lat The latency of the access.
     * @param in_cache_mask Mask indicating the caches in which the block fits.
     * @return Pointer to the cache block.
     */
    CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
                          CachesMask *in_cache_mask);
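
    /*
     * Minimal usage sketch (illustrative only; it assumes a FALRU pointer
     * named "tags" and a packet "pkt", neither of which is defined here):
     *
     *     Cycles lat;
     *     CachesMask mask = 0;
     *     CacheBlk *blk = tags->accessBlock(pkt->getAddr(),
     *                                       pkt->isSecure(), lat, &mask);
     *     if (!blk) {
     *         // the access is treated as a miss
     *     }
     */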

    /**
     * A wrapper of the above function to conform with the base interface.
     */
    CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) override;

    /**
     * Find the block in the cache, but do not update the replacement data.
     * @param addr The address to look for.
     * @param is_secure True if the target memory space is secure.
     * @return Pointer to the cache block.
     */
    CacheBlk* findBlock(Addr addr, bool is_secure) const override;

    /**
     * Find a replacement victim based on the address.
     *
     * @param addr Address to find a victim for.
     * @return Cache block to be replaced.
     */
    CacheBlk* findVictim(Addr addr) override;

    /**
     * Insert the new block into the cache and update replacement data.
     *
     * @param pkt Packet holding the address to update.
     * @param blk The block to update.
     */
    void insertBlock(PacketPtr pkt, CacheBlk *blk) override;

    /**
     * Find the cache block given set and way.
     * @param set The set of the block.
     * @param way The way of the block.
     * @return The cache block.
     */
    CacheBlk* findBlockBySetAndWay(int set, int way) const override;

    /**
     * Generate the tag from the address. For fully associative caches this
     * is just the block address.
     * @param addr The address to get the tag from.
     * @return The tag.
     */
    Addr extractTag(Addr addr) const override
    {
        return blkAlign(addr);
    }

    /**
     * Return the set of an address. There is only one set in a fully
     * associative cache.
     * @param addr The address to get the set from.
     * @return 0.
     */
    int extractSet(Addr addr) const override
    {
        return 0;
    }

    /**
     * Regenerate the block address from the tag.
     *
     * @param blk The block.
     * @return The block address.
     */
    Addr regenerateBlkAddr(const CacheBlk* blk) const override
    {
        return blk->tag;
    }
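
    /*
     * Worked example for the three helpers above (illustrative; a 64-byte
     * block size is assumed): for addr = 0x1234, extractTag() returns
     * blkAlign(0x1234) = 0x1200, extractSet() is always 0, and
     * regenerateBlkAddr() simply hands back the stored tag, i.e. the
     * block address 0x1200.
     */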

    /**
     * @todo Implement as in lru. Currently not used.
     */
    virtual std::string print() const override { return ""; }

    /**
     * Visit each block in the tag store and apply a visitor to the
     * block.
     *
     * The visitor should be a function (or object that behaves like a
     * function) that takes a cache block reference as its parameter
     * and returns a bool. A visitor can request that the traversal be
     * stopped by returning false; returning true causes it to be
     * called for the next block in the tag store.
     *
     * @param visitor Visitor to call on each block.
     */
    void forEachBlk(CacheBlkVisitor &visitor) override {
        for (int i = 0; i < numBlocks; i++) {
            if (!visitor(blks[i]))
                return;
        }
    }
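
    /*
     * Illustrative visitor sketch (it assumes, as described above, that a
     * CacheBlkVisitor is an object whose bool operator() takes a CacheBlk
     * reference; the ValidCounter name is made up for this example):
     *
     *     struct ValidCounter : public CacheBlkVisitor
     *     {
     *         unsigned count = 0;
     *         bool operator()(CacheBlk &blk)
     *         {
     *             if (blk.isValid())
     *                 ++count;
     *             return true; // keep visiting
     *         }
     *     };
     */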

  private:
    /**
     * Mechanism that allows us to simultaneously collect miss
     * statistics for multiple caches. Currently, we keep track of
     * caches from a set minimum size of interest up to the actual
     * cache size.
     */
    class CacheTracking
    {
      public:
        CacheTracking(unsigned min_size, unsigned max_size,
                      unsigned block_size)
            : blkSize(block_size),
              minTrackedSize(min_size),
              numTrackedCaches(max_size > min_size ?
                               floorLog2(max_size) - floorLog2(min_size) : 0),
              inAllCachesMask(mask(numTrackedCaches)),
              boundaries(new FALRUBlk *[numTrackedCaches])
        {
            fatal_if(numTrackedCaches > sizeof(CachesMask) * 8,
                     "Not enough bits (%d) in type CachesMask to keep "
                     "track of %d caches\n", sizeof(CachesMask) * 8,
                     numTrackedCaches);
        }
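
        /*
         * Worked example (the sizes are assumed for illustration): with
         * min_size = 64 KiB and max_size = 1 MiB,
         * numTrackedCaches = floorLog2(1 MiB) - floorLog2(64 KiB)
         *                  = 20 - 16 = 4,
         * inAllCachesMask = mask(4) = 0b1111, and boundaries[] holds one
         * block pointer per tracked cache size.
         */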

        ~CacheTracking()
        {
            delete[] boundaries;
        }

        /**
         * Initialize cache blocks and the tracking mechanism.
         *
         * All blocks in the cache need to be initialized once.
         *
         * @param head the MRU block
         * @param tail the LRU block
         */
        void init(FALRUBlk *head, FALRUBlk *tail);

        /**
         * Update boundaries as a block will be moved to the MRU.
         *
         * For all caches that didn't fit the block before moving it,
         * we move their boundaries one block closer to the MRU. We
         * also update the inCachesMask values as necessary.
         *
         * @param blk the block that will be moved to the head
         */
        void moveBlockToHead(FALRUBlk *blk);

        /**
         * Update boundaries as a block will be moved to the LRU.
         *
         * For all caches that fitted the block before moving it, we
         * move their boundaries one block closer to the LRU. We
         * also update the inCachesMask values as necessary.
         *
         * @param blk the block that will be moved to the tail
         */
        void moveBlockToTail(FALRUBlk *blk);
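
        /*
         * Consequence of the two moves above (inferred from their
         * descriptions rather than from the implementation): a block
         * moved to the MRU position fits in every tracked cache, so its
         * inCachesMask becomes inAllCachesMask, while a block moved to
         * the LRU position of the full cache no longer fits in any of
         * the smaller tracked caches, so its mask becomes 0.
         */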

        /**
         * Notify of a block access.
         *
         * This should be called every time a block is accessed and it
         * updates statistics. If the input block is nullptr then we
         * treat the access as a miss. The block's inCachesMask
         * determines the caches in which the block fits.
         *
         * @param blk the block to record the access for
         */
        void recordAccess(FALRUBlk *blk);

        /**
         * Check that the tracking mechanism is in a consistent state.
         *
         * Iterate from the head (MRU) to the tail (LRU) of the list
         * of blocks and assert that the inCachesMask and the boundaries
         * are in a consistent state.
         *
         * @param head the MRU block of the actual cache
         * @param tail the LRU block of the actual cache
         */
        void check(FALRUBlk *head, FALRUBlk *tail);

        /**
         * Register the stats for this object.
         */
        void regStats(std::string name);

      private:
        /** The size of the cache block. */
        const unsigned blkSize;
        /** The smallest cache we are tracking. */
        const unsigned minTrackedSize;
        /** The number of different cache sizes being tracked. */
        const int numTrackedCaches;
        /** A mask for all caches being tracked. */
        const CachesMask inAllCachesMask;
        /** Array of pointers to blocks at the cache boundaries. */
        FALRUBlk** boundaries;

      protected:
        /**
         * @defgroup FALRUStats Fully Associative LRU specific statistics
         * The FA LRU stack lets us track multiple cache sizes at once. These
         * statistics track the hits and misses for different cache sizes.
         * @{
         */

        /** Hits in each cache */
        Stats::Vector hits;
        /** Misses in each cache */
        Stats::Vector misses;
        /** Total number of accesses */
        Stats::Scalar accesses;

        /**
         * @}
         */
    };
    CacheTracking cacheTracking;
};

#endif // __MEM_CACHE_TAGS_FA_LRU_HH__