/*
 * Copyright (c) 2012-2013,2016,2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declaration of a fully associative LRU tag store.
 */

#ifndef __MEM_CACHE_TAGS_FA_LRU_HH__
#define __MEM_CACHE_TAGS_FA_LRU_HH__

#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>

#include "base/bitfield.hh"
#include "base/intmath.hh"
#include "base/logging.hh"
#include "base/statistics.hh"
#include "base/types.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/tags/base.hh"
#include "mem/packet.hh"
#include "params/FALRU.hh"

// Uncomment to enable sanity checks for the FALRU cache and the
// CacheTracking class
//#define FALRU_DEBUG

// A bitmask of the caches we are keeping track of. Currently the
// lowest bit corresponds to the smallest cache we are tracking, as
// specified by the corresponding parameter. The remaining bits are
// for exponentially growing cache sizes.
typedef uint32_t CachesMask;

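// For illustration only (the sizes below are assumed, not taken from the
// parameters): with a smallest tracked cache of 4kB and four tracked
// caches, bit 0 stands for the 4kB cache, bit 1 for 8kB, bit 2 for 16kB
// and bit 3 for 32kB, so a block whose mask is 0b1100 currently fits
// only in the 16kB and 32kB caches.
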
/**
 * A fully associative cache block.
 */
class FALRUBlk : public CacheBlk
{
  public:
    /** The previous block in LRU order. */
    FALRUBlk *prev;
    /** The next block in LRU order. */
    FALRUBlk *next;

    /** A bit mask of the caches that fit this block. */
    CachesMask inCachesMask;
};

/**
 * A fully associative LRU cache. Keeps statistics for accesses to a number of
 * cache sizes at once.
 */
class FALRU : public BaseTags
{
  public:
    /** Typedef the block type used in this class. */
    typedef FALRUBlk BlkType;

  protected:
    /** The cache blocks. */
    FALRUBlk *blks;

    /** The MRU block. */
    FALRUBlk *head;
    /** The LRU block. */
    FALRUBlk *tail;

    /** Hash table type mapping addresses to cache block pointers. */
    typedef std::unordered_map<Addr, FALRUBlk *, std::hash<Addr> > hash_t;
    /** Iterator into the address hash table. */
    typedef hash_t::const_iterator tagIterator;

    /** The address hash table. */
    hash_t tagHash;

    /**
     * Find the cache block for the given address.
     * @param addr The address to find.
     * @return The cache block of the address, if any.
     */
    FALRUBlk * hashLookup(Addr addr) const;

    /**
     * Move a cache block to the MRU position.
     *
     * @param blk The block to promote.
     */
    void moveToHead(FALRUBlk *blk);

    /**
     * Move a cache block to the LRU position.
     *
     * @param blk The block to demote.
     */
    void moveToTail(FALRUBlk *blk);
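
    // A minimal sketch of what the list manipulation could look like
    // (an illustration only; the real implementation lives in fa_lru.cc):
    //
    //     void FALRU::moveToHead(FALRUBlk *blk)
    //     {
    //         if (blk == head)
    //             return;
    //         // Unlink blk from its current position.
    //         if (blk->prev)
    //             blk->prev->next = blk->next;
    //         if (blk->next)
    //             blk->next->prev = blk->prev;
    //         if (blk == tail)
    //             tail = blk->prev;
    //         // Relink blk in front of the current head (MRU).
    //         blk->prev = nullptr;
    //         blk->next = head;
    //         head->prev = blk;
    //         head = blk;
    //     }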

  public:
    typedef FALRUParams Params;

    /**
     * Construct and initialize this cache tagstore.
     */
    FALRU(const Params *p);
    ~FALRU();

    /**
     * Register the stats for this object.
     */
    void regStats() override;

    /**
     * Invalidate a cache block.
     * @param blk The block to invalidate.
     */
    void invalidate(CacheBlk *blk) override;

    /**
     * Access block and update replacement data.  May not succeed, in which
     * case nullptr is returned.  This has all the implications of a
     * cache access and should only be used as such.
     * Returns the access latency and inCachesMask flags as a side effect.
     * @param addr The address to look for.
     * @param is_secure True if the target memory space is secure.
     * @param lat The latency of the access.
     * @param in_cache_mask Mask indicating the caches in which the blk fits.
     * @return Pointer to the cache block.
     */
    CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
                          CachesMask *in_cache_mask);

    /**
     * Just a wrapper of the above function to conform with the base
     * interface.
     */
    CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) override;

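    // Hypothetical usage sketch (the caller-side names are assumptions,
    // not part of this interface): the four-argument overload also
    // reports which of the tracked caches would have held the block.
    //
    //     Cycles lat(0);
    //     CachesMask in_caches = 0;
    //     CacheBlk *blk = tags->accessBlock(pkt->getAddr(),
    //                                       pkt->isSecure(), lat,
    //                                       &in_caches);
    //     if (blk) {
    //         // Hit in the simulated cache; in_caches indicates which
    //         // of the tracked cache sizes would also have hit.
    //     }
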
    /**
     * Find the block in the cache, do not update the replacement data.
     * @param addr The address to look for.
     * @param is_secure True if the target memory space is secure.
     * @return Pointer to the cache block.
     */
    CacheBlk* findBlock(Addr addr, bool is_secure) const override;

    /**
     * Find a block given set and way.
     *
     * @param set The set of the block.
     * @param way The way of the block.
     * @return The block.
     */
    ReplaceableEntry* findBlockBySetAndWay(int set, int way) const override;

    /**
     * Find replacement victim based on address.
     *
     * @param addr Address to find a victim for.
     * @return Cache block to be replaced.
     */
    CacheBlk* findVictim(Addr addr) override;

    /**
     * Insert the new block into the cache and update replacement data.
     *
     * @param pkt Packet holding the address to update
     * @param blk The block to update.
     */
    void insertBlock(PacketPtr pkt, CacheBlk *blk) override;

    /**
     * Generate the tag from the address. For fully associative caches
     * this is just the block address.
     * @param addr The address to get the tag from.
     * @return The tag.
     */
    Addr extractTag(Addr addr) const override
    {
        return blkAlign(addr);
    }

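    // Worked example (a 64-byte block size is assumed here, not taken
    // from the configuration): blkAlign() clears the block-offset bits,
    // so extractTag(0x12345) == 0x12340, and that block-aligned address
    // doubles as the tag because a fully associative cache has a single
    // set.
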
    /**
     * Regenerate the block address from the tag.
     *
     * @param blk The block.
     * @return The block address.
     */
    Addr regenerateBlkAddr(const CacheBlk* blk) const override
    {
        return blk->tag;
    }

    void forEachBlk(std::function<void(CacheBlk &)> visitor) override {
        for (int i = 0; i < numBlocks; i++) {
            visitor(blks[i]);
        }
    }

    bool anyBlk(std::function<bool(CacheBlk &)> visitor) override {
        for (int i = 0; i < numBlocks; i++) {
            if (visitor(blks[i])) {
                return true;
            }
        }
        return false;
    }

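    // Hypothetical usage sketch (names are illustrative): the visitor
    // overloads let callers walk every block without knowing about the
    // internal block array, e.g. counting the dirty blocks:
    //
    //     unsigned dirty = 0;
    //     tags->forEachBlk([&dirty](CacheBlk &blk) {
    //         if (blk.isDirty())
    //             ++dirty;
    //     });
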
  private:
    /**
     * Mechanism that allows us to simultaneously collect miss
     * statistics for multiple caches. Currently, we keep track of
     * caches from a set minimum size of interest up to the actual
     * cache size.
     */
    class CacheTracking
    {
      public:
        CacheTracking(unsigned min_size, unsigned max_size,
                      unsigned block_size)
            : blkSize(block_size),
              minTrackedSize(min_size),
              numTrackedCaches(max_size > min_size ?
                               floorLog2(max_size) - floorLog2(min_size) : 0),
              inAllCachesMask(mask(numTrackedCaches)),
              boundaries(new FALRUBlk *[numTrackedCaches])
        {
            fatal_if(numTrackedCaches > sizeof(CachesMask) * 8,
                     "Not enough bits (%d) in type CachesMask to keep "
                     "track of %d caches\n", sizeof(CachesMask) * 8,
                     numTrackedCaches);
        }

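        // Worked example (sizes assumed purely for illustration): with
        // min_size = 4kB and max_size = 64kB the constructor tracks
        // floorLog2(64kB) - floorLog2(4kB) = 16 - 12 = 4 cache sizes
        // below the full cache, and inAllCachesMask ends up as
        // mask(4) == 0b1111.
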
        ~CacheTracking()
        {
            delete[] boundaries;
        }

        /**
         * Initialize the cache blocks and the tracking mechanism.
         *
         * All blocks in the cache need to be initialized once.
         *
         * @param head the MRU block
         * @param tail the LRU block
         */
        void init(FALRUBlk *head, FALRUBlk *tail);

        /**
         * Update the boundaries when a block is about to be moved to
         * the MRU.
         *
         * For all caches that didn't fit the block before moving it,
         * we move their boundaries one block closer to the MRU. We
         * also update the inCachesMask values as necessary.
         *
         * @param blk the block that will be moved to the head
         */
        void moveBlockToHead(FALRUBlk *blk);

        /**
         * Update the boundaries when a block is about to be moved to
         * the LRU.
         *
         * For all caches that fit the block before moving it, we move
         * their boundaries one block closer to the LRU. We also update
         * the inCachesMask values as necessary.
         *
         * @param blk the block that will be moved to the tail
         */
        void moveBlockToTail(FALRUBlk *blk);

        /**
         * Notify of a block access.
         *
         * This should be called every time a block is accessed and it
         * updates the statistics. If the input block is nullptr then
         * we treat the access as a miss. The block's inCachesMask
         * determines the caches in which the block fits.
         *
         * @param blk the block to record the access for
         */
        void recordAccess(FALRUBlk *blk);

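        // Sketch of the intended accounting (an assumption about the
        // implementation in fa_lru.cc, shown here only for illustration):
        // for each tracked cache i, a hit is recorded when
        // blk && (blk->inCachesMask & (1U << i)) holds, and a miss
        // otherwise.
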
        /**
         * Check that the tracking mechanism is in a consistent state.
         *
         * Iterate from the head (MRU) to the tail (LRU) of the list
         * of blocks and assert that the inCachesMask and the
         * boundaries are in a consistent state.
         *
         * @param head the MRU block of the actual cache
         * @param tail the LRU block of the actual cache
         */
        void check(FALRUBlk *head, FALRUBlk *tail);

        /**
         * Register the stats for this object.
         */
        void regStats(std::string name);

      private:
        /** The size of the cache block. */
        const unsigned blkSize;
        /** The size of the smallest cache we are tracking. */
        const unsigned minTrackedSize;
        /** The number of different size caches being tracked. */
        const int numTrackedCaches;
        /** A mask for all caches being tracked. */
        const CachesMask inAllCachesMask;
        /** Array of pointers to blocks at the cache boundaries. */
        FALRUBlk** boundaries;

      protected:
        /**
         * @defgroup FALRUStats Fully Associative LRU specific statistics
         * The FA LRU stack lets us track multiple cache sizes at once. These
         * statistics track the hits and misses for different cache sizes.
         * @{
         */

        /** Hits in each cache */
        Stats::Vector hits;
        /** Misses in each cache */
        Stats::Vector misses;
        /** Total number of accesses */
        Stats::Scalar accesses;

        /**
         * @}
         */
    };
    CacheTracking cacheTracking;
};

#endif // __MEM_CACHE_TAGS_FA_LRU_HH__