/*
 * Copyright (c) 2012-2013,2016,2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declaration of a fully associative LRU tag store.
 */

#ifndef __MEM_CACHE_TAGS_FA_LRU_HH__
#define __MEM_CACHE_TAGS_FA_LRU_HH__

#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>
#include <vector>

#include "base/bitfield.hh"
#include "base/intmath.hh"
#include "base/logging.hh"
#include "base/statistics.hh"
#include "base/types.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/tags/base.hh"
#include "params/FALRU.hh"

// Uncomment to enable sanity checks for the FALRU cache and the
// CacheTracking class
//#define FALRU_DEBUG

class BaseCache;
class ReplaceableEntry;

// A bitmask of the caches we are keeping track of. Currently the
// lowest bit is the smallest cache we are tracking, as it is
// specified by the corresponding parameter. The rest of the bits are
// for exponentially growing cache sizes.
typedef uint32_t CachesMask;
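
// For example, if the smallest tracked cache size is 4KiB, bit 0 stands
// for a 4KiB cache, bit 1 for an 8KiB cache, bit 2 for a 16KiB cache, and
// so on up to the actual cache size. A minimal sketch (blk being any
// FALRUBlk pointer and i a tracked-cache index, both hypothetical) of
// testing whether a block would have hit in the i-th tracked cache:
//
//     bool hit_in_cache_i = (blk->inCachesMask & (1U << i)) != 0;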

/**
 * A fully associative cache block.
 */
class FALRUBlk : public CacheBlk
{
  public:
    FALRUBlk() : CacheBlk(), prev(nullptr), next(nullptr), inCachesMask(0) {}

    /** The previous block in LRU order. */
    FALRUBlk *prev;
    /** The next block in LRU order. */
    FALRUBlk *next;

    /** A bit mask of the tracked caches in which this block fits. */
    CachesMask inCachesMask;

    /**
     * Pretty-print inCachesMask and other CacheBlk information.
     *
     * @return string with basic state information
     */
    std::string print() const override;
};

/**
 * A fully associative LRU cache. Keeps statistics for accesses to a number of
 * cache sizes at once.
 */
class FALRU : public BaseTags
{
  public:
    /** Typedef the block type used in this class. */
    typedef FALRUBlk BlkType;

  protected:
    /** The cache blocks. */
    FALRUBlk *blks;

    /** The MRU block. */
    FALRUBlk *head;
    /** The LRU block. */
    FALRUBlk *tail;

    /**
     * Hash table type mapping (block address, secure bit) keys to cache
     * block pointers.
     */
    struct PairHash
    {
        template <class T1, class T2>
        std::size_t operator()(const std::pair<T1, T2> &p) const
        {
            return std::hash<T1>()(p.first) ^ std::hash<T2>()(p.second);
        }
    };
    typedef std::pair<Addr, bool> TagHashKey;
    typedef std::unordered_map<TagHashKey, FALRUBlk *, PairHash> TagHash;
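
    // Illustrative lookup sketch (not the implementation): a block is
    // located by hashing the block-aligned address together with the
    // secure bit, so secure and non-secure blocks at the same address
    // never collide. Assuming addr and is_secure come from the request:
    //
    //     auto iter = tagHash.find(std::make_pair(blkAlign(addr), is_secure));
    //     FALRUBlk *blk = (iter != tagHash.end()) ? iter->second : nullptr;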

    /** The address hash table. */
    TagHash tagHash;

    /**
     * Move a cache block to the MRU position.
     *
     * @param blk The block to promote.
     */
    void moveToHead(FALRUBlk *blk);

    /**
     * Move a cache block to the LRU position.
     *
     * @param blk The block to demote.
     */
    void moveToTail(FALRUBlk *blk);

  public:
    typedef FALRUParams Params;

    /**
     * Construct and initialize this cache tag store.
     */
    FALRU(const Params *p);
    ~FALRU();

    /**
     * Initialize blocks and set the parent cache back pointer.
     *
     * @param _cache Pointer to parent cache.
     */
    void tagsInit(BaseCache *_cache) override;

    /**
     * Register the stats for this object.
     */
    void regStats() override;

    /**
     * Invalidate a cache block.
     * @param blk The block to invalidate.
     */
    void invalidate(CacheBlk *blk) override;

    /**
     * Access block and update replacement data. May not succeed, in which
     * case nullptr is returned. This has all the implications of a cache
     * access and should only be used as such.
     * Returns the tag lookup latency and the inCachesMask flags as side
     * effects.
     *
     * @param addr The address to look for.
     * @param is_secure True if the target memory space is secure.
     * @param lat The latency of the tag lookup.
     * @param in_cache_mask Mask indicating the caches in which the blk fits.
     * @return Pointer to the cache block.
     */
    CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
                          CachesMask *in_cache_mask);

    /**
     * A wrapper around the accessBlock() above, provided to conform with
     * the base-class interface.
     */
    CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) override;
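
    // Illustrative caller sketch (hypothetical variable names): look a
    // block up on the demand path, collecting the tag-lookup latency and,
    // optionally, the mask of tracked caches the access would have hit in.
    //
    //     Cycles lat;
    //     CachesMask in_caches;
    //     CacheBlk *blk = accessBlock(pkt->getAddr(), pkt->isSecure(),
    //                                 lat, &in_caches);
    //     if (!blk) {
    //         // Miss in the actual cache; a victim is picked via findVictim().
    //     }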

    /**
     * Find the block in the cache; do not update the replacement data.
     *
     * @param addr The address to look for.
     * @param is_secure True if the target memory space is secure.
     * @return Pointer to the cache block.
     */
    CacheBlk* findBlock(Addr addr, bool is_secure) const override;

    /**
     * Find a block given set and way.
     *
     * @param set The set of the block.
     * @param way The way of the block.
     * @return The block.
     */
    ReplaceableEntry* findBlockBySetAndWay(int set, int way) const override;

    /**
     * Find replacement victim based on address. The list of evicted blocks
     * only contains the victim.
     *
     * @param addr Address to find a victim for.
     * @param is_secure True if the target memory space is secure.
     * @param evict_blks Cache blocks to be evicted.
     * @return Cache block to be replaced.
     */
    CacheBlk* findVictim(Addr addr, const bool is_secure,
                         std::vector<CacheBlk*>& evict_blks) const override;
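
    // Illustrative sketch of the replacement flow (hypothetical variable
    // names): for a fully associative LRU cache the victim is the LRU
    // (tail) block, and the evicted-block list holds just that block.
    //
    //     std::vector<CacheBlk*> evict_blks;
    //     CacheBlk *victim = findVictim(pkt->getAddr(), pkt->isSecure(),
    //                                   evict_blks);
    //     // evict_blks contains exactly one entry: the victim itself.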

    /**
     * Insert the new block into the cache and update replacement data.
     *
     * @param addr Address of the block.
     * @param is_secure Whether the block is in secure space or not.
     * @param src_master_ID The source requestor ID.
     * @param task_ID The new task ID.
     * @param blk The block to update.
     */
    void insertBlock(const Addr addr, const bool is_secure,
                     const int src_master_ID, const uint32_t task_ID,
                     CacheBlk *blk) override;

    /**
     * Generate the tag from the address. For a fully associative cache
     * this is simply the block-aligned address.
     *
     * @param addr The address to get the tag from.
     * @return The tag.
     */
    Addr extractTag(Addr addr) const override
    {
        return blkAlign(addr);
    }
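
    // Worked example (assuming 64-byte blocks): blkAlign() clears the
    // block-offset bits, so extractTag(0x12345) yields 0x12340, and any
    // two addresses within the same block map to the same tag.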

    /**
     * Regenerate the block address from the tag.
     *
     * @param blk The block.
     * @return The block address.
     */
    Addr regenerateBlkAddr(const CacheBlk* blk) const override
    {
        return blk->tag;
    }

    void forEachBlk(std::function<void(CacheBlk &)> visitor) override {
        for (int i = 0; i < numBlocks; i++) {
            visitor(blks[i]);
        }
    }

    bool anyBlk(std::function<bool(CacheBlk &)> visitor) override {
        for (int i = 0; i < numBlocks; i++) {
            if (visitor(blks[i])) {
                return true;
            }
        }
        return false;
    }

  private:
    /**
     * Mechanism that allows us to simultaneously collect miss
     * statistics for multiple caches. Currently, we keep track of
     * caches from a set minimum size of interest up to the actual
     * cache size.
     */
    class CacheTracking
    {
      public:
        CacheTracking(unsigned min_size, unsigned max_size,
                      unsigned block_size)
            : blkSize(block_size),
              minTrackedSize(min_size),
              numTrackedCaches(max_size > min_size ?
                               floorLog2(max_size) - floorLog2(min_size) : 0),
              inAllCachesMask(mask(numTrackedCaches)),
              boundaries(numTrackedCaches)
        {
            fatal_if(numTrackedCaches > sizeof(CachesMask) * 8,
                     "Not enough bits (%s) in CachesMask type to keep "
                     "track of %d caches\n", sizeof(CachesMask),
                     numTrackedCaches);
        }
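
        // Worked example with hypothetical sizes: for min_size = 4KiB and
        // max_size = 64KiB, numTrackedCaches = floorLog2(64KiB) -
        // floorLog2(4KiB) = 16 - 12 = 4, so inAllCachesMask = mask(4) =
        // 0b1111 and four boundary pointers are maintained.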

        /**
         * Initialize cache blocks and the tracking mechanism.
         *
         * All blocks in the cache need to be initialized once.
         *
         * @param head the MRU block
         * @param tail the LRU block
         */
        void init(FALRUBlk *head, FALRUBlk *tail);

        /**
         * Update boundaries as a block will be moved to the MRU position.
         *
         * For all caches that didn't fit the block before moving it,
         * we move their boundaries one block closer to the MRU. We
         * also update the inCachesMask fields as necessary.
         *
         * @param blk the block that will be moved to the head
         */
        void moveBlockToHead(FALRUBlk *blk);

        /**
         * Update boundaries as a block will be moved to the LRU position.
         *
         * For all caches that fit the block before moving it, we
         * move their boundaries one block closer to the LRU. We
         * also update the inCachesMask fields as necessary.
         *
         * @param blk the block that will be moved to the tail
         */
        void moveBlockToTail(FALRUBlk *blk);
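
        // Illustrative sketch of the boundary update (not necessarily the
        // exact implementation): when a block moves to the MRU position,
        // every tracked cache that did not previously hold it gains it, so
        // that cache's boundary slides one block towards the MRU and the
        // displaced boundary block drops out of that cache.
        //
        //     CachesMask to_update = inAllCachesMask ^ blk->inCachesMask;
        //     for (int i = 0; i < numTrackedCaches; i++) {
        //         if (to_update & (1U << i)) {
        //             boundaries[i]->inCachesMask &= ~(1U << i);
        //             boundaries[i] = boundaries[i]->prev;
        //         }
        //     }
        //     blk->inCachesMask = inAllCachesMask;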

        /**
         * Notify of a block access.
         *
         * This should be called every time a block is accessed and it
         * updates statistics. If the input block is nullptr then we
         * treat the access as a miss. The block's inCachesMask
         * determines the caches in which the block fits.
         *
         * @param blk the block to record the access for
         */
        void recordAccess(FALRUBlk *blk);
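
        // Illustrative sketch of how an access might be scored against
        // each tracked cache size (a sketch, not necessarily the exact
        // implementation): a tracked cache counts a hit only if the block
        // exists and its inCachesMask bit for that cache is set, otherwise
        // it counts a miss.
        //
        //     for (int i = 0; i < numTrackedCaches; i++) {
        //         if (blk && (blk->inCachesMask & (1U << i)))
        //             hits[i]++;
        //         else
        //             misses[i]++;
        //     }
        //     accesses++;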

        /**
         * Check that the tracking mechanism is in a consistent state.
         *
         * Iterate from the head (MRU) to the tail (LRU) of the list
         * of blocks and assert that the inCachesMask and the boundaries
         * are in a consistent state.
         *
         * @param head the MRU block of the actual cache
         * @param tail the LRU block of the actual cache
         */
        void check(const FALRUBlk *head, const FALRUBlk *tail) const;

        /**
         * Register the stats for this object.
         */
        void regStats(std::string name);

      private:
        /** The size of a cache block. */
        const unsigned blkSize;
        /** The size of the smallest cache being tracked. */
        const unsigned minTrackedSize;
        /** The number of different size caches being tracked. */
        const int numTrackedCaches;
        /** A mask with one bit set for each cache being tracked. */
        const CachesMask inAllCachesMask;
        /** Array of pointers to blocks at the cache boundaries. */
        std::vector<FALRUBlk*> boundaries;

      protected:
        /**
         * @defgroup FALRUStats Fully Associative LRU specific statistics
         * The FA LRU stack lets us track multiple cache sizes at once. These
         * statistics track the hits and misses for different cache sizes.
         * @{
         */

        /** Hits in each tracked cache. */
        Stats::Vector hits;
        /** Misses in each tracked cache. */
        Stats::Vector misses;
        /** Total number of accesses. */
        Stats::Scalar accesses;

        /**
         * @}
         */
    };
    /** The cache-size tracking mechanism for this tag store. */
    CacheTracking cacheTracking;
};

#endif // __MEM_CACHE_TAGS_FA_LRU_HH__