// fa_lru.hh revision 13419:aaadcfae091a
/*
 * Copyright (c) 2012-2013,2016,2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declaration of a fully associative LRU tag store.
 */

49767SN/A#ifndef __MEM_CACHE_TAGS_FA_LRU_HH__
50887SN/A#define __MEM_CACHE_TAGS_FA_LRU_HH__
51887SN/A
52887SN/A#include <cstdint>
53887SN/A#include <functional>
54887SN/A#include <string>
55767SN/A#include <unordered_map>
56798SN/A#include <vector>
57798SN/A
58798SN/A#include "base/bitfield.hh"
59798SN/A#include "base/intmath.hh"
60798SN/A#include "base/logging.hh"
61767SN/A#include "base/statistics.hh"
62798SN/A#include "base/types.hh"
63798SN/A#include "mem/cache/cache_blk.hh"
64798SN/A#include "mem/cache/tags/base.hh"
65798SN/A#include "params/FALRU.hh"
66798SN/A
67767SN/A// Uncomment to enable sanity checks for the FALRU cache and the
68798SN/A// TrackedCaches class
69798SN/A//#define FALRU_DEBUG
70798SN/A
71798SN/Aclass BaseCache;
72798SN/Aclass ReplaceableEntry;
73767SN/A
74831SN/A// A bitmask of the caches we are keeping track of. Currently the
751290SN/A// lowest bit is the smallest cache we are tracking, as it is
761290SN/A// specified by the corresponding parameter. The rest of the bits are
77831SN/A// for exponentially growing cache sizes.
781290SN/Atypedef uint32_t CachesMask;
791290SN/A
80767SN/A/**
81767SN/A * A fully associative cache block.
824762Snate@binkert.org */
83885SN/Aclass FALRUBlk : public CacheBlk
84885SN/A{
85885SN/A  public:
862523SN/A    FALRUBlk() : CacheBlk(), prev(nullptr), next(nullptr), inCachesMask(0) {}
87885SN/A
884762Snate@binkert.org    /** The previous block in LRU order. */
894762Snate@binkert.org    FALRUBlk *prev;
904762Snate@binkert.org    /** The next block in LRU order. */
914762Snate@binkert.org    FALRUBlk *next;
924762Snate@binkert.org
934762Snate@binkert.org    /** A bit mask of the caches that fit this block. */
944762Snate@binkert.org    CachesMask inCachesMask;
95767SN/A
963349SN/A    /**
97885SN/A     * Pretty-print inCachesMask and other CacheBlk information.
983349SN/A     *
99767SN/A     * @return string with basic state information
100885SN/A     */
101885SN/A    std::string print() const override;
102885SN/A};
103831SN/A
104885SN/A/**
105885SN/A * A fully associative LRU cache. Keeps statistics for accesses to a number of
106885SN/A * cache sizes at once.
107885SN/A */
108885SN/Aclass FALRU : public BaseTags
109817SN/A{
110885SN/A  public:
111885SN/A    /** Typedef the block type used in this class. */
112885SN/A    typedef FALRUBlk BlkType;
113885SN/A
114885SN/A  protected:
115817SN/A    /** The cache blocks. */
116777SN/A    FALRUBlk *blks;
117885SN/A
1181290SN/A    /** The MRU block. */
1191290SN/A    FALRUBlk *head;
1201290SN/A    /** The LRU block. */
1211290SN/A    FALRUBlk *tail;
1221290SN/A
1231290SN/A    /** Hash table type mapping addresses to cache block pointers. */
1241290SN/A    struct PairHash
1251763SN/A    {
1261290SN/A        template <class T1, class T2>
1271290SN/A        std::size_t operator()(const std::pair<T1, T2> &p) const
1281290SN/A        {
1291290SN/A            return std::hash<T1>()(p.first) ^ std::hash<T2>()(p.second);
1301290SN/A        }
1311290SN/A    };
1321290SN/A    typedef std::pair<Addr, bool> TagHashKey;
1331290SN/A    typedef std::unordered_map<TagHashKey, FALRUBlk *, PairHash> TagHash;
1341290SN/A
1351290SN/A    /** The address hash table. */
1361290SN/A    TagHash tagHash;
137885SN/A
138885SN/A    /**
139885SN/A     * Move a cache block to the MRU position.
140767SN/A     *
141885SN/A     * @param blk The block to promote.
142885SN/A     */
143885SN/A    void moveToHead(FALRUBlk *blk);
144885SN/A
145885SN/A    /**
146885SN/A     * Move a cache block to the LRU position.
147767SN/A     *
148909SN/A     * @param blk The block to demote.
149767SN/A     */
150767SN/A    void moveToTail(FALRUBlk *blk);
151767SN/A
152  public:
153    typedef FALRUParams Params;
154
155    /**
156     * Construct and initialize this cache tagstore.
157     */
158    FALRU(const Params *p);
159    ~FALRU();
160
161    /**
162     * Initialize blocks as FALRUBlk instances.
163     */
164    void tagsInit() override;
165
166    /**
167     * Register the stats for this object.
168     */
169    void regStats() override;
170
171    /**
172     * Invalidate a cache block.
173     * @param blk The block to invalidate.
174     */
175    void invalidate(CacheBlk *blk) override;
176
177    /**
178     * Access block and update replacement data.  May not succeed, in which
179     * case nullptr pointer is returned.  This has all the implications of a
180     * cache access and should only be used as such.
181     * Returns tag lookup latency and the inCachesMask flags as a side effect.
182     *
183     * @param addr The address to look for.
184     * @param is_secure True if the target memory space is secure.
185     * @param lat The latency of the tag lookup.
186     * @param in_cache_mask Mask indicating the caches in which the blk fits.
187     * @return Pointer to the cache block.
188     */
189    CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
190                          CachesMask *in_cache_mask);
191
192    /**
193     * Just a wrapper of above function to conform with the base interface.
194     */
195    CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) override;
196
197    /**
198     * Find the block in the cache, do not update the replacement data.
199     * @param addr The address to look for.
200     * @param is_secure True if the target memory space is secure.
201     * @param asid The address space ID.
202     * @return Pointer to the cache block.
203     */
204    CacheBlk* findBlock(Addr addr, bool is_secure) const override;
205
206    /**
207     * Find a block given set and way.
208     *
209     * @param set The set of the block.
210     * @param way The way of the block.
211     * @return The block.
212     */
213    ReplaceableEntry* findBlockBySetAndWay(int set, int way) const override;
214
215    /**
216     * Find replacement victim based on address. The list of evicted blocks
217     * only contains the victim.
218     *
219     * @param addr Address to find a victim for.
220     * @param is_secure True if the target memory space is secure.
221     * @param evict_blks Cache blocks to be evicted.
222     * @return Cache block to be replaced.
223     */
224    CacheBlk* findVictim(Addr addr, const bool is_secure,
225                         std::vector<CacheBlk*>& evict_blks) const override;
226
227    /**
228     * Insert the new block into the cache and update replacement data.
229     *
230     * @param addr Address of the block.
231     * @param is_secure Whether the block is in secure space or not.
232     * @param src_master_ID The source requestor ID.
233     * @param task_ID The new task ID.
234     * @param blk The block to update.
235     */
236    void insertBlock(const Addr addr, const bool is_secure,
237                     const int src_master_ID, const uint32_t task_ID,
238                     CacheBlk *blk) override;
239
240    /**
241     * Generate the tag from the addres. For fully associative this is just the
242     * block address.
243     * @param addr The address to get the tag from.
244     * @return The tag.
245     */
246    Addr extractTag(Addr addr) const override
247    {
248        return blkAlign(addr);
249    }
250
251    /**
252     * Regenerate the block address from the tag.
253     *
254     * @param block The block.
255     * @return the block address.
256     */
257    Addr regenerateBlkAddr(const CacheBlk* blk) const override
258    {
259        return blk->tag;
260    }
261
262    void forEachBlk(std::function<void(CacheBlk &)> visitor) override {
263        for (int i = 0; i < numBlocks; i++) {
264            visitor(blks[i]);
265        }
266    }
267
268    bool anyBlk(std::function<bool(CacheBlk &)> visitor) override {
269        for (int i = 0; i < numBlocks; i++) {
270            if (visitor(blks[i])) {
271                return true;
272            }
273        }
274        return false;
275    }
276
277  private:
278    /**
279     * Mechanism that allows us to simultaneously collect miss
280     * statistics for multiple caches. Currently, we keep track of
281     * caches from a set minimum size of interest up to the actual
282     * cache size.
283     */
284    class CacheTracking
285    {
286      public:
287        CacheTracking(unsigned min_size, unsigned max_size,
288                      unsigned block_size)
289            : blkSize(block_size),
290              minTrackedSize(min_size),
291              numTrackedCaches(max_size > min_size ?
292                               floorLog2(max_size) - floorLog2(min_size) : 0),
293              inAllCachesMask(mask(numTrackedCaches)),
294              boundaries(numTrackedCaches)
295        {
296            fatal_if(numTrackedCaches > sizeof(CachesMask) * 8,
297                     "Not enough bits (%s) in type CachesMask type to keep "
298                     "track of %d caches\n", sizeof(CachesMask),
299                     numTrackedCaches);
300        }
301
302        /**
303         * Initialiaze cache blocks and the tracking mechanism
304         *
305         * All blocks in the cache need to be initialized once.
306         *
307         * @param blk the MRU block
308         * @param blk the LRU block
309         */
310        void init(FALRUBlk *head, FALRUBlk *tail);
311
312        /**
313         * Update boundaries as a block will be moved to the MRU.
314         *
315         * For all caches that didn't fit the block before moving it,
316         * we move their boundaries one block closer to the MRU. We
317         * also update InCacheMasks as neccessary.
318         *
319         * @param blk the block that will be moved to the head
320         */
321        void moveBlockToHead(FALRUBlk *blk);
322
323        /**
324         * Update boundaries as a block will be moved to the LRU.
325         *
326         * For all caches that fitted the block before moving it, we
327         * move their boundaries one block closer to the LRU. We
328         * also update InCacheMasks as neccessary.
329         *
330         * @param blk the block that will be moved to the head
331         */
332        void moveBlockToTail(FALRUBlk *blk);
333
334        /**
335         * Notify of a block access.
336         *
337         * This should be called every time a block is accessed and it
338         * updates statistics. If the input block is nullptr then we
339         * treat the access as a miss. The block's InCacheMask
340         * determines the caches in which the block fits.
341         *
342         * @param blk the block to record the access for
343         */
344        void recordAccess(FALRUBlk *blk);
345
346        /**
347         * Check that the tracking mechanism is in consistent state.
348         *
349         * Iterate from the head (MRU) to the tail (LRU) of the list
350         * of blocks and assert the inCachesMask and the boundaries
351         * are in consistent state.
352         *
353         * @param head the MRU block of the actual cache
354         * @param head the LRU block of the actual cache
355         */
356        void check(const FALRUBlk *head, const FALRUBlk *tail) const;
357
358        /**
359         * Register the stats for this object.
360         */
361        void regStats(std::string name);
362
363      private:
364        /** The size of the cache block */
365        const unsigned blkSize;
366        /** The smallest cache we are tracking */
367        const unsigned minTrackedSize;
368        /** The number of different size caches being tracked. */
369        const int numTrackedCaches;
370        /** A mask for all cache being tracked. */
371        const CachesMask inAllCachesMask;
372        /** Array of pointers to blocks at the cache boundaries. */
373        std::vector<FALRUBlk*> boundaries;
374
375      protected:
376        /**
377         * @defgroup FALRUStats Fully Associative LRU specific statistics
378         * The FA lru stack lets us track multiple cache sizes at once. These
379         * statistics track the hits and misses for different cache sizes.
380         * @{
381         */
382
383        /** Hits in each cache */
384        Stats::Vector hits;
385        /** Misses in each cache */
386        Stats::Vector misses;
387        /** Total number of accesses */
388        Stats::Scalar accesses;
389
390        /**
391         * @}
392         */
393    };
394    CacheTracking cacheTracking;
395};
396
397#endif // __MEM_CACHE_TAGS_FA_LRU_HH__
398