// fa_lru.hh — interleaved hgweb diff view of revisions 12773:387fa9e5c9ff and
// 12775:84d56bc8cd8b; the content below is duplicated and needs de-duplication.
1/*
2 * Copyright (c) 2012-2013,2016,2018 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2003-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Erik Hallnor
41 * Nikos Nikoleris
42 */
43
44/**
45 * @file
46 * Declaration of a fully associative LRU tag store.
47 */
48
49#ifndef __MEM_CACHE_TAGS_FA_LRU_HH__
50#define __MEM_CACHE_TAGS_FA_LRU_HH__
51
#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "base/bitfield.hh"
#include "base/intmath.hh"
#include "base/logging.hh"
#include "base/statistics.hh"
#include "base/types.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/tags/base.hh"
#include "mem/packet.hh"
#include "params/FALRU.hh"
66
67// Uncomment to enable sanity checks for the FALRU cache and the
68// TrackedCaches class
69//#define FALRU_DEBUG
70
71class ReplaceableEntry;
72
// A bitmask of the caches we are keeping track of. Currently the
// lowest bit is the smallest cache we are tracking, as it is
// specified by the corresponding parameter. The rest of the bits are
// for exponentially growing cache sizes.
typedef uint32_t CachesMask;
78
79/**
80 * A fully associative cache block.
81 */
82class FALRUBlk : public CacheBlk
83{
84 public:
85 /** The previous block in LRU order. */
86 FALRUBlk *prev;
87 /** The next block in LRU order. */
88 FALRUBlk *next;
89
90 /** A bit mask of the caches that fit this block. */
91 CachesMask inCachesMask;
92};
93
94/**
95 * A fully associative LRU cache. Keeps statistics for accesses to a number of
96 * cache sizes at once.
97 */
98class FALRU : public BaseTags
99{
100 public:
101 /** Typedef the block type used in this class. */
102 typedef FALRUBlk BlkType;
103
104 protected:
105 /** The cache blocks. */
106 FALRUBlk *blks;
107
108 /** The MRU block. */
109 FALRUBlk *head;
110 /** The LRU block. */
111 FALRUBlk *tail;
112
113 /** Hash table type mapping addresses to cache block pointers. */
1/*
2 * Copyright (c) 2012-2013,2016,2018 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2003-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Erik Hallnor
41 * Nikos Nikoleris
42 */
43
44/**
45 * @file
46 * Declaration of a fully associative LRU tag store.
47 */
48
49#ifndef __MEM_CACHE_TAGS_FA_LRU_HH__
50#define __MEM_CACHE_TAGS_FA_LRU_HH__
51
52#include <cstdint>
53#include <functional>
54#include <string>
55#include <unordered_map>
56
57#include "base/bitfield.hh"
58#include "base/intmath.hh"
59#include "base/logging.hh"
60#include "base/statistics.hh"
61#include "base/types.hh"
62#include "mem/cache/blk.hh"
63#include "mem/cache/tags/base.hh"
64#include "mem/packet.hh"
65#include "params/FALRU.hh"
66
67// Uncomment to enable sanity checks for the FALRU cache and the
68// TrackedCaches class
69//#define FALRU_DEBUG
70
71class ReplaceableEntry;
72
73// A bitmask of the caches we are keeping track of. Currently the
74// lowest bit is the smallest cache we are tracking, as it is
75// specified by the corresponding parameter. The rest of the bits are
76// for exponentially growing cache sizes.
77typedef uint32_t CachesMask;
78
79/**
80 * A fully associative cache block.
81 */
82class FALRUBlk : public CacheBlk
83{
84 public:
85 /** The previous block in LRU order. */
86 FALRUBlk *prev;
87 /** The next block in LRU order. */
88 FALRUBlk *next;
89
90 /** A bit mask of the caches that fit this block. */
91 CachesMask inCachesMask;
92};
93
94/**
95 * A fully associative LRU cache. Keeps statistics for accesses to a number of
96 * cache sizes at once.
97 */
98class FALRU : public BaseTags
99{
100 public:
101 /** Typedef the block type used in this class. */
102 typedef FALRUBlk BlkType;
103
104 protected:
105 /** The cache blocks. */
106 FALRUBlk *blks;
107
108 /** The MRU block. */
109 FALRUBlk *head;
110 /** The LRU block. */
111 FALRUBlk *tail;
112
113 /** Hash table type mapping addresses to cache block pointers. */
114 typedef std::unordered_map<Addr, FALRUBlk *, std::hash<Addr> > hash_t;
115 /** Iterator into the address hash table. */
116 typedef hash_t::const_iterator tagIterator;
114 struct PairHash
115 {
116 template <class T1, class T2>
117 std::size_t operator()(const std::pair<T1, T2> &p) const
118 {
119 return std::hash<T1>()(p.first) ^ std::hash<T2>()(p.second);
120 }
121 };
122 typedef std::pair<Addr, bool> TagHashKey;
123 typedef std::unordered_map<TagHashKey, FALRUBlk *, PairHash> TagHash;
117
118 /** The address hash table. */
124
125 /** The address hash table. */
119 hash_t tagHash;
126 TagHash tagHash;
120
121 /**
127
128 /**
122 * Find the cache block for the given address.
123 * @param addr The address to find.
124 * @return The cache block of the address, if any.
125 */
126 FALRUBlk * hashLookup(Addr addr) const;
127
128 /**
129 * Move a cache block to the MRU position.
130 *
131 * @param blk The block to promote.
132 */
133 void moveToHead(FALRUBlk *blk);
134
135 /**
136 * Move a cache block to the LRU position.
137 *
138 * @param blk The block to demote.
139 */
140 void moveToTail(FALRUBlk *blk);
141
142 public:
143 typedef FALRUParams Params;
144
145 /**
146 * Construct and initialize this cache tagstore.
147 */
148 FALRU(const Params *p);
149 ~FALRU();
150
151 /**
152 * Register the stats for this object.
153 */
154 void regStats() override;
155
156 /**
157 * Invalidate a cache block.
158 * @param blk The block to invalidate.
159 */
160 void invalidate(CacheBlk *blk) override;
161
162 /**
163 * Access block and update replacement data. May not succeed, in which
164 * case nullptr pointer is returned. This has all the implications of a
165 * cache access and should only be used as such.
166 * Returns the access latency and inCachesMask flags as a side effect.
167 * @param addr The address to look for.
168 * @param is_secure True if the target memory space is secure.
169 * @param lat The latency of the access.
170 * @param in_cache_mask Mask indicating the caches in which the blk fits.
171 * @return Pointer to the cache block.
172 */
173 CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
174 CachesMask *in_cache_mask);
175
176 /**
177 * Just a wrapper of above function to conform with the base interface.
178 */
179 CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) override;
180
181 /**
182 * Find the block in the cache, do not update the replacement data.
183 * @param addr The address to look for.
184 * @param is_secure True if the target memory space is secure.
185 * @param asid The address space ID.
186 * @return Pointer to the cache block.
187 */
188 CacheBlk* findBlock(Addr addr, bool is_secure) const override;
189
190 /**
191 * Find a block given set and way.
192 *
193 * @param set The set of the block.
194 * @param way The way of the block.
195 * @return The block.
196 */
197 ReplaceableEntry* findBlockBySetAndWay(int set, int way) const override;
198
199 /**
200 * Find replacement victim based on address. The list of evicted blocks
201 * only contains the victim.
202 *
203 * @param addr Address to find a victim for.
204 * @param is_secure True if the target memory space is secure.
205 * @param evict_blks Cache blocks to be evicted.
206 * @return Cache block to be replaced.
207 */
208 CacheBlk* findVictim(Addr addr, const bool is_secure,
209 std::vector<CacheBlk*>& evict_blks) const override;
210
211 /**
212 * Insert the new block into the cache and update replacement data.
213 *
214 * @param pkt Packet holding the address to update
215 * @param blk The block to update.
216 */
217 void insertBlock(const PacketPtr pkt, CacheBlk *blk) override;
218
219 /**
220 * Generate the tag from the addres. For fully associative this is just the
221 * block address.
222 * @param addr The address to get the tag from.
223 * @return The tag.
224 */
225 Addr extractTag(Addr addr) const override
226 {
227 return blkAlign(addr);
228 }
229
230 /**
231 * Regenerate the block address from the tag.
232 *
233 * @param block The block.
234 * @return the block address.
235 */
236 Addr regenerateBlkAddr(const CacheBlk* blk) const override
237 {
238 return blk->tag;
239 }
240
241 void forEachBlk(std::function<void(CacheBlk &)> visitor) override {
242 for (int i = 0; i < numBlocks; i++) {
243 visitor(blks[i]);
244 }
245 }
246
247 bool anyBlk(std::function<bool(CacheBlk &)> visitor) override {
248 for (int i = 0; i < numBlocks; i++) {
249 if (visitor(blks[i])) {
250 return true;
251 }
252 }
253 return false;
254 }
255
256 private:
257 /**
258 * Mechanism that allows us to simultaneously collect miss
259 * statistics for multiple caches. Currently, we keep track of
260 * caches from a set minimum size of interest up to the actual
261 * cache size.
262 */
263 class CacheTracking
264 {
265 public:
266 CacheTracking(unsigned min_size, unsigned max_size,
267 unsigned block_size)
268 : blkSize(block_size),
269 minTrackedSize(min_size),
270 numTrackedCaches(max_size > min_size ?
271 floorLog2(max_size) - floorLog2(min_size) : 0),
272 inAllCachesMask(mask(numTrackedCaches)),
273 boundaries(new FALRUBlk *[numTrackedCaches])
274 {
275 fatal_if(numTrackedCaches > sizeof(CachesMask) * 8,
276 "Not enough bits (%s) in type CachesMask type to keep "
277 "track of %d caches\n", sizeof(CachesMask),
278 numTrackedCaches);
279 }
280
281 ~CacheTracking()
282 {
283 delete[] boundaries;
284 }
285
286 /**
287 * Initialiaze cache blocks and the tracking mechanism
288 *
289 * All blocks in the cache need to be initialized once.
290 *
291 * @param blk the MRU block
292 * @param blk the LRU block
293 */
294 void init(FALRUBlk *head, FALRUBlk *tail);
295
296 /**
297 * Update boundaries as a block will be moved to the MRU.
298 *
299 * For all caches that didn't fit the block before moving it,
300 * we move their boundaries one block closer to the MRU. We
301 * also update InCacheMasks as neccessary.
302 *
303 * @param blk the block that will be moved to the head
304 */
305 void moveBlockToHead(FALRUBlk *blk);
306
307 /**
308 * Update boundaries as a block will be moved to the LRU.
309 *
310 * For all caches that fitted the block before moving it, we
311 * move their boundaries one block closer to the LRU. We
312 * also update InCacheMasks as neccessary.
313 *
314 * @param blk the block that will be moved to the head
315 */
316 void moveBlockToTail(FALRUBlk *blk);
317
318 /**
319 * Notify of a block access.
320 *
321 * This should be called every time a block is accessed and it
322 * updates statistics. If the input block is nullptr then we
323 * treat the access as a miss. The block's InCacheMask
324 * determines the caches in which the block fits.
325 *
326 * @param blk the block to record the access for
327 */
328 void recordAccess(FALRUBlk *blk);
329
330 /**
331 * Check that the tracking mechanism is in consistent state.
332 *
333 * Iterate from the head (MRU) to the tail (LRU) of the list
334 * of blocks and assert the inCachesMask and the boundaries
335 * are in consistent state.
336 *
337 * @param head the MRU block of the actual cache
338 * @param head the LRU block of the actual cache
339 */
340 void check(FALRUBlk *head, FALRUBlk *tail);
341
342 /**
343 * Register the stats for this object.
344 */
345 void regStats(std::string name);
346
347 private:
348 /** The size of the cache block */
349 const unsigned blkSize;
350 /** The smallest cache we are tracking */
351 const unsigned minTrackedSize;
352 /** The number of different size caches being tracked. */
353 const int numTrackedCaches;
354 /** A mask for all cache being tracked. */
355 const CachesMask inAllCachesMask;
356 /** Array of pointers to blocks at the cache boundaries. */
357 FALRUBlk** boundaries;
358
359 protected:
360 /**
361 * @defgroup FALRUStats Fully Associative LRU specific statistics
362 * The FA lru stack lets us track multiple cache sizes at once. These
363 * statistics track the hits and misses for different cache sizes.
364 * @{
365 */
366
367 /** Hits in each cache */
368 Stats::Vector hits;
369 /** Misses in each cache */
370 Stats::Vector misses;
371 /** Total number of accesses */
372 Stats::Scalar accesses;
373
374 /**
375 * @}
376 */
377 };
378 CacheTracking cacheTracking;
379};
380
381#endif // __MEM_CACHE_TAGS_FA_LRU_HH__
129 * Move a cache block to the MRU position.
130 *
131 * @param blk The block to promote.
132 */
133 void moveToHead(FALRUBlk *blk);
134
135 /**
136 * Move a cache block to the LRU position.
137 *
138 * @param blk The block to demote.
139 */
140 void moveToTail(FALRUBlk *blk);
141
142 public:
143 typedef FALRUParams Params;
144
145 /**
146 * Construct and initialize this cache tagstore.
147 */
148 FALRU(const Params *p);
149 ~FALRU();
150
151 /**
152 * Register the stats for this object.
153 */
154 void regStats() override;
155
156 /**
157 * Invalidate a cache block.
158 * @param blk The block to invalidate.
159 */
160 void invalidate(CacheBlk *blk) override;
161
162 /**
163 * Access block and update replacement data. May not succeed, in which
164 * case nullptr pointer is returned. This has all the implications of a
165 * cache access and should only be used as such.
166 * Returns the access latency and inCachesMask flags as a side effect.
167 * @param addr The address to look for.
168 * @param is_secure True if the target memory space is secure.
169 * @param lat The latency of the access.
170 * @param in_cache_mask Mask indicating the caches in which the blk fits.
171 * @return Pointer to the cache block.
172 */
173 CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
174 CachesMask *in_cache_mask);
175
176 /**
177 * Just a wrapper of above function to conform with the base interface.
178 */
179 CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) override;
180
181 /**
182 * Find the block in the cache, do not update the replacement data.
183 * @param addr The address to look for.
184 * @param is_secure True if the target memory space is secure.
185 * @param asid The address space ID.
186 * @return Pointer to the cache block.
187 */
188 CacheBlk* findBlock(Addr addr, bool is_secure) const override;
189
190 /**
191 * Find a block given set and way.
192 *
193 * @param set The set of the block.
194 * @param way The way of the block.
195 * @return The block.
196 */
197 ReplaceableEntry* findBlockBySetAndWay(int set, int way) const override;
198
199 /**
200 * Find replacement victim based on address. The list of evicted blocks
201 * only contains the victim.
202 *
203 * @param addr Address to find a victim for.
204 * @param is_secure True if the target memory space is secure.
205 * @param evict_blks Cache blocks to be evicted.
206 * @return Cache block to be replaced.
207 */
208 CacheBlk* findVictim(Addr addr, const bool is_secure,
209 std::vector<CacheBlk*>& evict_blks) const override;
210
211 /**
212 * Insert the new block into the cache and update replacement data.
213 *
214 * @param pkt Packet holding the address to update
215 * @param blk The block to update.
216 */
217 void insertBlock(const PacketPtr pkt, CacheBlk *blk) override;
218
219 /**
220 * Generate the tag from the addres. For fully associative this is just the
221 * block address.
222 * @param addr The address to get the tag from.
223 * @return The tag.
224 */
225 Addr extractTag(Addr addr) const override
226 {
227 return blkAlign(addr);
228 }
229
230 /**
231 * Regenerate the block address from the tag.
232 *
233 * @param block The block.
234 * @return the block address.
235 */
236 Addr regenerateBlkAddr(const CacheBlk* blk) const override
237 {
238 return blk->tag;
239 }
240
241 void forEachBlk(std::function<void(CacheBlk &)> visitor) override {
242 for (int i = 0; i < numBlocks; i++) {
243 visitor(blks[i]);
244 }
245 }
246
247 bool anyBlk(std::function<bool(CacheBlk &)> visitor) override {
248 for (int i = 0; i < numBlocks; i++) {
249 if (visitor(blks[i])) {
250 return true;
251 }
252 }
253 return false;
254 }
255
256 private:
257 /**
258 * Mechanism that allows us to simultaneously collect miss
259 * statistics for multiple caches. Currently, we keep track of
260 * caches from a set minimum size of interest up to the actual
261 * cache size.
262 */
263 class CacheTracking
264 {
265 public:
266 CacheTracking(unsigned min_size, unsigned max_size,
267 unsigned block_size)
268 : blkSize(block_size),
269 minTrackedSize(min_size),
270 numTrackedCaches(max_size > min_size ?
271 floorLog2(max_size) - floorLog2(min_size) : 0),
272 inAllCachesMask(mask(numTrackedCaches)),
273 boundaries(new FALRUBlk *[numTrackedCaches])
274 {
275 fatal_if(numTrackedCaches > sizeof(CachesMask) * 8,
276 "Not enough bits (%s) in type CachesMask type to keep "
277 "track of %d caches\n", sizeof(CachesMask),
278 numTrackedCaches);
279 }
280
281 ~CacheTracking()
282 {
283 delete[] boundaries;
284 }
285
286 /**
287 * Initialiaze cache blocks and the tracking mechanism
288 *
289 * All blocks in the cache need to be initialized once.
290 *
291 * @param blk the MRU block
292 * @param blk the LRU block
293 */
294 void init(FALRUBlk *head, FALRUBlk *tail);
295
296 /**
297 * Update boundaries as a block will be moved to the MRU.
298 *
299 * For all caches that didn't fit the block before moving it,
300 * we move their boundaries one block closer to the MRU. We
301 * also update InCacheMasks as neccessary.
302 *
303 * @param blk the block that will be moved to the head
304 */
305 void moveBlockToHead(FALRUBlk *blk);
306
307 /**
308 * Update boundaries as a block will be moved to the LRU.
309 *
310 * For all caches that fitted the block before moving it, we
311 * move their boundaries one block closer to the LRU. We
312 * also update InCacheMasks as neccessary.
313 *
314 * @param blk the block that will be moved to the head
315 */
316 void moveBlockToTail(FALRUBlk *blk);
317
318 /**
319 * Notify of a block access.
320 *
321 * This should be called every time a block is accessed and it
322 * updates statistics. If the input block is nullptr then we
323 * treat the access as a miss. The block's InCacheMask
324 * determines the caches in which the block fits.
325 *
326 * @param blk the block to record the access for
327 */
328 void recordAccess(FALRUBlk *blk);
329
330 /**
331 * Check that the tracking mechanism is in consistent state.
332 *
333 * Iterate from the head (MRU) to the tail (LRU) of the list
334 * of blocks and assert the inCachesMask and the boundaries
335 * are in consistent state.
336 *
337 * @param head the MRU block of the actual cache
338 * @param head the LRU block of the actual cache
339 */
340 void check(FALRUBlk *head, FALRUBlk *tail);
341
342 /**
343 * Register the stats for this object.
344 */
345 void regStats(std::string name);
346
347 private:
348 /** The size of the cache block */
349 const unsigned blkSize;
350 /** The smallest cache we are tracking */
351 const unsigned minTrackedSize;
352 /** The number of different size caches being tracked. */
353 const int numTrackedCaches;
354 /** A mask for all cache being tracked. */
355 const CachesMask inAllCachesMask;
356 /** Array of pointers to blocks at the cache boundaries. */
357 FALRUBlk** boundaries;
358
359 protected:
360 /**
361 * @defgroup FALRUStats Fully Associative LRU specific statistics
362 * The FA lru stack lets us track multiple cache sizes at once. These
363 * statistics track the hits and misses for different cache sizes.
364 * @{
365 */
366
367 /** Hits in each cache */
368 Stats::Vector hits;
369 /** Misses in each cache */
370 Stats::Vector misses;
371 /** Total number of accesses */
372 Stats::Scalar accesses;
373
374 /**
375 * @}
376 */
377 };
378 CacheTracking cacheTracking;
379};
380
381#endif // __MEM_CACHE_TAGS_FA_LRU_HH__