fa_lru.hh (revision 13162:b6a5d452d52d → 13163:55923cb33a7e)
/*
 * Copyright (c) 2012-2013,2016,2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declaration of a fully associative LRU tag store.
 */

#ifndef __MEM_CACHE_TAGS_FA_LRU_HH__
#define __MEM_CACHE_TAGS_FA_LRU_HH__

#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>
#include <vector>

#include "base/bitfield.hh"
#include "base/intmath.hh"
#include "base/logging.hh"
#include "base/statistics.hh"
#include "base/types.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/tags/base.hh"
#include "mem/packet.hh"
#include "params/FALRU.hh"

// Uncomment to enable sanity checks for the FALRU cache and the
// TrackedCaches class
//#define FALRU_DEBUG

class ReplaceableEntry;

// A bitmask of the caches we are keeping track of. Currently the
// lowest bit is the smallest cache we are tracking, as it is
// specified by the corresponding parameter. The rest of the bits are
// for exponentially growing cache sizes.
typedef uint32_t CachesMask;
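// For example (purely illustrative sizes, not a requirement of the code):
// with a 4 KiB minimum tracked size and a 64 KiB cache, bit 0 of a
// CachesMask stands for the 4 KiB cache, bit 1 for 8 KiB, bit 2 for 16 KiB
// and bit 3 for 32 KiB. A block whose inCachesMask is 0b1100 would then be
// resident in the 16 KiB and 32 KiB tracked caches but not in the two
// smaller ones.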

/**
 * A fully associative cache block.
 */
class FALRUBlk : public CacheBlk
{
  public:
    FALRUBlk() : CacheBlk(), prev(nullptr), next(nullptr), inCachesMask(0) {}

    /** The previous block in LRU order. */
    FALRUBlk *prev;
    /** The next block in LRU order. */
    FALRUBlk *next;

    /** A bit mask of the caches that fit this block. */
    CachesMask inCachesMask;
};

/**
 * A fully associative LRU cache. Keeps statistics for accesses to a number of
 * cache sizes at once.
 */
class FALRU : public BaseTags
{
  public:
    /** Typedef the block type used in this class. */
    typedef FALRUBlk BlkType;

  protected:
    /** The cache blocks. */
    FALRUBlk *blks;

    /** The MRU block. */
    FALRUBlk *head;
    /** The LRU block. */
    FALRUBlk *tail;

    /** Hash table type mapping addresses to cache block pointers. */
    struct PairHash
    {
        template <class T1, class T2>
        std::size_t operator()(const std::pair<T1, T2> &p) const
        {
            return std::hash<T1>()(p.first) ^ std::hash<T2>()(p.second);
        }
    };
    typedef std::pair<Addr, bool> TagHashKey;
    typedef std::unordered_map<TagHashKey, FALRUBlk *, PairHash> TagHash;

    /** The address hash table. */
    TagHash tagHash;
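
    // A sketch of intended use (the actual call sites live in fa_lru.cc):
    // a lookup for address addr in the secure space would presumably take
    // the form tagHash.find(std::make_pair(extractTag(addr), is_secure)),
    // since the key pairs the block-aligned address with the secure bit,
    // keeping secure and non-secure blocks with the same address distinct.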

    /**
     * Move a cache block to the MRU position.
     *
     * @param blk The block to promote.
     */
    void moveToHead(FALRUBlk *blk);

    /**
     * Move a cache block to the LRU position.
     *
     * @param blk The block to demote.
     */
    void moveToTail(FALRUBlk *blk);

  public:
    typedef FALRUParams Params;

    /**
     * Construct and initialize this cache tagstore.
     */
    FALRU(const Params *p);
    ~FALRU();

    /**
     * Register the stats for this object.
     */
    void regStats() override;

    /**
     * Invalidate a cache block.
     * @param blk The block to invalidate.
     */
    void invalidate(CacheBlk *blk) override;

    /**
     * Access block and update replacement data. May not succeed, in which
     * case nullptr is returned. This has all the implications of a
     * cache access and should only be used as such.
     * Returns the access latency and inCachesMask flags as a side effect.
     * @param addr The address to look for.
     * @param is_secure True if the target memory space is secure.
     * @param lat The latency of the access.
     * @param in_cache_mask Mask indicating the caches in which the blk fits.
     * @return Pointer to the cache block.
     */
    CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
                          CachesMask *in_cache_mask);

    /**
     * Just a wrapper of the above function to conform with the base
     * interface.
     */
    CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) override;

    /**
     * Find the block in the cache, do not update the replacement data.
     * @param addr The address to look for.
     * @param is_secure True if the target memory space is secure.
     * @return Pointer to the cache block.
     */
    CacheBlk* findBlock(Addr addr, bool is_secure) const override;

    /**
     * Find a block given set and way.
     *
     * @param set The set of the block.
     * @param way The way of the block.
     * @return The block.
     */
    ReplaceableEntry* findBlockBySetAndWay(int set, int way) const override;

    /**
     * Find replacement victim based on address. The list of evicted blocks
     * only contains the victim.
     *
     * @param addr Address to find a victim for.
     * @param is_secure True if the target memory space is secure.
     * @param evict_blks Cache blocks to be evicted.
     * @return Cache block to be replaced.
     */
    CacheBlk* findVictim(Addr addr, const bool is_secure,
                         std::vector<CacheBlk*>& evict_blks) const override;

    /**
     * Insert the new block into the cache and update replacement data.
     *
     * @param pkt Packet holding the address to update
     * @param blk The block to update.
     */
    void insertBlock(const PacketPtr pkt, CacheBlk *blk) override;

    /**
     * Generate the tag from the address. For fully associative caches this
     * is just the block address.
     * @param addr The address to get the tag from.
     * @return The tag.
     */
    Addr extractTag(Addr addr) const override
    {
        return blkAlign(addr);
    }

    /**
     * Regenerate the block address from the tag.
     *
     * @param blk The block.
     * @return The block address.
     */
    Addr regenerateBlkAddr(const CacheBlk* blk) const override
    {
        return blk->tag;
    }
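
    // For example, with 64-byte blocks (an illustrative assumption),
    // extractTag(0x12345) yields the block-aligned address 0x12340, and
    // regenerateBlkAddr() simply hands back the stored tag, since a fully
    // associative cache keeps the whole block-aligned address as the tag.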

    void forEachBlk(std::function<void(CacheBlk &)> visitor) override {
        for (int i = 0; i < numBlocks; i++) {
            visitor(blks[i]);
        }
    }

    bool anyBlk(std::function<bool(CacheBlk &)> visitor) override {
        for (int i = 0; i < numBlocks; i++) {
            if (visitor(blks[i])) {
                return true;
            }
        }
        return false;
    }

  private:
    /**
     * Mechanism that allows us to simultaneously collect miss
     * statistics for multiple caches. Currently, we keep track of
     * caches from a set minimum size of interest up to the actual
     * cache size.
     */
    class CacheTracking
    {
      public:
        CacheTracking(unsigned min_size, unsigned max_size,
                      unsigned block_size)
            : blkSize(block_size),
              minTrackedSize(min_size),
              numTrackedCaches(max_size > min_size ?
                               floorLog2(max_size) - floorLog2(min_size) : 0),
              inAllCachesMask(mask(numTrackedCaches)),
              boundaries(numTrackedCaches)
        {
            fatal_if(numTrackedCaches > sizeof(CachesMask) * 8,
                     "Not enough bits (%s) in type CachesMask to keep "
                     "track of %d caches\n", sizeof(CachesMask),
                     numTrackedCaches);
        }

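        // For example (purely illustrative sizes): tracking caches from a
        // 2 KiB minimum up to a 16 KiB actual cache gives numTrackedCaches
        // = floorLog2(16384) - floorLog2(2048) = 3 (the 2, 4 and 8 KiB
        // caches), inAllCachesMask = 0b111, and a boundaries vector with
        // one entry per tracked cache.
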
        /**
         * Initialize the cache blocks and the tracking mechanism.
         *
         * All blocks in the cache need to be initialized once.
         *
         * @param head the MRU block
         * @param tail the LRU block
         */
        void init(FALRUBlk *head, FALRUBlk *tail);

        /**
         * Update boundaries as a block will be moved to the MRU.
         *
         * For all caches that didn't fit the block before moving it,
         * we move their boundaries one block closer to the MRU. We
         * also update InCacheMasks as necessary.
         *
         * @param blk the block that will be moved to the head
         */
        void moveBlockToHead(FALRUBlk *blk);

        /**
         * Update boundaries as a block will be moved to the LRU.
         *
         * For all caches that fitted the block before moving it, we
         * move their boundaries one block closer to the LRU. We
         * also update InCacheMasks as necessary.
         *
         * @param blk the block that will be moved to the tail
         */
        void moveBlockToTail(FALRUBlk *blk);

        /**
         * Notify of a block access.
         *
         * This should be called every time a block is accessed and it
         * updates statistics. If the input block is nullptr then we
         * treat the access as a miss. The block's InCacheMask
         * determines the caches in which the block fits.
         *
         * @param blk the block to record the access for
         */
        void recordAccess(FALRUBlk *blk);

        /**
         * Check that the tracking mechanism is in a consistent state.
         *
         * Iterate from the head (MRU) to the tail (LRU) of the list
         * of blocks and assert that the inCachesMask and the boundaries
         * are in a consistent state.
         *
         * @param head the MRU block of the actual cache
         * @param tail the LRU block of the actual cache
         */
        void check(FALRUBlk *head, FALRUBlk *tail);

        /**
         * Register the stats for this object.
         */
        void regStats(std::string name);

      private:
        /** The size of the cache block. */
        const unsigned blkSize;
        /** The smallest cache we are tracking. */
        const unsigned minTrackedSize;
        /** The number of different size caches being tracked. */
        const int numTrackedCaches;
        /** A mask for all caches being tracked. */
        const CachesMask inAllCachesMask;
        /** Vector of pointers to blocks at the cache boundaries. */
        std::vector<FALRUBlk*> boundaries;

      protected:
        /**
         * @defgroup FALRUStats Fully Associative LRU specific statistics
         * The FA lru stack lets us track multiple cache sizes at once. These
         * statistics track the hits and misses for different cache sizes.
         * @{
         */

        /** Hits in each cache */
        Stats::Vector hits;
        /** Misses in each cache */
        Stats::Vector misses;
        /** Total number of accesses */
        Stats::Scalar accesses;

        /**
         * @}
         */
    };
    CacheTracking cacheTracking;
};

#endif // __MEM_CACHE_TAGS_FA_LRU_HH__