// fa_lru.hh: diff between revisions 12727:56c23b54bcb1 and 12728:57bdea4f96aa
1/*
2 * Copyright (c) 2012-2013,2016,2018 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2003-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Erik Hallnor
41 * Nikos Nikoleris
42 */
43
44/**
45 * @file
46 * Declaration of a fully associative LRU tag store.
47 */
48
49#ifndef __MEM_CACHE_TAGS_FA_LRU_HH__
50#define __MEM_CACHE_TAGS_FA_LRU_HH__
51
52#include <cstdint>
1/*
2 * Copyright (c) 2012-2013,2016,2018 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2003-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Erik Hallnor
41 * Nikos Nikoleris
42 */
43
44/**
45 * @file
46 * Declaration of a fully associative LRU tag store.
47 */
48
49#ifndef __MEM_CACHE_TAGS_FA_LRU_HH__
50#define __MEM_CACHE_TAGS_FA_LRU_HH__
51
52#include <cstdint>
53#include <functional>
53#include <string>
54#include <unordered_map>
55
56#include "base/bitfield.hh"
57#include "base/intmath.hh"
58#include "base/logging.hh"
59#include "base/statistics.hh"
60#include "base/types.hh"
61#include "mem/cache/blk.hh"
62#include "mem/cache/tags/base.hh"
63#include "mem/packet.hh"
64#include "params/FALRU.hh"
65
66// Uncomment to enable sanity checks for the FALRU cache and the
67// TrackedCaches class
68//#define FALRU_DEBUG
69
70// A bitmask of the caches we are keeping track of. Currently the
71// lowest bit is the smallest cache we are tracking, as it is
72// specified by the corresponding parameter. The rest of the bits are
73// for exponentially growing cache sizes.
74typedef uint32_t CachesMask;
75
76/**
77 * A fully associative cache block.
78 */
79class FALRUBlk : public CacheBlk
80{
81 public:
82 /** The previous block in LRU order. */
83 FALRUBlk *prev;
84 /** The next block in LRU order. */
85 FALRUBlk *next;
86
87 /** A bit mask of the caches that fit this block. */
88 CachesMask inCachesMask;
89};
90
91/**
92 * A fully associative LRU cache. Keeps statistics for accesses to a number of
93 * cache sizes at once.
94 */
95class FALRU : public BaseTags
96{
97 public:
98 /** Typedef the block type used in this class. */
99 typedef FALRUBlk BlkType;
100
101 protected:
102 /** The cache blocks. */
103 FALRUBlk *blks;
104
105 /** The MRU block. */
106 FALRUBlk *head;
107 /** The LRU block. */
108 FALRUBlk *tail;
109
110 /** Hash table type mapping addresses to cache block pointers. */
111 typedef std::unordered_map<Addr, FALRUBlk *, std::hash<Addr> > hash_t;
112 /** Iterator into the address hash table. */
113 typedef hash_t::const_iterator tagIterator;
114
115 /** The address hash table. */
116 hash_t tagHash;
117
118 /**
119 * Find the cache block for the given address.
120 * @param addr The address to find.
121 * @return The cache block of the address, if any.
122 */
123 FALRUBlk * hashLookup(Addr addr) const;
124
125 /**
126 * Move a cache block to the MRU position.
127 *
128 * @param blk The block to promote.
129 */
130 void moveToHead(FALRUBlk *blk);
131
132 /**
133 * Move a cache block to the LRU position.
134 *
135 * @param blk The block to demote.
136 */
137 void moveToTail(FALRUBlk *blk);
138
139 public:
140 typedef FALRUParams Params;
141
142 /**
143 * Construct and initialize this cache tagstore.
144 */
145 FALRU(const Params *p);
146 ~FALRU();
147
148 /**
149 * Register the stats for this object.
150 */
151 void regStats() override;
152
153 /**
154 * Invalidate a cache block.
155 * @param blk The block to invalidate.
156 */
157 void invalidate(CacheBlk *blk) override;
158
159 /**
160 * Access block and update replacement data. May not succeed, in which
161 * case nullptr pointer is returned. This has all the implications of a
162 * cache access and should only be used as such.
163 * Returns the access latency and inCachesMask flags as a side effect.
164 * @param addr The address to look for.
165 * @param is_secure True if the target memory space is secure.
166 * @param lat The latency of the access.
167 * @param in_cache_mask Mask indicating the caches in which the blk fits.
168 * @return Pointer to the cache block.
169 */
170 CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
171 CachesMask *in_cache_mask);
172
173 /**
174 * Just a wrapper of above function to conform with the base interface.
175 */
176 CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) override;
177
178 /**
179 * Find the block in the cache, do not update the replacement data.
180 * @param addr The address to look for.
181 * @param is_secure True if the target memory space is secure.
182 * @param asid The address space ID.
183 * @return Pointer to the cache block.
184 */
185 CacheBlk* findBlock(Addr addr, bool is_secure) const override;
186
187 /**
188 * Find replacement victim based on address.
189 *
190 * @param addr Address to find a victim for.
191 * @return Cache block to be replaced.
192 */
193 CacheBlk* findVictim(Addr addr) override;
194
195 /**
196 * Insert the new block into the cache and update replacement data.
197 *
198 * @param pkt Packet holding the address to update
199 * @param blk The block to update.
200 */
201 void insertBlock(PacketPtr pkt, CacheBlk *blk) override;
202
203 /**
204 * Find the cache block given set and way
205 * @param set The set of the block.
206 * @param way The way of the block.
207 * @return The cache block.
208 */
209 CacheBlk* findBlockBySetAndWay(int set, int way) const override;
210
211 /**
212 * Generate the tag from the addres. For fully associative this is just the
213 * block address.
214 * @param addr The address to get the tag from.
215 * @return The tag.
216 */
217 Addr extractTag(Addr addr) const override
218 {
219 return blkAlign(addr);
220 }
221
222 /**
223 * Return the set of an address. Only one set in a fully associative cache.
224 * @param addr The address to get the set from.
225 * @return 0.
226 */
227 int extractSet(Addr addr) const override
228 {
229 return 0;
230 }
231
232 /**
233 * Regenerate the block address from the tag.
234 *
235 * @param block The block.
236 * @return the block address.
237 */
238 Addr regenerateBlkAddr(const CacheBlk* blk) const override
239 {
240 return blk->tag;
241 }
242
54#include <string>
55#include <unordered_map>
56
57#include "base/bitfield.hh"
58#include "base/intmath.hh"
59#include "base/logging.hh"
60#include "base/statistics.hh"
61#include "base/types.hh"
62#include "mem/cache/blk.hh"
63#include "mem/cache/tags/base.hh"
64#include "mem/packet.hh"
65#include "params/FALRU.hh"
66
67// Uncomment to enable sanity checks for the FALRU cache and the
68// TrackedCaches class
69//#define FALRU_DEBUG
70
71// A bitmask of the caches we are keeping track of. Currently the
72// lowest bit is the smallest cache we are tracking, as it is
73// specified by the corresponding parameter. The rest of the bits are
74// for exponentially growing cache sizes.
75typedef uint32_t CachesMask;
76
77/**
78 * A fully associative cache block.
79 */
80class FALRUBlk : public CacheBlk
81{
82 public:
83 /** The previous block in LRU order. */
84 FALRUBlk *prev;
85 /** The next block in LRU order. */
86 FALRUBlk *next;
87
88 /** A bit mask of the caches that fit this block. */
89 CachesMask inCachesMask;
90};
91
92/**
93 * A fully associative LRU cache. Keeps statistics for accesses to a number of
94 * cache sizes at once.
95 */
96class FALRU : public BaseTags
97{
98 public:
99 /** Typedef the block type used in this class. */
100 typedef FALRUBlk BlkType;
101
102 protected:
103 /** The cache blocks. */
104 FALRUBlk *blks;
105
106 /** The MRU block. */
107 FALRUBlk *head;
108 /** The LRU block. */
109 FALRUBlk *tail;
110
111 /** Hash table type mapping addresses to cache block pointers. */
112 typedef std::unordered_map<Addr, FALRUBlk *, std::hash<Addr> > hash_t;
113 /** Iterator into the address hash table. */
114 typedef hash_t::const_iterator tagIterator;
115
116 /** The address hash table. */
117 hash_t tagHash;
118
119 /**
120 * Find the cache block for the given address.
121 * @param addr The address to find.
122 * @return The cache block of the address, if any.
123 */
124 FALRUBlk * hashLookup(Addr addr) const;
125
126 /**
127 * Move a cache block to the MRU position.
128 *
129 * @param blk The block to promote.
130 */
131 void moveToHead(FALRUBlk *blk);
132
133 /**
134 * Move a cache block to the LRU position.
135 *
136 * @param blk The block to demote.
137 */
138 void moveToTail(FALRUBlk *blk);
139
140 public:
141 typedef FALRUParams Params;
142
143 /**
144 * Construct and initialize this cache tagstore.
145 */
146 FALRU(const Params *p);
147 ~FALRU();
148
149 /**
150 * Register the stats for this object.
151 */
152 void regStats() override;
153
154 /**
155 * Invalidate a cache block.
156 * @param blk The block to invalidate.
157 */
158 void invalidate(CacheBlk *blk) override;
159
160 /**
161 * Access block and update replacement data. May not succeed, in which
162 * case nullptr pointer is returned. This has all the implications of a
163 * cache access and should only be used as such.
164 * Returns the access latency and inCachesMask flags as a side effect.
165 * @param addr The address to look for.
166 * @param is_secure True if the target memory space is secure.
167 * @param lat The latency of the access.
168 * @param in_cache_mask Mask indicating the caches in which the blk fits.
169 * @return Pointer to the cache block.
170 */
171 CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
172 CachesMask *in_cache_mask);
173
174 /**
175 * Just a wrapper of above function to conform with the base interface.
176 */
177 CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) override;
178
179 /**
180 * Find the block in the cache, do not update the replacement data.
181 * @param addr The address to look for.
182 * @param is_secure True if the target memory space is secure.
183 * @param asid The address space ID.
184 * @return Pointer to the cache block.
185 */
186 CacheBlk* findBlock(Addr addr, bool is_secure) const override;
187
188 /**
189 * Find replacement victim based on address.
190 *
191 * @param addr Address to find a victim for.
192 * @return Cache block to be replaced.
193 */
194 CacheBlk* findVictim(Addr addr) override;
195
196 /**
197 * Insert the new block into the cache and update replacement data.
198 *
199 * @param pkt Packet holding the address to update
200 * @param blk The block to update.
201 */
202 void insertBlock(PacketPtr pkt, CacheBlk *blk) override;
203
204 /**
205 * Find the cache block given set and way
206 * @param set The set of the block.
207 * @param way The way of the block.
208 * @return The cache block.
209 */
210 CacheBlk* findBlockBySetAndWay(int set, int way) const override;
211
212 /**
213 * Generate the tag from the addres. For fully associative this is just the
214 * block address.
215 * @param addr The address to get the tag from.
216 * @return The tag.
217 */
218 Addr extractTag(Addr addr) const override
219 {
220 return blkAlign(addr);
221 }
222
223 /**
224 * Return the set of an address. Only one set in a fully associative cache.
225 * @param addr The address to get the set from.
226 * @return 0.
227 */
228 int extractSet(Addr addr) const override
229 {
230 return 0;
231 }
232
233 /**
234 * Regenerate the block address from the tag.
235 *
236 * @param block The block.
237 * @return the block address.
238 */
239 Addr regenerateBlkAddr(const CacheBlk* blk) const override
240 {
241 return blk->tag;
242 }
243
243 /**
244 * @todo Implement as in lru. Currently not used
245 */
246 virtual std::string print() const override { return ""; }
244 void forEachBlk(std::function<void(CacheBlk &)> visitor) override {
245 for (int i = 0; i < numBlocks; i++) {
246 visitor(blks[i]);
247 }
248 }
247
249
248 /**
249 * Visit each block in the tag store and apply a visitor to the
250 * block.
251 *
252 * The visitor should be a function (or object that behaves like a
253 * function) that takes a cache block reference as its parameter
254 * and returns a bool. A visitor can request the traversal to be
255 * stopped by returning false, returning true causes it to be
256 * called for the next block in the tag store.
257 *
258 * \param visitor Visitor to call on each block.
259 */
260 void forEachBlk(CacheBlkVisitor &visitor) override {
250 bool anyBlk(std::function<bool(CacheBlk &)> visitor) override {
261 for (int i = 0; i < numBlocks; i++) {
251 for (int i = 0; i < numBlocks; i++) {
262 if (!visitor(blks[i]))
263 return;
252 if (visitor(blks[i])) {
253 return true;
254 }
264 }
255 }
256 return false;
265 }
266
267 private:
268 /**
269 * Mechanism that allows us to simultaneously collect miss
270 * statistics for multiple caches. Currently, we keep track of
271 * caches from a set minimum size of interest up to the actual
272 * cache size.
273 */
274 class CacheTracking
275 {
276 public:
277 CacheTracking(unsigned min_size, unsigned max_size,
278 unsigned block_size)
279 : blkSize(block_size),
280 minTrackedSize(min_size),
281 numTrackedCaches(max_size > min_size ?
282 floorLog2(max_size) - floorLog2(min_size) : 0),
283 inAllCachesMask(mask(numTrackedCaches)),
284 boundaries(new FALRUBlk *[numTrackedCaches])
285 {
286 fatal_if(numTrackedCaches > sizeof(CachesMask) * 8,
287 "Not enough bits (%s) in type CachesMask type to keep "
288 "track of %d caches\n", sizeof(CachesMask),
289 numTrackedCaches);
290 }
291
292 ~CacheTracking()
293 {
294 delete[] boundaries;
295 }
296
297 /**
298 * Initialiaze cache blocks and the tracking mechanism
299 *
300 * All blocks in the cache need to be initialized once.
301 *
302 * @param blk the MRU block
303 * @param blk the LRU block
304 */
305 void init(FALRUBlk *head, FALRUBlk *tail);
306
307 /**
308 * Update boundaries as a block will be moved to the MRU.
309 *
310 * For all caches that didn't fit the block before moving it,
311 * we move their boundaries one block closer to the MRU. We
312 * also update InCacheMasks as neccessary.
313 *
314 * @param blk the block that will be moved to the head
315 */
316 void moveBlockToHead(FALRUBlk *blk);
317
318 /**
319 * Update boundaries as a block will be moved to the LRU.
320 *
321 * For all caches that fitted the block before moving it, we
322 * move their boundaries one block closer to the LRU. We
323 * also update InCacheMasks as neccessary.
324 *
325 * @param blk the block that will be moved to the head
326 */
327 void moveBlockToTail(FALRUBlk *blk);
328
329 /**
330 * Notify of a block access.
331 *
332 * This should be called every time a block is accessed and it
333 * updates statistics. If the input block is nullptr then we
334 * treat the access as a miss. The block's InCacheMask
335 * determines the caches in which the block fits.
336 *
337 * @param blk the block to record the access for
338 */
339 void recordAccess(FALRUBlk *blk);
340
341 /**
342 * Check that the tracking mechanism is in consistent state.
343 *
344 * Iterate from the head (MRU) to the tail (LRU) of the list
345 * of blocks and assert the inCachesMask and the boundaries
346 * are in consistent state.
347 *
348 * @param head the MRU block of the actual cache
349 * @param head the LRU block of the actual cache
350 */
351 void check(FALRUBlk *head, FALRUBlk *tail);
352
353 /**
354 * Register the stats for this object.
355 */
356 void regStats(std::string name);
357
358 private:
359 /** The size of the cache block */
360 const unsigned blkSize;
361 /** The smallest cache we are tracking */
362 const unsigned minTrackedSize;
363 /** The number of different size caches being tracked. */
364 const int numTrackedCaches;
365 /** A mask for all cache being tracked. */
366 const CachesMask inAllCachesMask;
367 /** Array of pointers to blocks at the cache boundaries. */
368 FALRUBlk** boundaries;
369
370 protected:
371 /**
372 * @defgroup FALRUStats Fully Associative LRU specific statistics
373 * The FA lru stack lets us track multiple cache sizes at once. These
374 * statistics track the hits and misses for different cache sizes.
375 * @{
376 */
377
378 /** Hits in each cache */
379 Stats::Vector hits;
380 /** Misses in each cache */
381 Stats::Vector misses;
382 /** Total number of accesses */
383 Stats::Scalar accesses;
384
385 /**
386 * @}
387 */
388 };
389 CacheTracking cacheTracking;
390};
391
392#endif // __MEM_CACHE_TAGS_FA_LRU_HH__
257 }
258
259 private:
260 /**
261 * Mechanism that allows us to simultaneously collect miss
262 * statistics for multiple caches. Currently, we keep track of
263 * caches from a set minimum size of interest up to the actual
264 * cache size.
265 */
266 class CacheTracking
267 {
268 public:
269 CacheTracking(unsigned min_size, unsigned max_size,
270 unsigned block_size)
271 : blkSize(block_size),
272 minTrackedSize(min_size),
273 numTrackedCaches(max_size > min_size ?
274 floorLog2(max_size) - floorLog2(min_size) : 0),
275 inAllCachesMask(mask(numTrackedCaches)),
276 boundaries(new FALRUBlk *[numTrackedCaches])
277 {
278 fatal_if(numTrackedCaches > sizeof(CachesMask) * 8,
279 "Not enough bits (%s) in type CachesMask type to keep "
280 "track of %d caches\n", sizeof(CachesMask),
281 numTrackedCaches);
282 }
283
284 ~CacheTracking()
285 {
286 delete[] boundaries;
287 }
288
289 /**
290 * Initialiaze cache blocks and the tracking mechanism
291 *
292 * All blocks in the cache need to be initialized once.
293 *
294 * @param blk the MRU block
295 * @param blk the LRU block
296 */
297 void init(FALRUBlk *head, FALRUBlk *tail);
298
299 /**
300 * Update boundaries as a block will be moved to the MRU.
301 *
302 * For all caches that didn't fit the block before moving it,
303 * we move their boundaries one block closer to the MRU. We
304 * also update InCacheMasks as neccessary.
305 *
306 * @param blk the block that will be moved to the head
307 */
308 void moveBlockToHead(FALRUBlk *blk);
309
310 /**
311 * Update boundaries as a block will be moved to the LRU.
312 *
313 * For all caches that fitted the block before moving it, we
314 * move their boundaries one block closer to the LRU. We
315 * also update InCacheMasks as neccessary.
316 *
317 * @param blk the block that will be moved to the head
318 */
319 void moveBlockToTail(FALRUBlk *blk);
320
321 /**
322 * Notify of a block access.
323 *
324 * This should be called every time a block is accessed and it
325 * updates statistics. If the input block is nullptr then we
326 * treat the access as a miss. The block's InCacheMask
327 * determines the caches in which the block fits.
328 *
329 * @param blk the block to record the access for
330 */
331 void recordAccess(FALRUBlk *blk);
332
333 /**
334 * Check that the tracking mechanism is in consistent state.
335 *
336 * Iterate from the head (MRU) to the tail (LRU) of the list
337 * of blocks and assert the inCachesMask and the boundaries
338 * are in consistent state.
339 *
340 * @param head the MRU block of the actual cache
341 * @param head the LRU block of the actual cache
342 */
343 void check(FALRUBlk *head, FALRUBlk *tail);
344
345 /**
346 * Register the stats for this object.
347 */
348 void regStats(std::string name);
349
350 private:
351 /** The size of the cache block */
352 const unsigned blkSize;
353 /** The smallest cache we are tracking */
354 const unsigned minTrackedSize;
355 /** The number of different size caches being tracked. */
356 const int numTrackedCaches;
357 /** A mask for all cache being tracked. */
358 const CachesMask inAllCachesMask;
359 /** Array of pointers to blocks at the cache boundaries. */
360 FALRUBlk** boundaries;
361
362 protected:
363 /**
364 * @defgroup FALRUStats Fully Associative LRU specific statistics
365 * The FA lru stack lets us track multiple cache sizes at once. These
366 * statistics track the hits and misses for different cache sizes.
367 * @{
368 */
369
370 /** Hits in each cache */
371 Stats::Vector hits;
372 /** Misses in each cache */
373 Stats::Vector misses;
374 /** Total number of accesses */
375 Stats::Scalar accesses;
376
377 /**
378 * @}
379 */
380 };
381 CacheTracking cacheTracking;
382};
383
384#endif // __MEM_CACHE_TAGS_FA_LRU_HH__