fa_lru.hh (13752:135bb759ee9c -> 13941:2c19da00ef9c)
/*
 * Copyright (c) 2012-2013,2016,2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declaration of a fully associative LRU tag store.
 */

#ifndef __MEM_CACHE_TAGS_FA_LRU_HH__
#define __MEM_CACHE_TAGS_FA_LRU_HH__

#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>
#include <vector>

#include "base/bitfield.hh"
#include "base/intmath.hh"
#include "base/logging.hh"
#include "base/statistics.hh"
#include "base/types.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/tags/base.hh"
#include "mem/packet.hh"
#include "params/FALRU.hh"

// Uncomment to enable sanity checks for the FALRU cache and the
// TrackedCaches class
//#define FALRU_DEBUG

class BaseCache;
class ReplaceableEntry;

// A bitmask of the caches we are keeping track of. Currently the
// lowest bit is the smallest cache we are tracking, as it is
// specified by the corresponding parameter. The rest of the bits are
// for exponentially growing cache sizes.
typedef uint32_t CachesMask;
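
// Illustrative example (not part of the original header): with a minimum
// tracked size of 4kB and three tracked caches (4kB, 8kB, 16kB), a block
// whose inCachesMask is 0b110 currently fits in the 8kB and 16kB caches
// but would miss in the 4kB one; the masks are cumulative because a block
// that fits in a smaller cache also fits in every larger one.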

/**
 * A fully associative cache block.
 */
class FALRUBlk : public CacheBlk
{
  public:
    FALRUBlk() : CacheBlk(), prev(nullptr), next(nullptr), inCachesMask(0) {}

    /** The previous block in LRU order. */
    FALRUBlk *prev;
    /** The next block in LRU order. */
    FALRUBlk *next;

    /** A bit mask of the caches that fit this block. */
    CachesMask inCachesMask;

    /**
     * Pretty-print inCachesMask and other CacheBlk information.
     *
     * @return string with basic state information
     */
    std::string print() const override;
};

/**
 * A fully associative LRU cache. Keeps statistics for accesses to a number of
 * cache sizes at once.
 */
class FALRU : public BaseTags
{
  public:
    /** Typedef the block type used in this class. */
    typedef FALRUBlk BlkType;

  protected:
    /** The cache blocks. */
    FALRUBlk *blks;

    /** The MRU block. */
    FALRUBlk *head;
    /** The LRU block. */
    FALRUBlk *tail;

    /** Hash table type mapping addresses to cache block pointers. */
    struct PairHash
    {
        template <class T1, class T2>
        std::size_t operator()(const std::pair<T1, T2> &p) const
        {
            return std::hash<T1>()(p.first) ^ std::hash<T2>()(p.second);
        }
    };
    typedef std::pair<Addr, bool> TagHashKey;
    typedef std::unordered_map<TagHashKey, FALRUBlk *, PairHash> TagHash;

    /** The address hash table. */
    TagHash tagHash;
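
    // Illustrative lookup sketch (hypothetical, not part of the original
    // header): entries are keyed on the block-aligned address plus the
    // secure bit, so a secure and a non-secure copy of the same line can
    // coexist. A lookup is roughly
    //
    //     auto it = tagHash.find(std::make_pair(blkAlign(addr), is_secure));
    //     FALRUBlk *blk = (it != tagHash.end()) ? it->second : nullptr;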

    /**
     * Move a cache block to the MRU position.
     *
     * @param blk The block to promote.
     */
    void moveToHead(FALRUBlk *blk);

    /**
     * Move a cache block to the LRU position.
     *
     * @param blk The block to demote.
     */
    void moveToTail(FALRUBlk *blk);

  public:
    typedef FALRUParams Params;

    /**
     * Construct and initialize this cache tagstore.
     */
    FALRU(const Params *p);
    ~FALRU();

    /**
     * Initialize blocks as FALRUBlk instances.
     */
    void tagsInit() override;

    /**
     * Register the stats for this object.
     */
    void regStats() override;

    /**
     * Invalidate a cache block.
     * @param blk The block to invalidate.
     */
    void invalidate(CacheBlk *blk) override;

    /**
     * Access block and update replacement data. May not succeed, in which
     * case nullptr is returned. This has all the implications of a cache
     * access and should only be used as such.
     * Returns the tag lookup latency and the inCachesMask flags as a side
     * effect.
     *
     * @param addr The address to look for.
     * @param is_secure True if the target memory space is secure.
     * @param lat The latency of the tag lookup.
     * @param in_cache_mask Mask indicating the caches in which the blk fits.
     * @return Pointer to the cache block.
     */
    CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
                          CachesMask *in_cache_mask);

    /**
     * A wrapper of the above function to conform with the base interface.
     */
    CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) override;
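
    // Illustrative usage sketch (hypothetical caller, not part of the
    // original header): a hit check on the cache side might look roughly
    // like
    //
    //     Cycles lat;
    //     CachesMask mask;
    //     CacheBlk *blk = tags->accessBlock(pkt->getAddr(),
    //                                       pkt->isSecure(), lat, &mask);
    //     if (blk) { /* hit: blk has been moved to the MRU position */ }
    //     else     { /* miss: allocate via findVictim()/insertBlock() */ }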

    /**
     * Find the block in the cache, do not update the replacement data.
     * @param addr The address to look for.
     * @param is_secure True if the target memory space is secure.
     * @return Pointer to the cache block.
     */
    CacheBlk* findBlock(Addr addr, bool is_secure) const override;

    /**
     * Find a block given set and way.
     *
     * @param set The set of the block.
     * @param way The way of the block.
     * @return The block.
     */
    ReplaceableEntry* findBlockBySetAndWay(int set, int way) const override;

    /**
     * Find replacement victim based on address. The list of evicted blocks
     * only contains the victim.
     *
     * @param addr Address to find a victim for.
     * @param is_secure True if the target memory space is secure.
     * @param size Size, in bits, of new block to allocate.
     * @param evict_blks Cache blocks to be evicted.
     * @return Cache block to be replaced.
     */
    CacheBlk* findVictim(Addr addr, const bool is_secure,
                         const std::size_t size,
                         std::vector<CacheBlk*>& evict_blks) const override;
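
    // Illustrative miss-handling sketch (hypothetical caller, not part of
    // the original header): pick a victim, evict it, then insert the new
    // block, roughly
    //
    //     std::vector<CacheBlk*> evict_blks;
    //     CacheBlk *victim = tags->findVictim(pkt->getAddr(),
    //                                         pkt->isSecure(),
    //                                         blkSize * 8, evict_blks);
    //     // ... write back / invalidate the blocks in evict_blks ...
    //     tags->insertBlock(pkt, victim);
    //
    // For FALRU the evict_blks list only ever contains the single victim.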

    /**
     * Insert the new block into the cache and update replacement data.
     *
     * @param pkt Packet holding the address to update
     * @param blk The block to update.
     */
    void insertBlock(const PacketPtr pkt, CacheBlk *blk) override;

    /**
     * Generate the tag from the address. For fully associative caches this
     * is just the block address.
     * @param addr The address to get the tag from.
     * @return The tag.
     */
    Addr extractTag(Addr addr) const override
    {
        return blkAlign(addr);
    }
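
    // Worked example (assuming 64-byte blocks): extractTag(0x12345) yields
    // the block-aligned address 0x12340, which serves as both the tag and,
    // via regenerateBlkAddr() below, the regenerated block address.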

    /**
     * Regenerate the block address from the tag.
     *
     * @param blk The block.
     * @return the block address.
     */
    Addr regenerateBlkAddr(const CacheBlk* blk) const override
    {
        return blk->tag;
    }

    void forEachBlk(std::function<void(CacheBlk &)> visitor) override {
        for (int i = 0; i < numBlocks; i++) {
            visitor(blks[i]);
        }
    }

    bool anyBlk(std::function<bool(CacheBlk &)> visitor) override {
        for (int i = 0; i < numBlocks; i++) {
            if (visitor(blks[i])) {
                return true;
            }
        }
        return false;
    }
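
    // Illustrative visitor sketch (hypothetical, not part of the original
    // header): whole-cache queries become simple lambdas, e.g.
    //
    //     unsigned valid_blocks = 0;
    //     tags->forEachBlk([&valid_blocks](CacheBlk &blk) {
    //         if (blk.isValid()) valid_blocks++;
    //     });
    //     bool has_dirty = tags->anyBlk([](CacheBlk &blk) {
    //         return blk.isDirty();
    //     });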

  private:
    /**
     * Mechanism that allows us to simultaneously collect miss
     * statistics for multiple caches. Currently, we keep track of
     * caches from a set minimum size of interest up to the actual
     * cache size.
     */
    class CacheTracking
    {
      public:
        CacheTracking(unsigned min_size, unsigned max_size,
                      unsigned block_size)
            : blkSize(block_size),
              minTrackedSize(min_size),
              numTrackedCaches(max_size > min_size ?
                               floorLog2(max_size) - floorLog2(min_size) : 0),
              inAllCachesMask(mask(numTrackedCaches)),
              boundaries(numTrackedCaches)
        {
            fatal_if(numTrackedCaches > sizeof(CachesMask) * 8,
                     "Not enough bits (%s) in CachesMask type to keep "
                     "track of %d caches\n", sizeof(CachesMask),
                     numTrackedCaches);
        }
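
        // Worked example (illustrative numbers): with min_size = 4kB,
        // max_size = 64kB and 64-byte blocks, numTrackedCaches is
        // floorLog2(64kB) - floorLog2(4kB) = 16 - 12 = 4, so bits 0..3 of a
        // CachesMask stand for the 4kB, 8kB, 16kB and 32kB caches and
        // inAllCachesMask is 0b1111.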

        /**
         * Initialize cache blocks and the tracking mechanism
         *
         * All blocks in the cache need to be initialized once.
         *
         * @param head the MRU block
         * @param tail the LRU block
         */
        void init(FALRUBlk *head, FALRUBlk *tail);

        /**
         * Update boundaries as a block will be moved to the MRU.
         *
         * For all caches that didn't fit the block before moving it,
         * we move their boundaries one block closer to the MRU. We
         * also update the inCachesMask values as necessary.
         *
         * @param blk the block that will be moved to the head
         */
        void moveBlockToHead(FALRUBlk *blk);

        /**
         * Update boundaries as a block will be moved to the LRU.
         *
         * For all caches that fit the block before moving it, we
         * move their boundaries one block closer to the LRU. We
         * also update the inCachesMask values as necessary.
         *
         * @param blk the block that will be moved to the tail
         */
        void moveBlockToTail(FALRUBlk *blk);
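
        // Conceptual sketch of the boundary update on promotion
        // (hypothetical, not the actual implementation): for every tracked
        // cache the promoted block did not fit in, the former last-fitting
        // block drops out and the boundary slides one block toward the MRU,
        // roughly
        //
        //     for (int i = 0; i < numTrackedCaches; i++) {
        //         if (!(blk->inCachesMask & (1U << i))) {
        //             boundaries[i]->inCachesMask &= ~(1U << i);
        //             boundaries[i] = boundaries[i]->prev;
        //         }
        //     }
        //     blk->inCachesMask = inAllCachesMask;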

        /**
         * Notify of a block access.
         *
         * This should be called every time a block is accessed and it
         * updates statistics. If the input block is nullptr then we
         * treat the access as a miss. The block's inCachesMask
         * determines the caches in which the block fits.
         *
         * @param blk the block to record the access for
         */
        void recordAccess(FALRUBlk *blk);

        /**
         * Check that the tracking mechanism is in a consistent state.
         *
         * Iterate from the head (MRU) to the tail (LRU) of the list
         * of blocks and assert that the inCachesMask and the boundaries
         * are in a consistent state.
         *
         * @param head the MRU block of the actual cache
         * @param tail the LRU block of the actual cache
         */
        void check(const FALRUBlk *head, const FALRUBlk *tail) const;

        /**
         * Register the stats for this object.
         */
        void regStats(std::string name);

      private:
        /** The size of the cache block */
        const unsigned blkSize;
        /** The smallest cache we are tracking */
        const unsigned minTrackedSize;
        /** The number of different size caches being tracked. */
        const int numTrackedCaches;
        /** A mask for all caches being tracked. */
        const CachesMask inAllCachesMask;
        /** Array of pointers to blocks at the cache boundaries. */
        std::vector<FALRUBlk*> boundaries;

      protected:
        /**
         * @defgroup FALRUStats Fully Associative LRU specific statistics
         * The FA LRU stack lets us track multiple cache sizes at once. These
         * statistics track the hits and misses for different cache sizes.
         * @{
         */

        /** Hits in each cache */
        Stats::Vector hits;
        /** Misses in each cache */
        Stats::Vector misses;
        /** Total number of accesses */
        Stats::Scalar accesses;

        /**
         * @}
         */
    };
    CacheTracking cacheTracking;
};

#endif // __MEM_CACHE_TAGS_FA_LRU_HH__