fa_lru.cc (12743:b5ccee582b40 → 12744:d1ff0b42b747)
/*
 * Copyright (c) 2013,2016-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definitions of a fully associative LRU tagstore.
 */
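// FALRU keeps all blocks on a single doubly linked list ordered from
// MRU (head) to LRU (tail), uses a hash table for constant-time tag
// lookups, and can track hit rates for a range of smaller cache sizes
// through the CacheTracking helper.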

#include "mem/cache/tags/fa_lru.hh"

#include <cassert>
#include <sstream>

#include "base/intmath.hh"
#include "base/logging.hh"
#include "mem/cache/base.hh"

FALRU::FALRU(const Params *p)
    : BaseTags(p),
      cacheTracking(p->min_tracked_cache_size, size, blkSize)
{
    if (!isPowerOf2(blkSize))
        fatal("cache block size (in bytes) `%d' must be a power of two",
              blkSize);
    if (!isPowerOf2(size))
        fatal("cache size must be a power of two for now");

    blks = new FALRUBlk[numBlocks];

    // Build the doubly linked LRU list: head is the MRU block and
    // tail is the LRU block (the next victim).
    head = &(blks[0]);
    head->prev = nullptr;
    head->next = &(blks[1]);
    head->set = 0;
    head->way = 0;
    head->data = &dataBlks[0];

    for (unsigned i = 1; i < numBlocks - 1; i++) {
        blks[i].prev = &(blks[i-1]);
        blks[i].next = &(blks[i+1]);
        blks[i].set = 0;
        blks[i].way = i;

        // Associate a data chunk to the block
        blks[i].data = &dataBlks[blkSize*i];
    }

    tail = &(blks[numBlocks - 1]);
    tail->prev = &(blks[numBlocks - 2]);
    tail->next = nullptr;
    tail->set = 0;
    tail->way = numBlocks - 1;
    tail->data = &dataBlks[(numBlocks - 1) * blkSize];

    cacheTracking.init(head, tail);
}

FALRU::~FALRU()
{
    delete[] blks;
}

void
FALRU::regStats()
{
    BaseTags::regStats();
    cacheTracking.regStats(name());
}

FALRUBlk *
FALRU::hashLookup(Addr addr) const
{
    tagIterator iter = tagHash.find(addr);
    if (iter != tagHash.end()) {
        return iter->second;
    }
    return nullptr;
}

void
FALRU::invalidate(CacheBlk *blk)
{
    BaseTags::invalidate(blk);

    // Move the block to the tail to make it the next victim
    moveToTail(static_cast<FALRUBlk*>(blk));

    // Erase block entry in the hash table
    tagHash.erase(blk->tag);
}

CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat)
{
    return accessBlock(addr, is_secure, lat, nullptr);
}

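// Access a block: on a hit the block becomes the MRU and the access
// latency (possibly delayed by whenReady) is returned through lat; on
// a miss only the lookup latency is charged. The optional
// in_caches_mask reports which tracked cache sizes the block would
// have hit in.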
CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat,
                   CachesMask *in_caches_mask)
{
    CachesMask mask = 0;
    FALRUBlk* blk = static_cast<FALRUBlk*>(findBlock(addr, is_secure));

    if (blk != nullptr) {
        // If a cache hit
        lat = accessLatency;
        // Check if the block to be accessed is available. If not,
        // apply the accessLatency on top of block->whenReady.
        if (blk->whenReady > curTick() &&
            cache->ticksToCycles(blk->whenReady - curTick()) >
            accessLatency) {
            lat = cache->ticksToCycles(blk->whenReady - curTick()) +
                accessLatency;
        }
        mask = blk->inCachesMask;

        moveToHead(blk);
    } else {
        // If a cache miss
        lat = lookupLatency;
    }
    if (in_caches_mask) {
        *in_caches_mask = mask;
    }

    cacheTracking.recordAccess(blk);

    return blk;
}

CacheBlk*
FALRU::findBlock(Addr addr, bool is_secure) const
{
    Addr tag = extractTag(addr);
    FALRUBlk* blk = hashLookup(tag);

    if (blk && blk->isValid()) {
        assert(blk->tag == tag);
        assert(blk->isSecure() == is_secure);
    } else {
        blk = nullptr;
    }
    return blk;
}

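// A fully associative cache has a single set, so set must be 0 and the
// way number alone identifies the block.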
ReplaceableEntry*
FALRU::findBlockBySetAndWay(int set, int way) const
{
    assert(set == 0);
    return &blks[way];
}

CacheBlk*
FALRU::findVictim(Addr addr, std::vector<CacheBlk*>& evict_blks) const
{
    // The victim is always stored on the tail for the FALRU
    FALRUBlk* victim = tail;

    // There is only one eviction for this replacement
    evict_blks.push_back(victim);

    return victim;
}

void
FALRU::insertBlock(PacketPtr pkt, CacheBlk *blk)
{
    FALRUBlk* falruBlk = static_cast<FALRUBlk*>(blk);

    // Make sure block is not present in the cache
    assert(falruBlk->inCachesMask == 0);

    // Do common block insertion functionality
    BaseTags::insertBlock(pkt, blk);

    // New block is the MRU
    moveToHead(falruBlk);

    // Insert new block in the hash table
    tagHash[falruBlk->tag] = falruBlk;
}

void
FALRU::moveToHead(FALRUBlk *blk)
{
    // If block is not already head, do the moving
    if (blk != head) {
        cacheTracking.moveBlockToHead(blk);
        // If block is tail, set previous block as new tail
        if (blk == tail) {
            assert(blk->next == nullptr);
            tail = blk->prev;
            tail->next = nullptr;
        // Inform block's surrounding blocks that it has been moved
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }

        // Swap pointers
        blk->next = head;
        blk->prev = nullptr;
        head->prev = blk;
        head = blk;

        cacheTracking.check(head, tail);
    }
}

void
FALRU::moveToTail(FALRUBlk *blk)
{
    // If block is not already tail, do the moving
    if (blk != tail) {
        cacheTracking.moveBlockToTail(blk);
        // If block is head, set next block as new head
        if (blk == head) {
            assert(blk->prev == nullptr);
            head = blk->next;
            head->prev = nullptr;
        // Inform block's surrounding blocks that it has been moved
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }

        // Swap pointers
        blk->prev = tail;
        blk->next = nullptr;
        tail->next = blk;
        tail = blk;

        cacheTracking.check(head, tail);
    }
}

FALRU *
FALRUParams::create()
{
    return new FALRU(this);
}

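// CacheTracking maintains, for every block, a mask of the hypothetical
// caches it would currently reside in. The tracked sizes double from
// minTrackedSize up to the actual cache size, and boundaries[j] points
// at the last block of the LRU list that still fits in the j-th
// tracked cache, so a single simulation yields hit/miss counts for all
// of them.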
void
FALRU::CacheTracking::check(FALRUBlk *head, FALRUBlk *tail)
{
#ifdef FALRU_DEBUG
    FALRUBlk* blk = head;
    unsigned curr_size = 0;
    unsigned tracked_cache_size = minTrackedSize;
    CachesMask in_caches_mask = inAllCachesMask;
    int j = 0;

    while (blk) {
        panic_if(blk->inCachesMask != in_caches_mask, "Expected cache mask "
                 "%x found %x", blk->inCachesMask, in_caches_mask);

        curr_size += blkSize;
        if (curr_size == tracked_cache_size && blk != tail) {
            panic_if(boundaries[j] != blk, "Unexpected boundary for the %d-th "
                     "cache", j);
            tracked_cache_size <<= 1;
            // from this point, blocks fit only in the larger caches
            in_caches_mask &= ~(1U << j);
            ++j;
        }
        blk = blk->next;
    }
#endif // FALRU_DEBUG
}

void
FALRU::CacheTracking::init(FALRUBlk *head, FALRUBlk *tail)
{
    // early exit if we are not tracking any extra caches
    FALRUBlk* blk = numTrackedCaches ? head : nullptr;
    unsigned curr_size = 0;
    unsigned tracked_cache_size = minTrackedSize;
    CachesMask in_caches_mask = inAllCachesMask;
    int j = 0;

    while (blk) {
        blk->inCachesMask = in_caches_mask;

        curr_size += blkSize;
        if (curr_size == tracked_cache_size && blk != tail) {
            boundaries[j] = blk;

            tracked_cache_size <<= 1;
            // from this point, blocks fit only in the larger caches
            in_caches_mask &= ~(1U << j);
            ++j;
        }
        blk = blk->next;
    }
}

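// Moving a block to the head pushes every block above its old position
// one step toward the LRU end, so each boundary for a cache the block
// did not previously fit in shifts one block toward the MRU end.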
void
FALRU::CacheTracking::moveBlockToHead(FALRUBlk *blk)
{
    // Get the mask of all caches, in which the block didn't fit
    // before moving it to the head
    CachesMask update_caches_mask = inAllCachesMask ^ blk->inCachesMask;

    for (int i = 0; i < numTrackedCaches; i++) {
        CachesMask current_cache_mask = 1U << i;
        if (current_cache_mask & update_caches_mask) {
            // if the ith cache didn't fit the block (before it is moved
            // to the head), move the ith boundary 1 block closer to the
            // MRU
            boundaries[i]->inCachesMask &= ~current_cache_mask;
            boundaries[i] = boundaries[i]->prev;
        } else if (boundaries[i] == blk) {
            // Make sure the boundary doesn't point to the block
            // we are about to move
            boundaries[i] = blk->prev;
        }
    }

    // Make block reside in all caches
    blk->inCachesMask = inAllCachesMask;
}

void
FALRU::CacheTracking::moveBlockToTail(FALRUBlk *blk)
{
    CachesMask update_caches_mask = blk->inCachesMask;

    for (int i = 0; i < numTrackedCaches; i++) {
        CachesMask current_cache_mask = 1U << i;
        if (current_cache_mask & update_caches_mask) {
            // if the ith cache fit the block (before it is moved to
            // the tail), move the ith boundary 1 block closer to the
            // LRU
            boundaries[i] = boundaries[i]->next;
            if (boundaries[i] == blk) {
                // Make sure the boundary doesn't point to the block
                // we are about to move
                boundaries[i] = blk->next;
            }
            boundaries[i]->inCachesMask |= current_cache_mask;
        }
    }

    // The block now fits only in the actual cache
    blk->inCachesMask = 0;
}

void
FALRU::CacheTracking::recordAccess(FALRUBlk *blk)
{
    for (int i = 0; i < numTrackedCaches; i++) {
        if (blk && ((1U << i) & blk->inCachesMask)) {
            hits[i]++;
        } else {
            misses[i]++;
        }
    }

    // Record stats for the actual cache too
    if (blk) {
        hits[numTrackedCaches]++;
    } else {
        misses[numTrackedCaches]++;
    }

    accesses++;
}

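// Helper for regStats below: print a size using binary unit prefixes
// (1kB == 1024B) so each tracked cache size gets a readable stat name.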
void
printSize(std::ostream &stream, size_t size)
{
    // The prefix after TB is PB, and the loop must stop before div runs
    // past the end of the table so SIZES[div] is always a valid index.
    static const char *SIZES[] = { "B", "kB", "MB", "GB", "TB", "PB" };
    const int num_sizes = sizeof(SIZES) / sizeof(*SIZES);
    int div = 0;
    while (size >= 1024 && div < num_sizes - 1) {
        div++;
        size >>= 10;
    }
    stream << size << SIZES[div];
}

void
FALRU::CacheTracking::regStats(std::string name)
{
    hits
        .init(numTrackedCaches + 1)
        .name(name + ".falru_hits")
        .desc("The number of hits in each cache size.")
        ;
    misses
        .init(numTrackedCaches + 1)
        .name(name + ".falru_misses")
        .desc("The number of misses in each cache size.")
        ;
    accesses
        .name(name + ".falru_accesses")
        .desc("The number of accesses to the FA LRU cache.")
        ;

    for (unsigned i = 0; i < numTrackedCaches + 1; ++i) {
        std::stringstream size_str;
        printSize(size_str, minTrackedSize << i);
        hits.subname(i, size_str.str());
        hits.subdesc(i, "Hits in a " + size_str.str() + " cache");
        misses.subname(i, size_str.str());
        misses.subdesc(i, "Misses in a " + size_str.str() + " cache");
    }
}