fa_lru.cc (13353:63f4073c1fc7)
/*
 * Copyright (c) 2018 Inria
 * Copyright (c) 2013,2016-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 *          Daniel Carvalho
 */

/**
 * @file
 * Definitions of a fully associative LRU tagstore.
 */

#include "mem/cache/tags/fa_lru.hh"

#include <cassert>
#include <sstream>

#include "base/intmath.hh"
#include "base/logging.hh"
#include "mem/cache/base.hh"
#include "mem/cache/replacement_policies/replaceable_entry.hh"

std::string
FALRUBlk::print() const
{
    return csprintf("%s inCachesMask: %#x", CacheBlk::print(), inCachesMask);
}

FALRU::FALRU(const Params *p)
    : BaseTags(p),
      cacheTracking(p->min_tracked_cache_size, size, blkSize)
{
    if (!isPowerOf2(blkSize))
        fatal("cache block size (in bytes) `%d' must be a power of two",
              blkSize);
    if (!isPowerOf2(size))
        fatal("cache size must be a power of 2 for now");

    blks = new FALRUBlk[numBlocks];
}

FALRU::~FALRU()
{
    delete[] blks;
}

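// Build the LRU list as a doubly linked chain over the block array:
// blks[0] starts as the head (MRU) and blks[numBlocks - 1] as the tail
// (LRU), with each block wired to a blkSize-byte slice of dataBlks.
// Note that this wiring assumes the cache has at least two blocks.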
void
FALRU::init(BaseCache* cache)
{
    // Set parent cache
    setCache(cache);

    head = &(blks[0]);
    head->prev = nullptr;
    head->next = &(blks[1]);
    head->setPosition(0, 0);
    head->data = &dataBlks[0];

    for (unsigned i = 1; i < numBlocks - 1; i++) {
        blks[i].prev = &(blks[i-1]);
        blks[i].next = &(blks[i+1]);
        blks[i].setPosition(0, i);

        // Associate a data chunk to the block
        blks[i].data = &dataBlks[blkSize*i];
    }

    tail = &(blks[numBlocks - 1]);
    tail->prev = &(blks[numBlocks - 2]);
    tail->next = nullptr;
    tail->setPosition(0, numBlocks - 1);
    tail->data = &dataBlks[(numBlocks - 1) * blkSize];

    cacheTracking.init(head, tail);
}

void
FALRU::regStats()
{
    BaseTags::regStats();
    cacheTracking.regStats(name());
}

void
FALRU::invalidate(CacheBlk *blk)
{
    // Erase block entry reference in the hash table
    auto num_erased M5_VAR_USED =
        tagHash.erase(std::make_pair(blk->tag, blk->isSecure()));

    // Sanity check; only one block reference should be erased
    assert(num_erased == 1);

    // Invalidate block entry. Must be done after the hash is erased
    BaseTags::invalidate(blk);

    // Decrease the number of tags in use
    tagsInUse--;

    // Move the block to the tail to make it the next victim
    moveToTail((FALRUBlk*)blk);
}

CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat)
{
    return accessBlock(addr, is_secure, lat, 0);
}

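// On a hit the block is promoted to the MRU position and lat is the
// access latency, stretched by any pending fill (whenReady) that has
// not completed yet; on a miss lat is just the tag lookup latency.
// If requested, in_caches_mask reports which tracked cache sizes the
// block would have hit in.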
CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat,
                   CachesMask *in_caches_mask)
{
    CachesMask mask = 0;
    FALRUBlk* blk = static_cast<FALRUBlk*>(findBlock(addr, is_secure));

    if (blk && blk->isValid()) {
        // If a cache hit
        lat = accessLatency;
        // Check if the block to be accessed is available. If not,
        // apply the accessLatency on top of block->whenReady.
        if (blk->whenReady > curTick() &&
            cache->ticksToCycles(blk->whenReady - curTick()) >
            accessLatency) {
            lat = cache->ticksToCycles(blk->whenReady - curTick()) +
                accessLatency;
        }
        mask = blk->inCachesMask;

        moveToHead(blk);
    } else {
        // If a cache miss
        lat = lookupLatency;
    }
    if (in_caches_mask) {
        *in_caches_mask = mask;
    }

    cacheTracking.recordAccess(blk);

    return blk;
}

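// Look the block up in the hash table; entries are keyed by the
// (tag, secure bit) pair, so the lookup is constant time rather than
// a walk of the LRU list.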
CacheBlk*
FALRU::findBlock(Addr addr, bool is_secure) const
{
    FALRUBlk* blk = nullptr;

    Addr tag = extractTag(addr);
    auto iter = tagHash.find(std::make_pair(tag, is_secure));
    if (iter != tagHash.end()) {
        blk = (*iter).second;
    }

    if (blk && blk->isValid()) {
        assert(blk->tag == tag);
        assert(blk->isSecure() == is_secure);
    }

    return blk;
}

ReplaceableEntry*
FALRU::findBlockBySetAndWay(int set, int way) const
{
    assert(set == 0);
    return &blks[way];
}

CacheBlk*
FALRU::findVictim(Addr addr, const bool is_secure,
                  std::vector<CacheBlk*>& evict_blks) const
{
    // The victim is always stored on the tail for the FALRU
    FALRUBlk* victim = tail;

    // There is only one eviction for this replacement
    evict_blks.push_back(victim);

    return victim;
}

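// Insert the block as the new MRU entry and register it in the tag
// hash so later lookups can find it; the assert checks that it is not
// currently resident in any of the tracked caches.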
void
FALRU::insertBlock(const Addr addr, const bool is_secure,
                   const int src_master_ID, const uint32_t task_ID,
                   CacheBlk *blk)
{
    FALRUBlk* falruBlk = static_cast<FALRUBlk*>(blk);

    // Make sure block is not present in the cache
    assert(falruBlk->inCachesMask == 0);

    // Do common block insertion functionality
    BaseTags::insertBlock(addr, is_secure, src_master_ID, task_ID, blk);

    // Increment tag counter
    tagsInUse++;

    // New block is the MRU
    moveToHead(falruBlk);

    // Insert new block in the hash table
    tagHash[std::make_pair(blk->tag, blk->isSecure())] = falruBlk;
}

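// Unlink the block from its current position and splice it in at the
// head of the LRU list, updating the tracked-cache boundaries first.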
void
FALRU::moveToHead(FALRUBlk *blk)
{
    // If block is not already head, do the moving
    if (blk != head) {
        cacheTracking.moveBlockToHead(blk);
        // If block is tail, set previous block as new tail
        if (blk == tail) {
            assert(blk->next == nullptr);
            tail = blk->prev;
            tail->next = nullptr;
        // Inform block's surrounding blocks that it has been moved
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }

        // Relink the block as the new head
        blk->next = head;
        blk->prev = nullptr;
        head->prev = blk;
        head = blk;

        cacheTracking.check(head, tail);
    }
}

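// Mirror of moveToHead(): unlink the block and splice it in at the
// tail, making it the next eviction candidate.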
void
FALRU::moveToTail(FALRUBlk *blk)
{
    // If block is not already tail, do the moving
    if (blk != tail) {
        cacheTracking.moveBlockToTail(blk);
        // If block is head, set next block as new head
        if (blk == head) {
            assert(blk->prev == nullptr);
            head = blk->next;
            head->prev = nullptr;
        // Inform block's surrounding blocks that it has been moved
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }

        // Relink the block as the new tail
        blk->prev = tail;
        blk->next = nullptr;
        tail->next = blk;
        tail = blk;

        cacheTracking.check(head, tail);
    }
}

FALRU *
FALRUParams::create()
{
    return new FALRU(this);
}

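// Debug-only consistency walk: compiled in only when FALRU_DEBUG is
// defined, it traverses the whole LRU list and panics if a block's
// inCachesMask or a tracked-cache boundary disagrees with the
// expected value.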
void
FALRU::CacheTracking::check(const FALRUBlk *head, const FALRUBlk *tail) const
{
#ifdef FALRU_DEBUG
    const FALRUBlk* blk = head;
    unsigned curr_size = 0;
    unsigned tracked_cache_size = minTrackedSize;
    CachesMask in_caches_mask = inAllCachesMask;
    int j = 0;

    while (blk) {
        panic_if(blk->inCachesMask != in_caches_mask, "Expected cache mask "
                 "%x found %x", blk->inCachesMask, in_caches_mask);

        curr_size += blkSize;
        if (curr_size == tracked_cache_size && blk != tail) {
            panic_if(boundaries[j] != blk, "Unexpected boundary for the %d-th "
                     "cache", j);
            tracked_cache_size <<= 1;
            // from this point, blocks fit only in the larger caches
            in_caches_mask &= ~(1U << j);
            ++j;
        }
        blk = blk->next;
    }
#endif // FALRU_DEBUG
}

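// Walk the list from the MRU and mark where each tracked cache size
// ends: boundaries[i] is the last (least recently used) block that
// still fits in the i-th tracked cache, with tracked sizes doubling
// from minTrackedSize upwards.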
void
FALRU::CacheTracking::init(FALRUBlk *head, FALRUBlk *tail)
{
    // early exit if we are not tracking any extra caches
    FALRUBlk* blk = numTrackedCaches ? head : nullptr;
    unsigned curr_size = 0;
    unsigned tracked_cache_size = minTrackedSize;
    CachesMask in_caches_mask = inAllCachesMask;
    int j = 0;

    while (blk) {
        blk->inCachesMask = in_caches_mask;

        curr_size += blkSize;
        if (curr_size == tracked_cache_size && blk != tail) {
            boundaries[j] = blk;

            tracked_cache_size <<= 1;
            // from this point, blocks fit only in the larger caches
            in_caches_mask &= ~(1U << j);
            ++j;
        }
        blk = blk->next;
    }
}

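// A block moving to the MRU position shifts every block behind it one
// step towards the LRU end; each cache the block did not fit in loses
// its old boundary block, so that boundary retreats one step towards
// the MRU.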
void
FALRU::CacheTracking::moveBlockToHead(FALRUBlk *blk)
{
    // Get the mask of all caches, in which the block didn't fit
    // before moving it to the head
    CachesMask update_caches_mask = inAllCachesMask ^ blk->inCachesMask;

    for (int i = 0; i < numTrackedCaches; i++) {
        CachesMask current_cache_mask = 1U << i;
        if (current_cache_mask & update_caches_mask) {
            // if the ith cache didn't fit the block (before it is moved to
            // the head), move the ith boundary 1 block closer to the
            // MRU
            boundaries[i]->inCachesMask &= ~current_cache_mask;
            boundaries[i] = boundaries[i]->prev;
        } else if (boundaries[i] == blk) {
            // Make sure the boundary doesn't point to the block
            // we are about to move
            boundaries[i] = blk->prev;
        }
    }

    // Make block reside in all caches
    blk->inCachesMask = inAllCachesMask;
}

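// The converse of moveBlockToHead(): a block moving to the LRU
// position shifts blocks towards the MRU, so every cache the block
// did fit in gains one block at its boundary.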
void
FALRU::CacheTracking::moveBlockToTail(FALRUBlk *blk)
{
    CachesMask update_caches_mask = blk->inCachesMask;

    for (int i = 0; i < numTrackedCaches; i++) {
        CachesMask current_cache_mask = 1U << i;
        if (current_cache_mask & update_caches_mask) {
            // if the block fit in the ith cache (before it is moved to
            // the tail), move the ith boundary 1 block closer to the
            // LRU
            boundaries[i] = boundaries[i]->next;
            if (boundaries[i] == blk) {
                // Make sure the boundary doesn't point to the block
                // we are about to move
                boundaries[i] = blk->next;
            }
            boundaries[i]->inCachesMask |= current_cache_mask;
        }
    }

    // The block now fits only in the actual cache
    blk->inCachesMask = 0;
}

void
FALRU::CacheTracking::recordAccess(FALRUBlk *blk)
{
    for (int i = 0; i < numTrackedCaches; i++) {
        if (blk && ((1U << i) & blk->inCachesMask)) {
            hits[i]++;
        } else {
            misses[i]++;
        }
    }

    // Record stats for the actual cache too
    if (blk && blk->isValid()) {
        hits[numTrackedCaches]++;
    } else {
        misses[numTrackedCaches]++;
    }

    accesses++;
}

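// Helper that pretty-prints a size in bytes with a binary-prefix
// suffix, e.g. 32768 becomes "32kB".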
void
printSize(std::ostream &stream, size_t size)
{
    static const char *SIZES[] = { "B", "kB", "MB", "GB", "TB", "PB" };
    // Stop at the largest suffix so div never indexes past the end of
    // SIZES
    int div = 0;
    while (size >= 1024 &&
           div < (int)(sizeof(SIZES) / sizeof(*SIZES)) - 1) {
        div++;
        size >>= 10;
    }
    stream << size << SIZES[div];
}

void
FALRU::CacheTracking::regStats(std::string name)
{
    hits
        .init(numTrackedCaches + 1)
        .name(name + ".falru_hits")
        .desc("The number of hits in each cache size.")
        ;
    misses
        .init(numTrackedCaches + 1)
        .name(name + ".falru_misses")
        .desc("The number of misses in each cache size.")
        ;
    accesses
        .name(name + ".falru_accesses")
        .desc("The number of accesses to the FA LRU cache.")
        ;

    for (unsigned i = 0; i < numTrackedCaches + 1; ++i) {
        std::stringstream size_str;
        printSize(size_str, minTrackedSize << i);
        hits.subname(i, size_str.str());
        hits.subdesc(i, "Hits in a " + size_str.str() + " cache");
        misses.subname(i, size_str.str());
        misses.subdesc(i, "Misses in a " + size_str.str() + " cache");
    }
}