/*
 * Copyright (c) 2013,2016-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definitions of a fully associative LRU tag store.
 */

#include "mem/cache/tags/fa_lru.hh"

#include <cassert>
#include <sstream>

#include "base/intmath.hh"
#include "base/logging.hh"
#include "mem/cache/base.hh"
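// The FALRU tag store keeps every block on a single doubly-linked list
// ordered by recency of use: head is the most recently used (MRU) block
// and tail is the least recently used (LRU) block, i.e., the next
// victim.  A hash table maps tags to blocks so lookups do not have to
// walk the list.  The constructor threads all numBlocks entries onto
// this list and hands the endpoints to the CacheTracking helper, which
// models a range of smaller cache sizes on top of the same list.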
FALRU::FALRU(const Params *p)
    : BaseTags(p),
      cacheTracking(p->min_tracked_cache_size, size, blkSize)
{
    if (!isPowerOf2(blkSize))
        fatal("cache block size (in bytes) `%d' must be a power of two",
              blkSize);
    if (!isPowerOf2(size))
        fatal("Cache Size must be power of 2 for now");

    blks = new FALRUBlk[numBlocks];

    head = &(blks[0]);
    head->prev = nullptr;
    head->next = &(blks[1]);
    head->set = 0;
    head->way = 0;
    head->data = &dataBlks[0];

    for (unsigned i = 1; i < numBlocks - 1; i++) {
        blks[i].prev = &(blks[i-1]);
        blks[i].next = &(blks[i+1]);
        blks[i].set = 0;
        blks[i].way = i;

        // Associate a data chunk with the block
        blks[i].data = &dataBlks[blkSize*i];
    }

    tail = &(blks[numBlocks - 1]);
    tail->prev = &(blks[numBlocks - 2]);
    tail->next = nullptr;
    tail->set = 0;
    tail->way = numBlocks - 1;
    tail->data = &dataBlks[(numBlocks - 1) * blkSize];

    cacheTracking.init(head, tail);
}

FALRU::~FALRU()
{
    delete[] blks;
}

void
FALRU::regStats()
{
    BaseTags::regStats();
    cacheTracking.regStats(name());
}

FALRUBlk *
FALRU::hashLookup(Addr addr) const
{
    tagIterator iter = tagHash.find(addr);
    if (iter != tagHash.end()) {
        return (*iter).second;
    }
    return nullptr;
}

void
FALRU::invalidate(CacheBlk *blk)
{
    BaseTags::invalidate(blk);

    // Decrease the number of tags in use
    tagsInUse--;

    // Move the block to the tail to make it the next victim
    moveToTail(static_cast<FALRUBlk*>(blk));

    // Erase the block's entry in the hash table
    tagHash.erase(blk->tag);
}
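// Access a block.  On a hit the block is promoted to the MRU position
// and the returned latency accounts for a fill that is still in flight
// (whenReady in the future); on a miss only the tag lookup latency is
// charged.  The set of tracked cache sizes in which the access would
// have hit is optionally returned through in_caches_mask.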
CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat)
{
    return accessBlock(addr, is_secure, lat, nullptr);
}

CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat,
                   CachesMask *in_caches_mask)
{
    CachesMask mask = 0;
    FALRUBlk* blk = static_cast<FALRUBlk*>(findBlock(addr, is_secure));

    if (blk != nullptr) {
        // If a cache hit
        lat = accessLatency;
        // Check if the block to be accessed is available. If not,
        // apply the accessLatency on top of block->whenReady.
        if (blk->whenReady > curTick() &&
            cache->ticksToCycles(blk->whenReady - curTick()) >
            accessLatency) {
            lat = cache->ticksToCycles(blk->whenReady - curTick()) +
                accessLatency;
        }
        mask = blk->inCachesMask;

        moveToHead(blk);
    } else {
        // If a cache miss
        lat = lookupLatency;
    }
    if (in_caches_mask) {
        *in_caches_mask = mask;
    }

    cacheTracking.recordAccess(blk);

    return blk;
}

CacheBlk*
FALRU::findBlock(Addr addr, bool is_secure) const
{
    Addr tag = extractTag(addr);
    FALRUBlk* blk = hashLookup(tag);

    if (blk && blk->isValid()) {
        assert(blk->tag == tag);
        assert(blk->isSecure() == is_secure);
    } else {
        blk = nullptr;
    }
    return blk;
}

ReplaceableEntry*
FALRU::findBlockBySetAndWay(int set, int way) const
{
    assert(set == 0);
    return &blks[way];
}

CacheBlk*
FALRU::findVictim(Addr addr, const bool is_secure,
                  std::vector<CacheBlk*>& evict_blks) const
{
    // The victim is always stored at the tail for the FALRU
    FALRUBlk* victim = tail;

    // There is only one eviction for this replacement
    evict_blks.push_back(victim);

    return victim;
}
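// Insert a new block.  The block must currently be invalid (its
// inCachesMask must be clear), it becomes the MRU entry, and its tag
// is registered in the hash table so subsequent lookups can find it.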
void
FALRU::insertBlock(const PacketPtr pkt, CacheBlk *blk)
{
    FALRUBlk* falruBlk = static_cast<FALRUBlk*>(blk);

    // Make sure the block is not present in the cache
    assert(falruBlk->inCachesMask == 0);

    // Do common block insertion functionality
    BaseTags::insertBlock(pkt, blk);

    // Increment tag counter
    tagsInUse++;

    // New block is the MRU
    moveToHead(falruBlk);

    // Insert the new block in the hash table
    tagHash[falruBlk->tag] = falruBlk;
}

void
FALRU::moveToHead(FALRUBlk *blk)
{
    // If the block is not already the head, do the moving
    if (blk != head) {
        cacheTracking.moveBlockToHead(blk);
        // If the block is the tail, make its predecessor the new tail
        if (blk == tail){
            assert(blk->next == nullptr);
            tail = blk->prev;
            tail->next = nullptr;
        // Otherwise, unlink the block from its neighbors
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }

        // Link the block in at the head
        blk->next = head;
        blk->prev = nullptr;
        head->prev = blk;
        head = blk;

        cacheTracking.check(head, tail);
    }
}

void
FALRU::moveToTail(FALRUBlk *blk)
{
    // If the block is not already the tail, do the moving
    if (blk != tail) {
        cacheTracking.moveBlockToTail(blk);
        // If the block is the head, make its successor the new head
        if (blk == head){
            assert(blk->prev == nullptr);
            head = blk->next;
            head->prev = nullptr;
        // Otherwise, unlink the block from its neighbors
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }

        // Link the block in at the tail
        blk->prev = tail;
        blk->next = nullptr;
        tail->next = blk;
        tail = blk;

        cacheTracking.check(head, tail);
    }
}

FALRU *
FALRUParams::create()
{
    return new FALRU(this);
}

void
FALRU::CacheTracking::check(FALRUBlk *head, FALRUBlk *tail)
{
#ifdef FALRU_DEBUG
    FALRUBlk* blk = head;
    unsigned curr_size = 0;
    unsigned tracked_cache_size = minTrackedSize;
    CachesMask in_caches_mask = inAllCachesMask;
    int j = 0;

    while (blk) {
        panic_if(blk->inCachesMask != in_caches_mask, "Expected cache mask "
                 "%x found %x", blk->inCachesMask, in_caches_mask);

        curr_size += blkSize;
        if (curr_size == tracked_cache_size && blk != tail) {
            panic_if(boundaries[j] != blk, "Unexpected boundary for the %d-th "
                     "cache", j);
            tracked_cache_size <<= 1;
            // from this point on, blocks fit only in the larger caches
            in_caches_mask &= ~(1U << j);
            ++j;
        }
        blk = blk->next;
    }
#endif // FALRU_DEBUG
}
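// CacheTracking models a set of hypothetical caches of sizes
// minTrackedSize, 2 * minTrackedSize, 4 * minTrackedSize, and so on,
// on top of the real LRU list.  Because LRU is a stack algorithm, the
// contents of a cache of size S are always the first S bytes' worth of
// blocks starting at the MRU head, so each tracked cache i can be
// summarized by a single pointer, boundaries[i], to its last (least
// recently used) resident block.  Bit i of a block's inCachesMask is
// set iff the block lies at or above boundaries[i], i.e., iff it would
// be resident in the i-th tracked cache.  init() establishes these
// invariants with one walk of the list from head to tail.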
void
FALRU::CacheTracking::init(FALRUBlk *head, FALRUBlk *tail)
{
    // Early exit if we are not tracking any extra caches
    FALRUBlk* blk = numTrackedCaches ? head : nullptr;
    unsigned curr_size = 0;
    unsigned tracked_cache_size = minTrackedSize;
    CachesMask in_caches_mask = inAllCachesMask;
    int j = 0;

    while (blk) {
        blk->inCachesMask = in_caches_mask;

        curr_size += blkSize;
        if (curr_size == tracked_cache_size && blk != tail) {
            boundaries[j] = blk;

            tracked_cache_size <<= 1;
            // from this point on, blocks fit only in the larger caches
            in_caches_mask &= ~(1U << j);
            ++j;
        }
        blk = blk->next;
    }
}

void
FALRU::CacheTracking::moveBlockToHead(FALRUBlk *blk)
{
    // Get the mask of all tracked caches in which the block did not
    // fit before it was moved to the head
    CachesMask update_caches_mask = inAllCachesMask ^ blk->inCachesMask;

    for (int i = 0; i < numTrackedCaches; i++) {
        CachesMask current_cache_mask = 1U << i;
        if (current_cache_mask & update_caches_mask) {
            // If the block did not fit in the ith cache (before the
            // move), shift the ith boundary one block closer to the MRU
            boundaries[i]->inCachesMask &= ~current_cache_mask;
            boundaries[i] = boundaries[i]->prev;
        } else if (boundaries[i] == blk) {
            // Make sure the boundary doesn't point to the block
            // we are about to move
            boundaries[i] = blk->prev;
        }
    }

    // The block now resides in all tracked caches
    blk->inCachesMask = inAllCachesMask;
}

void
FALRU::CacheTracking::moveBlockToTail(FALRUBlk *blk)
{
    CachesMask update_caches_mask = blk->inCachesMask;

    for (int i = 0; i < numTrackedCaches; i++) {
        CachesMask current_cache_mask = 1U << i;
        if (current_cache_mask & update_caches_mask) {
            // If the block fit in the ith cache (before the move),
            // shift the ith boundary one block closer to the LRU
            boundaries[i] = boundaries[i]->next;
            if (boundaries[i] == blk) {
                // Make sure the boundary doesn't point to the block
                // we are about to move
                boundaries[i] = blk->next;
            }
            boundaries[i]->inCachesMask |= current_cache_mask;
        }
    }

    // The block now fits only in the actual cache
    blk->inCachesMask = 0;
}
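// Record a single access against every tracked cache size.  Thanks to
// the inclusion property maintained above, one real lookup tells us,
// for each hypothetical cache, whether it would have hit (the block's
// inCachesMask bit for that size is set) or missed.  The last entry of
// hits/misses counts the real, full-size cache.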
void
FALRU::CacheTracking::recordAccess(FALRUBlk *blk)
{
    for (int i = 0; i < numTrackedCaches; i++) {
        if (blk && ((1U << i) & blk->inCachesMask)) {
            hits[i]++;
        } else {
            misses[i]++;
        }
    }

    // Record stats for the actual cache too
    if (blk) {
        hits[numTrackedCaches]++;
    } else {
        misses[numTrackedCaches]++;
    }

    accesses++;
}

void
printSize(std::ostream &stream, size_t size)
{
    static const char *SIZES[] = { "B", "kB", "MB", "GB", "TB", "PB" };
    const int num_sizes = sizeof(SIZES) / sizeof(SIZES[0]);
    int div = 0;
    // Scale down by factors of 1024, stopping before we run out of
    // unit suffixes so SIZES[div] is always a valid index
    while (size >= 1024 && div < num_sizes - 1) {
        div++;
        size >>= 10;
    }
    stream << size << SIZES[div];
}

void
FALRU::CacheTracking::regStats(std::string name)
{
    hits
        .init(numTrackedCaches + 1)
        .name(name + ".falru_hits")
        .desc("The number of hits in each cache size.")
        ;
    misses
        .init(numTrackedCaches + 1)
        .name(name + ".falru_misses")
        .desc("The number of misses in each cache size.")
        ;
    accesses
        .name(name + ".falru_accesses")
        .desc("The number of accesses to the FA LRU cache.")
        ;

    for (unsigned i = 0; i < numTrackedCaches + 1; ++i) {
        std::stringstream size_str;
        printSize(size_str, minTrackedSize << i);
        hits.subname(i, size_str.str());
        hits.subdesc(i, "Hits in a " + size_str.str() + " cache");
        misses.subname(i, size_str.str());
        misses.subdesc(i, "Misses in a " + size_str.str() + " cache");
    }
}