/*
 * Copyright (c) 2013,2016-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definitions of a fully associative LRU tag store.
 */

#include "mem/cache/tags/fa_lru.hh"

#include <cassert>
#include <sstream>

#include "base/intmath.hh"
#include "base/logging.hh"

/**
 * Construct the fully associative tag store.
 *
 * Allocates one FALRUBlk per cache block and threads all blocks into a
 * doubly-linked list ordered by recency: head is the MRU block, tail is
 * the LRU block (the next eviction victim). Each block is also bound to
 * its backing data chunk in dataBlks.
 *
 * NOTE(review): the list wiring below assumes numBlocks >= 2 (it
 * dereferences blks[1] and blks[numBlocks - 2]) — confirm with the
 * configuration system that a one-block cache is rejected upstream.
 */
FALRU::FALRU(const Params *p)
    : BaseTags(p),

      cacheTracking(p->min_tracked_cache_size, size, blkSize)
{
    // Power-of-two sizes let blkAlign() and the tracked-cache boundary
    // logic work with simple shifts/masks.
    if (!isPowerOf2(blkSize))
        fatal("cache block size (in bytes) `%d' must be a power of two",
              blkSize);
    if (!isPowerOf2(size))
        fatal("Cache Size must be power of 2 for now");

    blks = new FALRUBlk[numBlocks];

    // Head of the recency list: initially block 0 is the MRU.
    head = &(blks[0]);
    head->prev = nullptr;
    head->next = &(blks[1]);
    head->set = 0;          // fully associative: everything is in set 0
    head->way = 0;
    head->data = &dataBlks[0];

    // Link the interior blocks in index order.
    for (unsigned i = 1; i < numBlocks - 1; i++) {
        blks[i].prev = &(blks[i-1]);
        blks[i].next = &(blks[i+1]);
        blks[i].set = 0;
        blks[i].way = i;

        // Associate a data chunk to the block
        blks[i].data = &dataBlks[blkSize*i];
    }

    // Tail of the recency list: initially the last block is the LRU.
    tail = &(blks[numBlocks - 1]);
    tail->prev = &(blks[numBlocks - 2]);
    tail->next = nullptr;
    tail->set = 0;
    tail->way = numBlocks - 1;
    tail->data = &dataBlks[(numBlocks - 1) * blkSize];

    // Set up the inCachesMask/boundary bookkeeping for the smaller
    // hypothetical caches being tracked for stats.
    cacheTracking.init(head, tail);
}

FALRU::~FALRU()
{
    // blks is the only resource owned directly by this class.
    delete[] blks;
}

/** Register this tag store's stats, including the per-size hit/miss
 *  counters kept by cacheTracking. */
void
FALRU::regStats()
{
    BaseTags::regStats();
    cacheTracking.regStats(name());
}

/**
 * Look up a block-aligned address in the tag hash.
 *
 * @param addr A cache-block-aligned address (callers pass blkAlign()ed
 *             values; the hash is keyed on the aligned address).
 * @return The block holding addr, or nullptr if not present.
 */
FALRUBlk *
FALRU::hashLookup(Addr addr) const
{
    tagIterator iter = tagHash.find(addr);
    if (iter != tagHash.end()) {
        return (*iter).second;
    }
    return nullptr;
}

/**
 * Invalidate a block: besides the common BaseTags bookkeeping, demote
 * it to the tail of the recency list (making it the next victim) and
 * remove its tag from the hash table.
 */
void
FALRU::invalidate(CacheBlk *blk)
{
    BaseTags::invalidate(blk);

    // Move the block to the tail to make it the next victim
    moveToTail((FALRUBlk*)blk);

    // Erase block entry in the hash table
    tagHash.erase(blk->tag);
}

/** Convenience overload: access without reporting the in-caches mask. */
CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat)
{
    return accessBlock(addr, is_secure, lat, 0);
}

/**
 * Access a block, updating LRU state and tracking stats.
 *
 * On a hit the block is promoted to MRU and lat is the access latency,
 * inflated if the block's data is not yet ready (whenReady in the
 * future). On a miss lat is the lookup latency and nullptr is returned.
 *
 * @param addr           Address to access (aligned internally).
 * @param is_secure      Secure-state of the access (unused here;
 *                       the hash is keyed on address only).
 * @param lat            [out] Latency of this access, in cycles.
 * @param in_caches_mask [out, optional] Mask of tracked cache sizes the
 *                       block would have been present in.
 * @return The hit block, or nullptr on a miss.
 */
CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat,
                   CachesMask *in_caches_mask)
{
    CachesMask mask = 0;
    Addr blkAddr = blkAlign(addr);
    FALRUBlk* blk = hashLookup(blkAddr);

    if (blk && blk->isValid()) {
        // If a cache hit
        lat = accessLatency;
        // Check if the block to be accessed is available. If not,
        // apply the accessLatency on top of block->whenReady.
        if (blk->whenReady > curTick() &&
            cache->ticksToCycles(blk->whenReady - curTick()) >
            accessLatency) {
            lat = cache->ticksToCycles(blk->whenReady - curTick()) +
                accessLatency;
        }
        assert(blk->tag == blkAddr);
        // Snapshot the mask before moveToHead() resets it to
        // inAllCachesMask.
        mask = blk->inCachesMask;
        moveToHead(blk);
    } else {
        // If a cache miss
        lat = lookupLatency;
        blk = nullptr;
    }
    if (in_caches_mask) {
        *in_caches_mask = mask;
    }

    // Record hit/miss against every tracked cache size (blk may be
    // nullptr here, meaning a miss everywhere).
    cacheTracking.recordAccess(blk);

    return blk;
}

17410244Satgutier@umich.eduCacheBlk* 17510244Satgutier@umich.eduFALRU::findBlock(Addr addr, bool is_secure) const 17610244Satgutier@umich.edu{ 17710244Satgutier@umich.edu Addr blkAddr = blkAlign(addr); 17810244Satgutier@umich.edu FALRUBlk* blk = hashLookup(blkAddr); 17910244Satgutier@umich.edu 18010244Satgutier@umich.edu if (blk && blk->isValid()) { 18110244Satgutier@umich.edu assert(blk->tag == blkAddr); 18210244Satgutier@umich.edu assert(blk->isSecure() == is_secure); 18310244Satgutier@umich.edu } else { 18410244Satgutier@umich.edu blk = nullptr; 18510244Satgutier@umich.edu } 18610244Satgutier@umich.edu return blk; 18710244Satgutier@umich.edu} 18810244Satgutier@umich.edu 18910244Satgutier@umich.eduCacheBlk* 19010244Satgutier@umich.eduFALRU::findBlockBySetAndWay(int set, int way) const 19110244Satgutier@umich.edu{ 19210244Satgutier@umich.edu assert(set == 0); 19310244Satgutier@umich.edu return &blks[way]; 19410244Satgutier@umich.edu} 19510244Satgutier@umich.edu 19610244Satgutier@umich.eduCacheBlk* 19710244Satgutier@umich.eduFALRU::findVictim(Addr addr) 19810244Satgutier@umich.edu{ 19910244Satgutier@umich.edu return tail; 20010244Satgutier@umich.edu} 20110244Satgutier@umich.edu 20210244Satgutier@umich.eduvoid 20310244Satgutier@umich.eduFALRU::insertBlock(PacketPtr pkt, CacheBlk *blk) 20410244Satgutier@umich.edu{ 20510244Satgutier@umich.edu FALRUBlk* falruBlk = static_cast<FALRUBlk*>(blk); 20610244Satgutier@umich.edu 20710244Satgutier@umich.edu // Make sure block is not present in the cache 20810244Satgutier@umich.edu assert(falruBlk->inCachesMask == 0); 20910244Satgutier@umich.edu 21010244Satgutier@umich.edu // Do common block insertion functionality 21110244Satgutier@umich.edu BaseTags::insertBlock(pkt, blk); 21210244Satgutier@umich.edu 21310244Satgutier@umich.edu // New block is the MRU 21410244Satgutier@umich.edu moveToHead(falruBlk); 21510244Satgutier@umich.edu 21610244Satgutier@umich.edu // Insert new block in the hash table 21710244Satgutier@umich.edu 
tagHash[falruBlk->tag] = falruBlk; 21810244Satgutier@umich.edu} 21910244Satgutier@umich.edu 22010244Satgutier@umich.eduvoid 22111429Sandreas.sandberg@arm.comFALRU::moveToHead(FALRUBlk *blk) 22210244Satgutier@umich.edu{ 22311429Sandreas.sandberg@arm.com // If block is not already head, do the moving 22410244Satgutier@umich.edu if (blk != head) { 22511429Sandreas.sandberg@arm.com cacheTracking.moveBlockToHead(blk); 22610244Satgutier@umich.edu // If block is tail, set previous block as new tail 22710244Satgutier@umich.edu if (blk == tail){ 22810244Satgutier@umich.edu assert(blk->next == nullptr); 22910244Satgutier@umich.edu tail = blk->prev; 23010244Satgutier@umich.edu tail->next = nullptr; 23110244Satgutier@umich.edu // Inform block's surrounding blocks that it has been moved 23210244Satgutier@umich.edu } else { 23311429Sandreas.sandberg@arm.com blk->prev->next = blk->next; 23410244Satgutier@umich.edu blk->next->prev = blk->prev; 23510244Satgutier@umich.edu } 23610244Satgutier@umich.edu 23710244Satgutier@umich.edu // Swap pointers 23810244Satgutier@umich.edu blk->next = head; 23911429Sandreas.sandberg@arm.com blk->prev = nullptr; 24011429Sandreas.sandberg@arm.com head->prev = blk; 24111426Smitch.hayenga@arm.com head = blk; 24211429Sandreas.sandberg@arm.com 24311429Sandreas.sandberg@arm.com cacheTracking.check(head, tail); 24411429Sandreas.sandberg@arm.com } 24510244Satgutier@umich.edu} 24610785Sgope@wisc.edu 24710785Sgope@wisc.eduvoid 24810785Sgope@wisc.eduFALRU::moveToTail(FALRUBlk *blk) 24910785Sgope@wisc.edu{ 25010785Sgope@wisc.edu // If block is not already tail, do the moving 25110785Sgope@wisc.edu if (blk != tail) { 252 cacheTracking.moveBlockToTail(blk); 253 // If block is head, set next block as new head 254 if (blk == head){ 255 assert(blk->prev == nullptr); 256 head = blk->next; 257 head->prev = nullptr; 258 // Inform block's surrounding blocks that it has been moved 259 } else { 260 blk->prev->next = blk->next; 261 blk->next->prev = blk->prev; 262 } 263 
264 // Swap pointers 265 blk->prev = tail; 266 blk->next = nullptr; 267 tail->next = blk; 268 tail = blk; 269 270 cacheTracking.check(head, tail); 271 } 272} 273 274FALRU * 275FALRUParams::create() 276{ 277 return new FALRU(this); 278} 279 280void 281FALRU::CacheTracking::check(FALRUBlk *head, FALRUBlk *tail) 282{ 283#ifdef FALRU_DEBUG 284 FALRUBlk* blk = head; 285 unsigned curr_size = 0; 286 unsigned tracked_cache_size = minTrackedSize; 287 CachesMask in_caches_mask = inAllCachesMask; 288 int j = 0; 289 290 while (blk) { 291 panic_if(blk->inCachesMask != in_caches_mask, "Expected cache mask " 292 "%x found %x", blk->inCachesMask, in_caches_mask); 293 294 curr_size += blkSize; 295 if (curr_size == tracked_cache_size && blk != tail) { 296 panic_if(boundaries[j] != blk, "Unexpected boundary for the %d-th " 297 "cache", j); 298 tracked_cache_size <<= 1; 299 // from this point, blocks fit only in the larger caches 300 in_caches_mask &= ~(1U << j); 301 ++j; 302 } 303 blk = blk->next; 304 } 305#endif // FALRU_DEBUG 306} 307 308void 309FALRU::CacheTracking::init(FALRUBlk *head, FALRUBlk *tail) 310{ 311 // early exit if we are not tracking any extra caches 312 FALRUBlk* blk = numTrackedCaches ? 
head : nullptr; 313 unsigned curr_size = 0; 314 unsigned tracked_cache_size = minTrackedSize; 315 CachesMask in_caches_mask = inAllCachesMask; 316 int j = 0; 317 318 while (blk) { 319 blk->inCachesMask = in_caches_mask; 320 321 curr_size += blkSize; 322 if (curr_size == tracked_cache_size && blk != tail) { 323 boundaries[j] = blk; 324 325 tracked_cache_size <<= 1; 326 // from this point, blocks fit only in the larger caches 327 in_caches_mask &= ~(1U << j); 328 ++j; 329 } 330 blk = blk->next; 331 } 332} 333 334 335void 336FALRU::CacheTracking::moveBlockToHead(FALRUBlk *blk) 337{ 338 // Get the mask of all caches, in which the block didn't fit 339 // before moving it to the head 340 CachesMask update_caches_mask = inAllCachesMask ^ blk->inCachesMask; 341 342 for (int i = 0; i < numTrackedCaches; i++) { 343 CachesMask current_cache_mask = 1U << i; 344 if (current_cache_mask & update_caches_mask) { 345 // if the ith cache didn't fit the block (before it is moved to 346 // the head), move the ith boundary 1 block closer to the 347 // MRU 348 boundaries[i]->inCachesMask &= ~current_cache_mask; 349 boundaries[i] = boundaries[i]->prev; 350 } else if (boundaries[i] == blk) { 351 // Make sure the boundary doesn't point to the block 352 // we are about to move 353 boundaries[i] = blk->prev; 354 } 355 } 356 357 // Make block reside in all caches 358 blk->inCachesMask = inAllCachesMask; 359} 360 361void 362FALRU::CacheTracking::moveBlockToTail(FALRUBlk *blk) 363{ 364 CachesMask update_caches_mask = blk->inCachesMask; 365 366 for (int i = 0; i < numTrackedCaches; i++) { 367 CachesMask current_cache_mask = 1U << i; 368 if (current_cache_mask & update_caches_mask) { 369 // if the ith cache fitted the block (before it is moved to 370 // the tail), move the ith boundary 1 block closer to the 371 // LRU 372 boundaries[i] = boundaries[i]->next; 373 if (boundaries[i] == blk) { 374 // Make sure the boundary doesn't point to the block 375 // we are about to move 376 boundaries[i] = 
blk->next; 377 } 378 boundaries[i]->inCachesMask |= current_cache_mask; 379 } 380 } 381 382 // The block now fits only in the actual cache 383 blk->inCachesMask = 0; 384} 385 386void 387FALRU::CacheTracking::recordAccess(FALRUBlk *blk) 388{ 389 for (int i = 0; i < numTrackedCaches; i++) { 390 if (blk && ((1U << i) & blk->inCachesMask)) { 391 hits[i]++; 392 } else { 393 misses[i]++; 394 } 395 } 396 397 // Record stats for the actual cache too 398 if (blk) { 399 hits[numTrackedCaches]++; 400 } else { 401 misses[numTrackedCaches]++; 402 } 403 404 accesses++; 405} 406 407void 408printSize(std::ostream &stream, size_t size) 409{ 410 static const char *SIZES[] = { "B", "kB", "MB", "GB", "TB", "ZB" }; 411 int div = 0; 412 while (size >= 1024 && div < (sizeof SIZES / sizeof *SIZES)) { 413 div++; 414 size >>= 10; 415 } 416 stream << size << SIZES[div]; 417} 418 419void 420FALRU::CacheTracking::regStats(std::string name) 421{ 422 hits 423 .init(numTrackedCaches + 1) 424 .name(name + ".falru_hits") 425 .desc("The number of hits in each cache size.") 426 ; 427 misses 428 .init(numTrackedCaches + 1) 429 .name(name + ".falru_misses") 430 .desc("The number of misses in each cache size.") 431 ; 432 accesses 433 .name(name + ".falru_accesses") 434 .desc("The number of accesses to the FA LRU cache.") 435 ; 436 437 for (unsigned i = 0; i < numTrackedCaches + 1; ++i) { 438 std::stringstream size_str; 439 printSize(size_str, minTrackedSize << i); 440 hits.subname(i, size_str.str()); 441 hits.subdesc(i, "Hits in a " + size_str.str() + " cache"); 442 misses.subname(i, size_str.str()); 443 misses.subdesc(i, "Misses in a " + size_str.str() + " cache"); 444 } 445} 446