// fa_lru.hh revision 13216
/*
 * Copyright (c) 2012-2013,2016,2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declaration of a fully associative LRU tag store.
 */

#ifndef __MEM_CACHE_TAGS_FA_LRU_HH__
#define __MEM_CACHE_TAGS_FA_LRU_HH__

#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>
#include <vector>

#include "base/bitfield.hh"
#include "base/intmath.hh"
#include "base/logging.hh"
#include "base/statistics.hh"
#include "base/types.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/tags/base.hh"
#include "params/FALRU.hh"

// Uncomment to enable sanity checks for the FALRU cache and the
// TrackedCaches class
//#define FALRU_DEBUG

class BaseCache;
class ReplaceableEntry;

// A bitmask of the caches we are keeping track of. Currently the
// lowest bit is the smallest cache we are tracking, as it is
// specified by the corresponding parameter. The rest of the bits are
// for exponentially growing cache sizes.
typedef uint32_t CachesMask;

/**
 * A fully associative cache block. Blocks are kept on a doubly linked
 * list ordered by recency of use (head = MRU, tail = LRU).
 */
class FALRUBlk : public CacheBlk
{
  public:
    FALRUBlk() : CacheBlk(), prev(nullptr), next(nullptr), inCachesMask(0) {}

    /** The previous block in LRU order. */
    FALRUBlk *prev;
    /** The next block in LRU order. */
    FALRUBlk *next;

    /** A bit mask of the caches that fit this block. */
    CachesMask inCachesMask;
};

/**
 * A fully associative LRU cache. Keeps statistics for accesses to a number of
 * cache sizes at once.
 */
class FALRU : public BaseTags
{
  public:
    /** Typedef the block type used in this class. */
    typedef FALRUBlk BlkType;

  protected:
    /** The cache blocks. */
    FALRUBlk *blks;

    /** The MRU block. */
    FALRUBlk *head;
    /** The LRU block. */
    FALRUBlk *tail;

    /** Hash table type mapping addresses to cache block pointers. */
    struct PairHash
    {
        template <class T1, class T2>
        std::size_t operator()(const std::pair<T1, T2> &p) const
        {
            // Combine the hashes of both pair members; XOR is sufficient
            // here since the two halves (address, secure bit) are unrelated.
            return std::hash<T1>()(p.first) ^ std::hash<T2>()(p.second);
        }
    };
    /** Lookup key: (block-aligned address, is_secure). */
    typedef std::pair<Addr, bool> TagHashKey;
    typedef std::unordered_map<TagHashKey, FALRUBlk *, PairHash> TagHash;

    /** The address hash table. */
    TagHash tagHash;

    /**
     * Move a cache block to the MRU position.
     *
     * @param blk The block to promote.
     */
    void moveToHead(FALRUBlk *blk);

    /**
     * Move a cache block to the LRU position.
     *
     * @param blk The block to demote.
     */
    void moveToTail(FALRUBlk *blk);

  public:
    typedef FALRUParams Params;

    /**
     * Construct and initialize this cache tagstore.
     */
    FALRU(const Params *p);
    ~FALRU();

    /**
     * Initialize blocks and set the parent cache back pointer.
     *
     * @param _cache Pointer to parent cache.
     */
    void init(BaseCache *_cache) override;

    /**
     * Register the stats for this object.
     */
    void regStats() override;

    /**
     * Invalidate a cache block.
     * @param blk The block to invalidate.
     */
    void invalidate(CacheBlk *blk) override;

    /**
     * Access block and update replacement data. May not succeed, in which
     * case nullptr pointer is returned. This has all the implications of a
     * cache access and should only be used as such.
     * Returns the access latency and inCachesMask flags as a side effect.
     * @param addr The address to look for.
     * @param is_secure True if the target memory space is secure.
     * @param lat The latency of the access.
     * @param in_cache_mask Mask indicating the caches in which the blk fits.
     * @return Pointer to the cache block.
     */
    CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
                          CachesMask *in_cache_mask);

    /**
     * Just a wrapper of above function to conform with the base interface.
     */
    CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) override;

    /**
     * Find the block in the cache, do not update the replacement data.
     * @param addr The address to look for.
     * @param is_secure True if the target memory space is secure.
     * @return Pointer to the cache block.
     */
    CacheBlk* findBlock(Addr addr, bool is_secure) const override;

    /**
     * Find a block given set and way.
     *
     * @param set The set of the block.
     * @param way The way of the block.
     * @return The block.
     */
    ReplaceableEntry* findBlockBySetAndWay(int set, int way) const override;

    /**
     * Find replacement victim based on address. The list of evicted blocks
     * only contains the victim.
     *
     * @param addr Address to find a victim for.
     * @param is_secure True if the target memory space is secure.
     * @param evict_blks Cache blocks to be evicted.
     * @return Cache block to be replaced.
     */
    CacheBlk* findVictim(Addr addr, const bool is_secure,
                         std::vector<CacheBlk*>& evict_blks) const override;

    /**
     * Insert the new block into the cache and update replacement data.
     *
     * @param addr Address of the block.
     * @param is_secure Whether the block is in secure space or not.
     * @param src_master_ID The source requestor ID.
     * @param task_ID The new task ID.
     * @param blk The block to update.
     */
    void insertBlock(const Addr addr, const bool is_secure,
                     const int src_master_ID, const uint32_t task_ID,
                     CacheBlk *blk) override;

    /**
     * Generate the tag from the address. For fully associative this is just
     * the block address.
     * @param addr The address to get the tag from.
     * @return The tag.
     */
    Addr extractTag(Addr addr) const override
    {
        return blkAlign(addr);
    }

    /**
     * Regenerate the block address from the tag.
     *
     * @param block The block.
     * @return the block address.
     */
    Addr regenerateBlkAddr(const CacheBlk* blk) const override
    {
        // For a fully associative cache the tag is the aligned block
        // address itself (see extractTag above).
        return blk->tag;
    }

    /** Apply @p visitor to every block in the tag store. */
    void forEachBlk(std::function<void(CacheBlk &)> visitor) override {
        for (int i = 0; i < numBlocks; i++) {
            visitor(blks[i]);
        }
    }

    /**
     * Return true if @p visitor returns true for any block; stops at the
     * first match.
     */
    bool anyBlk(std::function<bool(CacheBlk &)> visitor) override {
        for (int i = 0; i < numBlocks; i++) {
            if (visitor(blks[i])) {
                return true;
            }
        }
        return false;
    }

  private:
    /**
     * Mechanism that allows us to simultaneously collect miss
     * statistics for multiple caches. Currently, we keep track of
     * caches from a set minimum size of interest up to the actual
     * cache size.
     */
    class CacheTracking
    {
      public:
        CacheTracking(unsigned min_size, unsigned max_size,
                      unsigned block_size)
            : blkSize(block_size),
              minTrackedSize(min_size),
              // One tracked cache per power-of-two size step between
              // min_size and max_size.
              numTrackedCaches(max_size > min_size ?
                               floorLog2(max_size) - floorLog2(min_size) : 0),
              inAllCachesMask(mask(numTrackedCaches)),
              boundaries(numTrackedCaches)
        {
            // Each tracked cache needs one bit in CachesMask.
            fatal_if(numTrackedCaches > sizeof(CachesMask) * 8,
                     "Not enough bits (%s) in type CachesMask type to keep "
                     "track of %d caches\n", sizeof(CachesMask),
                     numTrackedCaches);
        }

        /**
         * Initialize cache blocks and the tracking mechanism
         *
         * All blocks in the cache need to be initialized once.
         *
         * @param head the MRU block
         * @param tail the LRU block
         */
        void init(FALRUBlk *head, FALRUBlk *tail);

        /**
         * Update boundaries as a block will be moved to the MRU.
         *
         * For all caches that didn't fit the block before moving it,
         * we move their boundaries one block closer to the MRU. We
         * also update InCacheMasks as necessary.
         *
         * @param blk the block that will be moved to the head
         */
        void moveBlockToHead(FALRUBlk *blk);

        /**
         * Update boundaries as a block will be moved to the LRU.
         *
         * For all caches that fitted the block before moving it, we
         * move their boundaries one block closer to the LRU. We
         * also update InCacheMasks as necessary.
         *
         * @param blk the block that will be moved to the tail
         */
        void moveBlockToTail(FALRUBlk *blk);

        /**
         * Notify of a block access.
         *
         * This should be called every time a block is accessed and it
         * updates statistics. If the input block is nullptr then we
         * treat the access as a miss. The block's InCacheMask
         * determines the caches in which the block fits.
         *
         * @param blk the block to record the access for
         */
        void recordAccess(FALRUBlk *blk);

        /**
         * Check that the tracking mechanism is in consistent state.
         *
         * Iterate from the head (MRU) to the tail (LRU) of the list
         * of blocks and assert the inCachesMask and the boundaries
         * are in consistent state.
         *
         * @param head the MRU block of the actual cache
         * @param tail the LRU block of the actual cache
         */
        void check(const FALRUBlk *head, const FALRUBlk *tail) const;

        /**
         * Register the stats for this object.
         */
        void regStats(std::string name);

      private:
        /** The size of the cache block */
        const unsigned blkSize;
        /** The smallest cache we are tracking */
        const unsigned minTrackedSize;
        /** The number of different size caches being tracked. */
        const int numTrackedCaches;
        /** A mask for all cache being tracked. */
        const CachesMask inAllCachesMask;
        /** Array of pointers to blocks at the cache boundaries. */
        std::vector<FALRUBlk*> boundaries;

      protected:
        /**
         * @defgroup FALRUStats Fully Associative LRU specific statistics
         * The FA lru stack lets us track multiple cache sizes at once. These
         * statistics track the hits and misses for different cache sizes.
         * @{
         */

        /** Hits in each cache */
        Stats::Vector hits;
        /** Misses in each cache */
        Stats::Vector misses;
        /** Total number of accesses */
        Stats::Scalar accesses;

        /**
         * @}
         */
    };
    CacheTracking cacheTracking;
};

#endif // __MEM_CACHE_TAGS_FA_LRU_HH__