/gem5/src/mem/cache/tags/

base_set_assoc.cc
    60: if (blkSize < 4 || !isPowerOf2(blkSize)) {
    77: blk->data = &dataBlks[blkSize*blk_index];
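
The two hits above show the pattern shared by every tag class in this directory: validate blkSize at construction, then carve one flat backing array into per-block data pointers. A minimal standalone sketch of that pattern, assuming a stand-in Blk struct rather than gem5's CacheBlk:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Blk { uint8_t *data = nullptr; };  // stand-in for gem5's CacheBlk

    static bool isPowerOf2(uint64_t v) { return v && !(v & (v - 1)); }

    int main()
    {
        const unsigned blkSize = 64;   // bytes per line: >= 4, power of two
        const unsigned numBlocks = 4;  // illustrative; real caches are larger
        assert(blkSize >= 4 && isPowerOf2(blkSize));

        // One contiguous data store; each block points at its own slice,
        // as in blk->data = &dataBlks[blkSize*blk_index] above.
        std::vector<uint8_t> dataBlks(blkSize * numBlocks);
        std::vector<Blk> blks(numBlocks);
        for (unsigned blk_index = 0; blk_index < numBlocks; ++blk_index)
            blks[blk_index].data = &dataBlks[blkSize * blk_index];
    }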

super_blk.cc
    115: return (compressed_size <= (blkSize * 8) / blks.size());
    121: assert(blkSize == 0);
    122: blkSize = blk_size;
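
Line 115 is the co-allocation test: a compressed block may share a superblock only if its size in bits fits in an equal share of one uncompressed line. A hedged sketch, with the function name and the blksPerSuperblock parameter as illustrative stand-ins for blks.size():

    #include <cstddef>

    // A block of compressed_size bits can co-allocate when it fits in
    // blkSize*8 bits divided evenly among the superblock's blocks.
    bool canCoAllocate(std::size_t compressed_size, std::size_t blkSize,
                       std::size_t blksPerSuperblock)
    {
        return compressed_size <= (blkSize * 8) / blksPerSuperblock;
    }

For example, a 64-byte line shared by two blocks admits only blocks compressed to 256 bits or less.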

super_blk.hh
    131: std::size_t blkSize;    [member in class:SuperBlk]
    134: SuperBlk() : SectorBlk(), blkSize(0) {}

fa_lru.cc
    70: cacheTracking(p->min_tracked_cache_size, size, blkSize)
    72: if (!isPowerOf2(blkSize))
    74: blkSize);
    101: blks[i].data = &dataBlks[blkSize*i];
    108: tail->data = &dataBlks[(numBlocks - 1) * blkSize];
    306: curr_size += blkSize;
    333: curr_size += blkSize;
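
The curr_size += blkSize hits at lines 306 and 333 appear to be FALRU's cache-size tracking: walking the recency stack from MRU toward LRU while accumulating blkSize yields the stack distance of a hit in bytes, which decides whether each tracked cache size would have hit. A simplified sketch of that walk, with an illustrative list node rather than gem5's FALRUBlk:

    #include <cstdint>

    struct FALRUBlk { FALRUBlk *next = nullptr; uint64_t tag = 0; };

    // Smallest capacity (bytes) that still hits on 'tag', or ~0 on a miss;
    // mirrors the curr_size += blkSize accumulation in fa_lru.cc.
    uint64_t stackDistanceBytes(const FALRUBlk *head, uint64_t tag,
                                unsigned blkSize)
    {
        uint64_t curr_size = 0;
        for (const FALRUBlk *b = head; b != nullptr; b = b->next) {
            curr_size += blkSize;
            if (b->tag == tag)
                return curr_size;
        }
        return ~uint64_t(0);
    }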

compressed_tags.cc
    68: superblock->setBlkSize(blkSize);
    83: blk->data = &dataBlks[blkSize*blk_index];

sector_tags.cc
    56: sectorShift(floorLog2(blkSize)), sectorMask(numBlocksPerSector - 1)
    59: fatal_if(blkSize < 4 || !isPowerOf2(blkSize),
    93: blk->data = &dataBlks[blkSize*blk_index];
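
Line 56 shows why the power-of-two check at line 59 matters here: a block's position within its sector is extracted with a shift and a mask instead of a division. As a sketch:

    #include <cstdint>

    // sectorShift = floorLog2(blkSize); sectorMask = blocksPerSector - 1.
    // e.g. blkSize = 64 -> shift 6; 8 blocks per sector -> mask 7.
    unsigned sectorOffset(uint64_t addr, unsigned sectorShift,
                          unsigned sectorMask)
    {
        return (addr >> sectorShift) & sectorMask;
    }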

fa_lru.hh
    287: : blkSize(block_size),
    363: const unsigned blkSize;    [member in class:FALRU::CacheTracking]

/gem5/src/mem/cache/compressors/

base.cc
    76: : SimObject(p), blkSize(p->block_size)
    91: uint64_t decomp_data[blkSize/8];
    97: fatal_if(std::memcmp(data, decomp_data, blkSize),
    110: blkSize*8, comp_size_bits, comp_lat, decomp_lat);
    157: .init(std::log2(blkSize*8) + 2)
    163: for (unsigned i = 0; i <= std::log2(blkSize*8) + 1; ++i) {
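
Lines 91-97 are a debug cross-check: after compressing a line, immediately decompress it into a blkSize-byte scratch buffer and abort if the round trip does not reproduce the input. A standalone sketch of the same check, with identity compress/decompress stand-ins so it runs:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Identity stand-ins for a real compressor's compress/decompress pair.
    std::vector<uint8_t> compress(const uint8_t *data, std::size_t n)
    { return std::vector<uint8_t>(data, data + n); }

    void decompress(const std::vector<uint8_t> &c, uint8_t *out)
    { std::memcpy(out, c.data(), c.size()); }

    int main()
    {
        const std::size_t blkSize = 64;
        std::vector<uint8_t> data(blkSize, 0xA5);
        auto comp = compress(data.data(), blkSize);

        // Round-trip check, as in fatal_if(std::memcmp(data, decomp_data,
        // blkSize), ...): decompression must reproduce the original line.
        std::vector<uint8_t> decomp_data(blkSize);
        decompress(comp, decomp_data.data());
        assert(std::memcmp(data.data(), decomp_data.data(), blkSize) == 0);
    }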

cpack.cc
    53: : BaseCacheCompressor(p), dictionarySize(2*blkSize/8)
    123: for (std::size_t i = 0; i < blkSize/8; i++) {
    150: comp_lat = Cycles(blkSize/8+5);
    153: decomp_lat = Cycles(blkSize/8);
    198: for (std::size_t i = 0; i < blkSize/8; i++) {
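
Every cpack.cc hit treats the line as blkSize/8 64-bit words: the dictionary holds 2*blkSize/8 entries, compression visits each word once (hence a comp_lat of blkSize/8 + 5 cycles), and decompression replays them (blkSize/8 cycles). A sketch of that word-at-a-time walk, with the per-word match logic left as a placeholder:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // C-Pack style pass over one cache line of blkSize bytes.
    void walkLine(const uint8_t *data, std::size_t blkSize)
    {
        std::vector<uint64_t> dictionary;
        dictionary.reserve(2 * blkSize / 8);   // as at cpack.cc:53

        for (std::size_t i = 0; i < blkSize / 8; i++) {
            uint64_t word;
            std::memcpy(&word, data + 8 * i, sizeof(word));
            dictionary.push_back(word);  // placeholder for real match logic
        }
    }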

base.hh
    65: const std::size_t blkSize;    [member in class:BaseCacheCompressor]

bdi.cc
    122: : BDICompData(UNCOMPRESSED), blkSize(blk_size),
    142: size += blkSize*CHAR_BIT;
    297: qwordsPerCacheLine(blkSize/BYTES_PER_QWORD)
    326: new BDICompDataBaseDelta<TB, TD>(encoding, blkSize));
    329: if (temp_data->compress(data, blkSize)) {
    365: new BDICompDataUncompressed(data, blkSize));
    437: comp_lat = Cycles(blkSize/base_delta_ratio);
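
BDI's arithmetic is likewise per-qword (blkSize/BYTES_PER_QWORD values per line): a line compresses when every value sits close to a common base, so only the base plus narrow deltas need storing. A toy version of that compressibility test; the names and the one-base simplification are assumptions, not gem5's BDICompDataBaseDelta:

    #include <cstdint>
    #include <vector>

    // True when every qword is within [-limit-1, limit] of the first one,
    // i.e. the line fits a base + narrow-delta encoding.
    bool fitsBaseDelta(const std::vector<uint64_t> &qwords, int64_t limit)
    {
        if (qwords.empty()) return true;
        const uint64_t base = qwords[0];
        for (uint64_t q : qwords) {
            const int64_t delta = static_cast<int64_t>(q - base);
            if (delta > limit || delta < -limit - 1)
                return false;
        }
        return true;
    }
    // e.g. limit = 127 corresponds to 1-byte deltas against an 8-byte base.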

/gem5/src/mem/cache/

queue_entry.hh
    117: unsigned blkSize;    [member in class:QueueEntry]
    124: inService(false), order(0), blkAddr(0), blkSize(0), isSecure(false)

write_queue_entry.cc
    95: blkSize = blk_size;
    117: assert(target->matchBlockAddr(targets.front().pkt, blkSize));
    134: pkt->trySatisfyFunctional(this, blkAddr, isSecure, blkSize, nullptr);
    158: return pkt->matchBlockAddr(blkAddr, isSecure, blkSize);
    173: prefix, blkAddr, blkAddr + blkSize - 1,
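
Line 158 is the membership test used throughout the cache queues: an entry owns a packet iff the packet's block-aligned address, secure bit, and block size all agree. A sketch with a minimal stand-in for gem5's Packet:

    #include <cstdint>
    using Addr = uint64_t;

    struct Packet {          // minimal stand-in, not gem5's Packet
        Addr addr; bool secure;

        Addr getBlockAddr(unsigned blkSize) const
        { return addr & ~static_cast<Addr>(blkSize - 1); }

        bool matchBlockAddr(Addr blk_addr, bool is_secure,
                            unsigned blkSize) const
        { return getBlockAddr(blkSize) == blk_addr && secure == is_secure; }
    };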

noncoherent_cache.cc
    126: Addr blk_addr = pkt->getBlockAddr(blkSize);
    161: PacketPtr pkt = new Packet(cpu_pkt->req, MemCmd::ReadReq, blkSize);
    164: assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));
    178: pkt->isWholeLineWrite(blkSize));
    249: const int initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
    269: transfer_offset = tgt_pkt->getOffset(blkSize) - initial_offset;
    271: transfer_offset += blkSize;
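
Lines 249-271 place each queued target's data relative to the first target serviced: the difference of their in-block offsets, wrapped by blkSize when negative so it stays within the line. As a sketch:

    // Offset of a target within the response relative to the first target;
    // negative differences wrap around the block (lines 269-271).
    int transferOffset(int tgt_offset, int initial_offset, int blkSize)
    {
        int transfer_offset = tgt_offset - initial_offset;
        if (transfer_offset < 0)
            transfer_offset += blkSize;
        return transfer_offset;
    }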

mshr.cc
    257: blkSize = blk_size;
    268: targets.init(blkAddr, blkSize);
    269: deferredTargets.init(blkAddr, blkSize);
    278: assert(target->matchBlockAddr(targets.front().pkt, blkSize));
    448: blkSize, pkt->id);
    499: ready_targets.init(blkAddr, blkSize);
    644: pkt->trySatisfyFunctional(this, blkAddr, isSecure, blkSize, nullptr);
    662: prefix, blkAddr, blkAddr + blkSize - 1,
    703: return pkt->matchBlockAddr(blkAddr, isSecure, blkSize);

cache.cc
    89: assert(pkt->getSize() == blkSize);
    308: (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0) &&
    355: Addr blk_addr = pkt->getBlockAddr(blkSize);
    540: PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);
    556: assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));
    587: pkt->isWholeLineWrite(blkSize));
    622: } else if (pkt->isWholeLineWrite(blkSize)) {
    693: const int initial_offset = initial_tgt->pkt->getOffset(blkSize);
    764: tgt_pkt->getOffset(blkSize) [all...]
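
Lines 308 and 622 both hinge on the whole-line-write test: a write covers the entire block iff its size equals blkSize and its offset within the block is zero. As a sketch:

    #include <cstdint>
    using Addr = uint64_t;

    // Block-aligned and block-sized, as tested at cache.cc:308 via
    // getSize() == blkSize and getOffset(blkSize) == 0.
    bool isWholeLineWrite(Addr addr, unsigned size, unsigned blkSize)
    {
        return size == blkSize && (addr & (blkSize - 1)) == 0;
    }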

mshr.hh
    199: blkSize = blk_size;
    246: auto offset = pkt->getOffset(blkSize);
    310: Addr blkSize;    [member in class:MSHR]

/gem5/src/mem/cache/prefetch/

tagged.cc
    53: Addr newAddr = blkAddr + d*(blkSize);
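
This single hit is essentially the whole tagged (next-line) prefetcher: on a demand access to block blkAddr, queue the next degree sequential blocks. A sketch:

    #include <cstdint>
    #include <vector>
    using Addr = uint64_t;

    // newAddr = blkAddr + d*blkSize for d = 1..degree (tagged.cc:53).
    std::vector<Addr> nextLineCandidates(Addr blkAddr, unsigned blkSize,
                                         int degree)
    {
        std::vector<Addr> addrs;
        for (int d = 1; d <= degree; d++)
            addrs.push_back(blkAddr + d * static_cast<Addr>(blkSize));
        return addrs;
    }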

access_map_pattern_matching.cc
    40: : ClockedObject(p), blkSize(p->block_size), limitStride(p->limit_stride),
    53: AccessMapEntry(hotZoneSize / blkSize)),
    162: Addr current_block = (pfi.getAddr() % hotZoneSize) / blkSize;
    163: uint64_t lines_per_zone = hotZoneSize / blkSize;
    214: pf_addr = (am_addr - 1) * hotZoneSize + blk * blkSize;
    220: pf_addr = am_addr * hotZoneSize + blk * blkSize;
    238: pf_addr = (am_addr + 1) * hotZoneSize + blk * blkSize;
    244: pf_addr = am_addr * hotZoneSize + blk * blkSize;
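
All the AMPM hits are one coordinate change: an address splits into a hot-zone index and a line index within the zone (hotZoneSize/blkSize lines per zone), and candidate prefetch addresses are rebuilt from those coordinates, including in the neighbouring zones (am_addr +/- 1). A sketch of both directions of the mapping, with illustrative names:

    #include <cstdint>
    using Addr = uint64_t;

    // Address -> (zone, line-in-zone), as at lines 162-163.
    void splitHotZone(Addr addr, Addr hotZoneSize, unsigned blkSize,
                      Addr &am_addr, Addr &blk)
    {
        am_addr = addr / hotZoneSize;
        blk = (addr % hotZoneSize) / blkSize;
    }

    // (zone, line-in-zone) -> prefetch address, as at lines 214-244.
    Addr hotZoneAddr(Addr am_addr, Addr blk, Addr hotZoneSize,
                     unsigned blkSize)
    {
        return am_addr * hotZoneSize + blk * blkSize;
    }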

signature_path.cc
    103: stride_t num_cross_pages = 1 + (-block) / (pageBytes/blkSize);
    109: pf_block = block + (pageBytes/blkSize) * num_cross_pages;
    112: } else if (block >= (pageBytes/blkSize)) {
    113: stride_t num_cross_pages = block / (pageBytes/blkSize);
    119: pf_block = block - (pageBytes/blkSize) * num_cross_pages;
    128: new_addr += pf_block * (Addr)blkSize;
    231: stride_t current_block = (request_addr % pageBytes) / blkSize;
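
The SPP hits normalise a block index that a large stride has pushed off the current page: with pageBytes/blkSize blocks per page, the index is reduced into the page while counting pages crossed, so the prefetch can be re-based on the neighbouring page. A hedged sketch using an equivalent floor-division formulation rather than the source's two explicit branches; stride_t and the output parameter are illustrative:

    #include <cstdint>
    using stride_t = int64_t;

    // Reduce 'block' into [0, blocksPerPage); pages_crossed receives the
    // signed number of pages stepped over, negative meaning backwards.
    stride_t normaliseBlock(stride_t block, stride_t blocksPerPage,
                            stride_t &pages_crossed)
    {
        pages_crossed = block / blocksPerPage;
        stride_t pf_block = block % blocksPerPage;
        if (pf_block < 0) {            // C++ division truncates toward zero
            pf_block += blocksPerPage;
            pages_crossed -= 1;
        }
        return pf_block;
    }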

base.cc
    93: : ClockedObject(p), listeners(), cache(nullptr), blkSize(p->block_size),
    94: lBlkSize(floorLog2(blkSize)), onMiss(p->on_miss), onRead(p->on_read),
    110: blkSize = cache->getBlockSize();
    111: lBlkSize = floorLog2(blkSize);
    175: return a & ~((Addr)blkSize-1);
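
Line 175 is the canonical block-alignment mask, valid only because blkSize is a power of two; lBlkSize = floorLog2(blkSize) gives the equivalent shift form. Both as a sketch:

    #include <cstdint>
    using Addr = uint64_t;

    // Block-align an address (base.cc:175); assumes power-of-two blkSize.
    Addr blockAddress(Addr a, unsigned blkSize)
    { return a & ~(static_cast<Addr>(blkSize) - 1); }

    // Equivalent index form, with lBlkSize = floorLog2(blkSize).
    Addr blockIndex(Addr a, unsigned lBlkSize)
    { return a >> lBlkSize; }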

spatio_temporal_memory_streaming.cc
    46: spatialRegionSize / blkSize)),
    52: spatialRegionSize / blkSize)),
    73: agt_entry.paddress + seq_entry.offset * blkSize;
    133: Addr sr_offset = (pfi.getAddr() % spatialRegionSize) / blkSize;

bop.cc
    51: if (!isPowerOf2(blkSize)) {
    162: return (addr >> blkSize) & tagMask;

access_map_pattern_matching.hh
    53: const unsigned blkSize;    [member in class:AccessMapPatternMatching]

stride.cc
    193: if (abs(new_stride) < blkSize) {
    194: prefetch_stride = (new_stride < 0) ? -blkSize : blkSize;
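
The stride.cc hits clamp the learned stride: anything smaller than one line would often land in the same block as the demand access, so it is widened to a full blkSize in the observed direction. As a sketch:

    #include <cstdlib>

    // Strides below one cache line are widened to a whole line,
    // keeping their sign (stride.cc:193-194).
    int clampStride(int new_stride, int blkSize)
    {
        if (std::abs(new_stride) < blkSize)
            return (new_stride < 0) ? -blkSize : blkSize;
        return new_stride;
    }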