Searched refs:blkSize (Results 1 - 25 of 35) sorted by relevance


/gem5/src/mem/cache/tags/
base_set_assoc.cc:60 if (blkSize < 4 || !isPowerOf2(blkSize)) {
77 blk->data = &dataBlks[blkSize*blk_index];
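
The two base_set_assoc.cc hits show the set-associative tags validating the block size and carving one flat allocation into per-block data pointers. A minimal standalone sketch of that layout (isPowerOf2 here is a local stand-in for gem5's helper; the surrounding tag classes are omitted):

#include <cassert>
#include <cstdint>
#include <vector>

// Stand-in for gem5's isPowerOf2() from base/intmath.hh.
static bool isPowerOf2(unsigned v) { return v && !(v & (v - 1)); }

int main()
{
    const unsigned blkSize = 64;    // bytes per cache block
    const unsigned numBlocks = 4;

    // Mirrors the check at base_set_assoc.cc:60.
    assert(blkSize >= 4 && isPowerOf2(blkSize));

    // One flat backing store; each block's data pointer is an offset
    // into it, as at base_set_assoc.cc:77.
    std::vector<uint8_t> dataBlks(blkSize * numBlocks);
    std::vector<uint8_t *> blkData(numBlocks);
    for (unsigned blk_index = 0; blk_index < numBlocks; ++blk_index)
        blkData[blk_index] = &dataBlks[blkSize * blk_index];
    return 0;
}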
super_blk.cc:115 return (compressed_size <= (blkSize * 8) / blks.size());
121 assert(blkSize == 0);
122 blkSize = blk_size;
super_blk.hh:131 std::size_t blkSize; member in class:SuperBlk
134 SuperBlk() : SectorBlk(), blkSize(0) {}
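
super_blk.cc:115 is the superblock fit test: a compressed block may co-reside with its neighbors only if it needs no more than an equal share of one uncompressed block's bits. A hedged free-standing version (the function name guesses at the enclosing method; num_blocks stands in for blks.size()):

#include <cassert>
#include <cstddef>

// blkSize is in bytes, compressed_size_bits in bits, matching the
// (blkSize * 8) / blks.size() expression at super_blk.cc:115.
bool canCoAllocate(std::size_t blkSize, std::size_t num_blocks,
                   std::size_t compressed_size_bits)
{
    return compressed_size_bits <= (blkSize * 8) / num_blocks;
}

int main()
{
    // 64-byte blocks, 4 blocks per superblock: each compressed block
    // may use at most 512 / 4 = 128 bits.
    assert(canCoAllocate(64, 4, 128));
    assert(!canCoAllocate(64, 4, 129));
    return 0;
}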
fa_lru.cc:70 cacheTracking(p->min_tracked_cache_size, size, blkSize)
72 if (!isPowerOf2(blkSize))
74 blkSize);
101 blks[i].data = &dataBlks[blkSize*i];
108 tail->data = &dataBlks[(numBlocks - 1) * blkSize];
306 curr_size += blkSize;
333 curr_size += blkSize;
compressed_tags.cc:68 superblock->setBlkSize(blkSize);
83 blk->data = &dataBlks[blkSize*blk_index];
sector_tags.cc:56 sectorShift(floorLog2(blkSize)), sectorMask(numBlocksPerSector - 1)
59 fatal_if(blkSize < 4 || !isPowerOf2(blkSize),
93 blk->data = &dataBlks[blkSize*blk_index];
fa_lru.hh:287 : blkSize(block_size),
363 const unsigned blkSize; member in class:FALRU::CacheTracking
/gem5/src/mem/cache/compressors/
base.cc:76 : SimObject(p), blkSize(p->block_size)
91 uint64_t decomp_data[blkSize/8];
97 fatal_if(std::memcmp(data, decomp_data, blkSize),
110 blkSize*8, comp_size_bits, comp_lat, decomp_lat);
157 .init(std::log2(blkSize*8) + 2)
163 for (unsigned i = 0; i <= std::log2(blkSize*8) + 1; ++i) {
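
The base.cc hits show the compressor decompressing its own output into a blkSize-byte scratch buffer and comparing it against the original with memcmp, aborting on a mismatch. A sketch of that round-trip check, with toy identity compress/decompress stand-ins in place of a real compressor subclass:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Toy stand-ins; gem5 dispatches to a real compressor implementation.
std::vector<uint64_t> compress(const uint64_t *data, std::size_t qwords)
{
    return std::vector<uint64_t>(data, data + qwords); // identity "compression"
}

void decompress(const std::vector<uint64_t> &comp, uint64_t *out)
{
    std::memcpy(out, comp.data(), comp.size() * sizeof(uint64_t));
}

int main()
{
    const std::size_t blkSize = 64;           // bytes per cache block
    uint64_t data[blkSize / 8] = {1, 2, 3};   // a cache line as qwords

    auto comp = compress(data, blkSize / 8);

    // Mirrors base.cc:91-97: decompress into a scratch buffer and fail
    // hard if the round trip does not reproduce the input.
    uint64_t decomp_data[blkSize / 8];
    decompress(comp, decomp_data);
    assert(std::memcmp(data, decomp_data, blkSize) == 0);
    return 0;
}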
cpack.cc:53 : BaseCacheCompressor(p), dictionarySize(2*blkSize/8)
123 for (std::size_t i = 0; i < blkSize/8; i++) {
150 comp_lat = Cycles(blkSize/8+5);
153 decomp_lat = Cycles(blkSize/8);
198 for (std::size_t i = 0; i < blkSize/8; i++) {
base.hh:65 const std::size_t blkSize; member in class:BaseCacheCompressor
bdi.cc:122 : BDICompData(UNCOMPRESSED), blkSize(blk_size),
142 size += blkSize*CHAR_BIT;
297 qwordsPerCacheLine(blkSize/BYTES_PER_QWORD)
326 new BDICompDataBaseDelta<TB, TD>(encoding, blkSize));
329 if (temp_data->compress(data, blkSize)) {
365 new BDICompDataUncompressed(data, blkSize));
437 comp_lat = Cycles(blkSize/base_delta_ratio);
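
The bdi.cc hits are the Base-Delta-Immediate encodings: each BDICompDataBaseDelta<TB, TD> instantiation asks whether every word in the line is within a small delta of a base. A deliberately simplified single-base sketch (real BDI tries several base/delta widths and keeps an implicit zero base, both omitted here):

#include <cassert>
#include <cstdint>
#include <vector>

// Checks only one encoding: 8-byte base, 1-byte deltas.
bool compressibleBase8Delta1(const std::vector<uint64_t> &line)
{
    const uint64_t base = line[0];
    for (uint64_t qword : line) {
        // Unsigned wraparound followed by the signed cast yields the
        // signed delta for in-range values.
        const int64_t delta = static_cast<int64_t>(qword - base);
        if (delta < INT8_MIN || delta > INT8_MAX)
            return false;               // delta does not fit in 1 byte
    }
    return true;
}

int main()
{
    // Pointer-like values clustered near a common base compress well.
    assert(compressibleBase8Delta1({0x7fff0000, 0x7fff0008, 0x7fff0010}));
    assert(!compressibleBase8Delta1({0x7fff0000, 0x12345678}));
    return 0;
}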
/gem5/src/mem/cache/
queue_entry.hh:117 unsigned blkSize; member in class:QueueEntry
124 inService(false), order(0), blkAddr(0), blkSize(0), isSecure(false)
write_queue_entry.cc:95 blkSize = blk_size;
117 assert(target->matchBlockAddr(targets.front().pkt, blkSize));
134 pkt->trySatisfyFunctional(this, blkAddr, isSecure, blkSize, nullptr);
158 return pkt->matchBlockAddr(blkAddr, isSecure, blkSize);
173 prefix, blkAddr, blkAddr + blkSize - 1,
noncoherent_cache.cc:126 Addr blk_addr = pkt->getBlockAddr(blkSize);
161 PacketPtr pkt = new Packet(cpu_pkt->req, MemCmd::ReadReq, blkSize);
164 assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));
178 pkt->isWholeLineWrite(blkSize));
249 const int initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
269 transfer_offset = tgt_pkt->getOffset(blkSize) - initial_offset;
271 transfer_offset += blkSize;
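
The noncoherent_cache.cc hits at lines 249-271 compute where each target's data sits in the response: offsets are taken relative to the first target, and a negative result wraps around by blkSize because the block is delivered critical-word-first. A sketch of that arithmetic (transferOffset is a hypothetical helper name):

#include <cassert>

int transferOffset(int tgt_offset, int initial_offset, int blkSize)
{
    int transfer_offset = tgt_offset - initial_offset;
    if (transfer_offset < 0)
        transfer_offset += blkSize;     // wrap: data for this target
                                        // arrives after the block's end
    return transfer_offset;
}

int main()
{
    const int blkSize = 64;
    assert(transferOffset(48, 16, blkSize) == 32);  // later word
    assert(transferOffset(8, 16, blkSize) == 56);   // wraps around
    return 0;
}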
mshr.cc:257 blkSize = blk_size;
268 targets.init(blkAddr, blkSize);
269 deferredTargets.init(blkAddr, blkSize);
278 assert(target->matchBlockAddr(targets.front().pkt, blkSize));
448 blkSize, pkt->id);
499 ready_targets.init(blkAddr, blkSize);
644 pkt->trySatisfyFunctional(this, blkAddr, isSecure, blkSize, nullptr);
662 prefix, blkAddr, blkAddr + blkSize - 1,
703 return pkt->matchBlockAddr(blkAddr, isSecure, blkSize);
cache.cc:89 assert(pkt->getSize() == blkSize);
308 (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0) &&
355 Addr blk_addr = pkt->getBlockAddr(blkSize);
540 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);
556 assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));
587 pkt->isWholeLineWrite(blkSize));
622 } else if (pkt->isWholeLineWrite(blkSize)) {
693 const int initial_offset = initial_tgt->pkt->getOffset(blkSize);
764 tgt_pkt->getOffset(blkSize)
[all...]
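
cache.cc:308 spells out the whole-line-write test that isWholeLineWrite(blkSize) encapsulates elsewhere: the packet spans exactly blkSize bytes and starts at block offset zero. A free-standing sketch, assuming a power-of-two blkSize:

#include <cassert>
#include <cstdint>

using Addr = uint64_t;

// A write covers the entire block exactly when it starts on a block
// boundary and spans blkSize bytes (cf. cache.cc:308).
bool isWholeLineWrite(Addr addr, unsigned size, unsigned blkSize)
{
    return size == blkSize && (addr & (blkSize - 1)) == 0;
}

int main()
{
    assert(isWholeLineWrite(0x1000, 64, 64));
    assert(!isWholeLineWrite(0x1008, 64, 64));  // misaligned start
    assert(!isWholeLineWrite(0x1000, 32, 64));  // partial write
    return 0;
}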
mshr.hh:199 blkSize = blk_size;
246 auto offset = pkt->getOffset(blkSize);
310 Addr blkSize; member in class:MSHR
/gem5/src/mem/cache/prefetch/
tagged.cc:53 Addr newAddr = blkAddr + d*(blkSize);
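
tagged.cc:53 is the whole of the next-line prefetcher's address generation: the d-th candidate is simply d blocks past the demand block. A sketch (taggedCandidates is a hypothetical wrapper around that line):

#include <cassert>
#include <cstdint>
#include <vector>

using Addr = uint64_t;

// For a demand access to blkAddr, queue the next `degree` blocks.
std::vector<Addr> taggedCandidates(Addr blkAddr, unsigned blkSize,
                                   unsigned degree)
{
    std::vector<Addr> addrs;
    for (unsigned d = 1; d <= degree; ++d)
        addrs.push_back(blkAddr + d * blkSize);
    return addrs;
}

int main()
{
    auto v = taggedCandidates(0x1000, 64, 2);   // {0x1040, 0x1080}
    assert(v[0] == 0x1040 && v[1] == 0x1080);
    return 0;
}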
access_map_pattern_matching.cc:40 : ClockedObject(p), blkSize(p->block_size), limitStride(p->limit_stride),
53 AccessMapEntry(hotZoneSize / blkSize)),
162 Addr current_block = (pfi.getAddr() % hotZoneSize) / blkSize;
163 uint64_t lines_per_zone = hotZoneSize / blkSize;
214 pf_addr = (am_addr - 1) * hotZoneSize + blk * blkSize;
220 pf_addr = am_addr * hotZoneSize + blk * blkSize;
238 pf_addr = (am_addr + 1) * hotZoneSize + blk * blkSize;
244 pf_addr = am_addr * hotZoneSize + blk * blkSize;
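
The access_map_pattern_matching.cc hits map an address to a hot zone plus a block index within it (lines 162-163) and rebuild prefetch addresses from a zone number and a block index (lines 214-244). A worked sketch, assuming 2 KiB zones and 64-byte blocks:

#include <cassert>
#include <cstdint>

using Addr = uint64_t;

int main()
{
    const Addr blkSize = 64;
    const Addr hotZoneSize = 2 * 1024;                    // assumed zone size
    const Addr lines_per_zone = hotZoneSize / blkSize;    // 32

    Addr addr = 0x12345;
    Addr am_addr = addr / hotZoneSize;                    // zone number
    Addr current_block = (addr % hotZoneSize) / blkSize;  // index in zone
    assert(current_block < lines_per_zone);

    // Rebuild a block address from (zone, block index), as at
    // lines 214-244; it recovers the block-aligned address.
    Addr pf_addr = am_addr * hotZoneSize + current_block * blkSize;
    assert(pf_addr == (addr & ~(blkSize - 1)));
    return 0;
}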
signature_path.cc:103 stride_t num_cross_pages = 1 + (-block) / (pageBytes/blkSize);
109 pf_block = block + (pageBytes/blkSize) * num_cross_pages;
112 } else if (block >= (pageBytes/blkSize)) {
113 stride_t num_cross_pages = block / (pageBytes/blkSize);
119 pf_block = block - (pageBytes/blkSize) * num_cross_pages;
128 new_addr += pf_block * (Addr)blkSize;
231 stride_t current_block = (request_addr % pageBytes) / blkSize;
base.cc:93 : ClockedObject(p), listeners(), cache(nullptr), blkSize(p->block_size),
94 lBlkSize(floorLog2(blkSize)), onMiss(p->on_miss), onRead(p->on_read),
110 blkSize = cache->getBlockSize();
111 lBlkSize = floorLog2(blkSize);
175 return a & ~((Addr)blkSize-1);
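
prefetch/base.cc caches lBlkSize = floorLog2(blkSize) (lines 94, 111) and aligns addresses with a mask at line 175. A sketch showing that the mask and the shift form agree for power-of-two block sizes:

#include <cassert>
#include <cstdint>

using Addr = uint64_t;

int main()
{
    const unsigned blkSize = 64;

    // floorLog2(blkSize) by repeated halving; 64 -> 6.
    unsigned lBlkSize = 0;
    for (unsigned v = blkSize; v > 1; v >>= 1)
        ++lBlkSize;
    assert(lBlkSize == 6);

    // blockAddress(a) strips the intra-block offset (base.cc:175).
    Addr a = 0xDEADBEEF;
    Addr blockAddress = a & ~((Addr)blkSize - 1);
    assert(blockAddress == (a >> lBlkSize) << lBlkSize);  // same result
    return 0;
}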
spatio_temporal_memory_streaming.cc:46 spatialRegionSize / blkSize)),
52 spatialRegionSize / blkSize)),
73 agt_entry.paddress + seq_entry.offset * blkSize;
133 Addr sr_offset = (pfi.getAddr() % spatialRegionSize) / blkSize;
bop.cc:51 if (!isPowerOf2(blkSize)) {
162 return (addr >> blkSize) & tagMask;
access_map_pattern_matching.hh:53 const unsigned blkSize; member in class:AccessMapPatternMatching
stride.cc:193 if (abs(new_stride) < blkSize) {
194 prefetch_stride = (new_stride < 0) ? -blkSize : blkSize;
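
stride.cc:193-194 clamps a detected stride that is smaller than a block: prefetching by less than blkSize would land in the line already being fetched, so the stride is widened to one full block in the same direction. A sketch:

#include <cassert>
#include <cstdlib>

int clampStride(int new_stride, int blkSize)
{
    if (std::abs(new_stride) < blkSize)
        return (new_stride < 0) ? -blkSize : blkSize;   // widen to one block
    return new_stride;
}

int main()
{
    const int blkSize = 64;
    assert(clampStride(8, blkSize) == 64);
    assert(clampStride(-8, blkSize) == -64);
    assert(clampStride(128, blkSize) == 128);   // already >= one block
    return 0;
}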

Completed in 38 milliseconds
