sector_tags.cc: comparison of revisions 13216:6ae030076b29 and 13217:725b1701b4ee. Unchanged code is shown once; in the changed regions, lines marked '-' are from 13216:6ae030076b29 and lines marked '+' are from 13217:725b1701b4ee.
/*
 * Copyright (c) 2018 Inria
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Daniel Carvalho
 */

/**
 * @file
 * Definitions of a base set associative sector tag store.
 */

#include "mem/cache/tags/sector_tags.hh"

#include <cassert>
#include <memory>
#include <string>

#include "base/intmath.hh"
#include "base/logging.hh"
#include "base/types.hh"
#include "debug/CacheRepl.hh"
#include "mem/cache/base.hh"
#include "mem/cache/replacement_policies/base.hh"

SectorTags::SectorTags(const SectorTagsParams *p)
    : BaseTags(p), assoc(p->assoc), allocAssoc(p->assoc),
      sequentialAccess(p->sequential_access),
      replacementPolicy(p->replacement_policy),
      numBlocksPerSector(p->num_blocks_per_sector),
      numSectors(numBlocks / p->num_blocks_per_sector),
      numSets(numSectors / p->assoc),
      blks(numBlocks), secBlks(numSectors), sets(numSets),
      sectorShift(floorLog2(blkSize)),
      setShift(sectorShift + floorLog2(numBlocksPerSector)),
      tagShift(setShift + floorLog2(numSets)),
      sectorMask(numBlocksPerSector - 1), setMask(numSets - 1)
{
    // Check parameters
    fatal_if(blkSize < 4 || !isPowerOf2(blkSize),
             "Block size must be at least 4 and a power of 2");
    fatal_if(!isPowerOf2(numSets),
             "# of sets must be non-zero and a power of 2");
    fatal_if(!isPowerOf2(numBlocksPerSector),
             "# of blocks per sector must be non-zero and a power of 2");
    fatal_if(assoc <= 0, "associativity must be greater than zero");
}
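
// Illustrative address layout implied by the shifts and masks above
// (assumed example values, not taken from this file: blkSize = 64,
// numBlocksPerSector = 4, numSets = 256):
//   sectorShift = 6, setShift = 8, tagShift = 16
//   addr bits: [63:16] tag | [15:8] set | [7:6] sector offset | [5:0] byte offset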

void
SectorTags::init(BaseCache* cache)
{
    // Set parent cache
    setCache(cache);

    // Initialize all sets
    unsigned sec_blk_index = 0; // index into sector blks array
    unsigned blk_index = 0;     // index into blks array
    for (unsigned i = 0; i < numSets; ++i) {
        sets[i].resize(assoc);

        // Initialize all sectors in this set
        for (unsigned j = 0; j < assoc; ++j) {
            // Select block within the set to be linked
            SectorBlk*& sec_blk = sets[i][j];

            // Locate next cache sector
            sec_blk = &secBlks[sec_blk_index];

            // Associate a replacement data entry to the sector
            sec_blk->replacementData = replacementPolicy->instantiateEntry();

            // Initialize all blocks in this sector
            sec_blk->blks.resize(numBlocksPerSector);
            for (unsigned k = 0; k < numBlocksPerSector; ++k){
                // Select block within the set to be linked
                SectorSubBlk*& blk = sec_blk->blks[k];

                // Locate next cache block
                blk = &blks[blk_index];

                // Associate a data chunk to the block
                blk->data = &dataBlks[blkSize*blk_index];

                // Associate sector block to this block
                blk->setSectorBlock(sec_blk);

                // Associate the sector replacement data to this block
                blk->replacementData = sec_blk->replacementData;

                // Set its set, way and sector offset
                blk->set = i;
                blk->way = j;
                blk->setSectorOffset(k);

                // Update block index
                ++blk_index;
            }

            // Update sector block index
            ++sec_blk_index;
        }
    }
}
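
// Resulting layout (follows directly from the loops above): the sector at
// set i, way j is secBlks[i * assoc + j], its k-th sub-block is
// blks[(i * assoc + j) * numBlocksPerSector + k], and that sub-block's data
// is backed by dataBlks at blkSize times the same block index.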

void
SectorTags::invalidate(CacheBlk *blk)
{
    BaseTags::invalidate(blk);

    // Get block's sector
    SectorSubBlk* sub_blk = static_cast<SectorSubBlk*>(blk);
    const SectorBlk* sector_blk = sub_blk->getSectorBlock();

    // Invalidating a single block does not automatically invalidate the
    // sector's tag, as other blocks in the sector may still be using it.
    // The tag is only invalidated when the last valid block in the sector
    // is invalidated, i.e., when the whole sector becomes invalid.
    if (!sector_blk->isValid()) {
        // Decrease the number of tags in use
        tagsInUse--;

        // Invalidate replacement data, as we're invalidating the sector
        replacementPolicy->invalidate(sector_blk->replacementData);
    }
}
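
// Illustrative example (assuming numBlocksPerSector = 4): invalidating three
// of a sector's four valid sub-blocks leaves tagsInUse untouched; only
// invalidating the last valid sub-block decrements tagsInUse and invalidates
// the sector's shared replacement data.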

CacheBlk*
SectorTags::accessBlock(Addr addr, bool is_secure, Cycles &lat)
{
    CacheBlk *blk = findBlock(addr, is_secure);

    // Access all tags in parallel, hence one in each way. The data side
    // either accesses all blocks in parallel, or one block sequentially on
    // a hit. Sequential access with a miss doesn't access data.
    tagAccesses += allocAssoc;
    if (sequentialAccess) {
        if (blk != nullptr) {
            dataAccesses += 1;
        }
    } else {
        dataAccesses += allocAssoc*numBlocksPerSector;
    }

    if (blk != nullptr) {
        // If a cache hit
        lat = accessLatency;
        // Check if the block to be accessed is available. If not,
        // apply the accessLatency on top of block->whenReady.
        if (blk->whenReady > curTick() &&
            cache->ticksToCycles(blk->whenReady - curTick()) >
            accessLatency) {
            lat = cache->ticksToCycles(blk->whenReady - curTick()) +
                accessLatency;
        }

        // Update number of references to accessed block
        blk->refCount++;

        // Get block's sector
        SectorSubBlk* sub_blk = static_cast<SectorSubBlk*>(blk);
        const SectorBlk* sector_blk = sub_blk->getSectorBlock();

        // Update replacement data of accessed block, which is shared with
        // the whole sector it belongs to
        replacementPolicy->touch(sector_blk->replacementData);
    } else {
        // If a cache miss
        lat = lookupLatency;
    }

    return blk;
}
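
// Illustrative numbers (assumed, not from this file): with accessLatency = 2
// cycles, a hit on a block that only becomes ready 5 cycles from now yields
// lat = 5 + 2 = 7 cycles; a hit on a block already ready (or ready within 2
// cycles) keeps lat = 2; a miss returns lookupLatency instead.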

-const std::vector<SectorBlk*>
-SectorTags::getPossibleLocations(Addr addr) const
+std::vector<ReplaceableEntry*>
+SectorTags::getPossibleLocations(const Addr addr) const
{
-    return sets[extractSet(addr)];
+    std::vector<ReplaceableEntry*> locations;
+    for (const auto& blk : sets[extractSet(addr)]) {
+        locations.push_back(static_cast<ReplaceableEntry*>(blk));
+    }
+    return locations;
}
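
// Note: with the 13217 version above, callers receive generic
// ReplaceableEntry pointers from getPossibleLocations() and cast them back
// to SectorBlk* wherever sector-specific members are needed, as findBlock()
// and findVictim() below show.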

void
SectorTags::insertBlock(const Addr addr, const bool is_secure,
                        const int src_master_ID, const uint32_t task_ID,
                        CacheBlk *blk)
{
    // Do common block insertion functionality
    BaseTags::insertBlock(addr, is_secure, src_master_ID, task_ID, blk);

    // Get block's sector
    SectorSubBlk* sub_blk = static_cast<SectorSubBlk*>(blk);
    const SectorBlk* sector_blk = sub_blk->getSectorBlock();

    // When a block is inserted, its tag only starts being used (and counted
    // in tagsInUse) if the sector was not previously present in the cache.
    // This assumes BaseTags::insertBlock does not set the valid bit.
    if (sector_blk->isValid()) {
        // An existing entry's replacement data is just updated
        replacementPolicy->touch(sector_blk->replacementData);
    } else {
        // Increment tag counter
        tagsInUse++;

        // A new entry resets the replacement data
        replacementPolicy->reset(sector_blk->replacementData);
    }
}

CacheBlk*
SectorTags::findBlock(Addr addr, bool is_secure) const
{
    // Extract sector tag
    const Addr tag = extractTag(addr);

    // The address can only be mapped to a specific location of a sector
    // due to sectors being composed of contiguous-address entries
    const Addr offset = extractSectorOffset(addr);

    // Find all possible sector locations for the given address
-    const std::vector<SectorBlk*> locations = getPossibleLocations(addr);
+    const std::vector<ReplaceableEntry*> locations =
+        getPossibleLocations(addr);

    // Search for block
    for (const auto& sector : locations) {
-        auto blk = sector->blks[offset];
+        auto blk = static_cast<SectorBlk*>(sector)->blks[offset];
        if (blk->getTag() == tag && blk->isValid() &&
            blk->isSecure() == is_secure) {
            return blk;
        }
    }

    // Did not find block
    return nullptr;
}

ReplaceableEntry*
SectorTags::findBlockBySetAndWay(int set, int way) const
{
    return sets[set][way];
}

CacheBlk*
SectorTags::findVictim(Addr addr, const bool is_secure,
                       std::vector<CacheBlk*>& evict_blks) const
{
    // Get all possible locations of this sector
-    const std::vector<SectorBlk*> sector_locations =
+    const std::vector<ReplaceableEntry*> sector_locations =
        getPossibleLocations(addr);

    // Check if the sector this address belongs to has been allocated
    Addr tag = extractTag(addr);
    SectorBlk* victim_sector = nullptr;
-    for (const auto& sector : sector_locations){
-        if ((tag == sector->getTag()) && sector->isValid() &&
-            (is_secure == sector->isSecure())){
-            victim_sector = sector;
+    for (const auto& sector : sector_locations) {
+        SectorBlk* sector_blk = static_cast<SectorBlk*>(sector);
+        if ((tag == sector_blk->getTag()) && sector_blk->isValid() &&
+            (is_secure == sector_blk->isSecure())){
+            victim_sector = sector_blk;
            break;
        }
    }

    // If the sector is not present
    if (victim_sector == nullptr){
        // Choose replacement victim from replacement candidates
        victim_sector = static_cast<SectorBlk*>(replacementPolicy->getVictim(
-                            std::vector<ReplaceableEntry*>(
-                            sector_locations.begin(), sector_locations.end())));
+                            sector_locations));
    }

    // Get the location of the victim block within the sector
    SectorSubBlk* victim = victim_sector->blks[extractSectorOffset(addr)];

    // Get evicted blocks. Blocks are only evicted if the sectors mismatch and
    // the currently existing sector is valid.
    if ((tag == victim_sector->getTag()) &&
        (is_secure == victim_sector->isSecure())){
        // It would be a hit if victim was valid, and upgrades do not call
        // findVictim, so it cannot happen
        assert(!victim->isValid());
    } else {
        // The whole sector must be evicted to make room for the new sector
        for (const auto& blk : victim_sector->blks){
            evict_blks.push_back(blk);
        }
    }

    DPRINTF(CacheRepl, "set %x, way %x, sector offset %x: selecting blk "
            "for replacement\n", victim->set, victim->way,
            victim->getSectorOffset());

    return victim;
}
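
// Illustrative consequence (assuming numBlocksPerSector = 4): if the victim
// sector's tag or secure bit differs from the incoming address, all four of
// its sub-blocks are scheduled for eviction; if the sector is already
// present, nothing is evicted, since only the (still invalid) sub-block slot
// selected above will be filled.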

Addr
SectorTags::extractTag(Addr addr) const
{
    return addr >> tagShift;
}

int
SectorTags::extractSet(Addr addr) const
{
    return (addr >> setShift) & setMask;
}

int
SectorTags::extractSectorOffset(Addr addr) const
{
    return (addr >> sectorShift) & sectorMask;
}

Addr
SectorTags::regenerateBlkAddr(const CacheBlk* blk) const
{
    const SectorSubBlk* blk_cast = static_cast<const SectorSubBlk*>(blk);
    return ((blk_cast->getTag() << tagShift) | ((Addr)blk->set << setShift) |
            ((Addr)blk_cast->getSectorOffset() << sectorShift));
}
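
// Illustrative round trip (same assumed geometry as above: blkSize = 64,
// numBlocksPerSector = 4, numSets = 256, so sectorShift = 6, setShift = 8,
// tagShift = 16): for addr = 0x12345, extractTag() = 0x1, extractSet() =
// 0x23, extractSectorOffset() = 0x1, and regenerateBlkAddr() rebuilds the
// block-aligned address 0x12340.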

void
SectorTags::forEachBlk(std::function<void(CacheBlk &)> visitor)
{
    for (SectorSubBlk& blk : blks) {
        visitor(blk);
    }
}

bool
SectorTags::anyBlk(std::function<bool(CacheBlk &)> visitor)
{
    for (SectorSubBlk& blk : blks) {
        if (visitor(blk)) {
            return true;
        }
    }
    return false;
}

SectorTags *
SectorTagsParams::create()
{
    return new SectorTags(this);
}
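
// SectorTagsParams is the auto-generated parameter struct of the SectorTags
// SimObject; create() is the factory hook the simulator calls when it
// instantiates a SectorTags object from its Python configuration.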