--- fa_lru.cc (12648:78941f188bb3)
+++ fa_lru.cc (12665:4ca9fc117b95)
 /*
- * Copyright (c) 2013,2016-2017 ARM Limited
+ * Copyright (c) 2013,2016-2018 ARM Limited
  * All rights reserved.
  *
  * The license below extends only to copyright in the software and shall
  * not be construed as granting a license to any other intellectual
  * property including but not limited to intellectual property relating
  * to a hardware implementation of the functionality of the software
  * licensed hereunder. You may use the software subject to the license
  * terms below provided that you ensure that this notice is replicated

--- 22 unchanged lines hidden (view full) ---

  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Authors: Erik Hallnor
+ *          Nikos Nikoleris
  */

 /**
  * @file
  * Definitions of a fully associative LRU tagstore.
  */

 #include "mem/cache/tags/fa_lru.hh"

 #include <cassert>
 #include <sstream>

 #include "base/intmath.hh"
 #include "base/logging.hh"

 FALRU::FALRU(const Params *p)
-    : BaseTags(p), cacheBoundaries(nullptr)
+    : BaseTags(p),
+
+      cacheTracking(p->min_tracked_cache_size, size, blkSize)
 {
     if (!isPowerOf2(blkSize))
         fatal("cache block size (in bytes) `%d' must be a power of two",
               blkSize);
     if (!isPowerOf2(size))
         fatal("Cache Size must be power of 2 for now");

-    // Track all cache sizes from 128K up by powers of 2
-    numCaches = floorLog2(size) - 17;
-    if (numCaches > 0){
-        cacheBoundaries = new FALRUBlk *[numCaches];
-        cacheMask = (ULL(1) << numCaches) - 1;
-    } else {
-        cacheMask = 0;
-    }
-
     blks = new FALRUBlk[numBlocks];
-    head = &(blks[0]);
-    tail = &(blks[numBlocks-1]);
-
+
+    head = &(blks[0]);
     head->prev = nullptr;
     head->next = &(blks[1]);
-    head->inCache = cacheMask;
+    head->set = 0;
+    head->way = 0;
     head->data = &dataBlks[0];

-    tail->prev = &(blks[numBlocks-2]);
-    tail->next = nullptr;
-    tail->inCache = 0;
-    tail->data = &dataBlks[(numBlocks-1)*blkSize];
-
-    unsigned index = (1 << 17) / blkSize;
-    unsigned j = 0;
-    int flags = cacheMask;
     for (unsigned i = 1; i < numBlocks - 1; i++) {
-        blks[i].inCache = flags;
-        if (i == index - 1){
-            cacheBoundaries[j] = &(blks[i]);
-            flags &= ~ (1<<j);
-            ++j;
-            index = index << 1;
-        }
         blks[i].prev = &(blks[i-1]);
         blks[i].next = &(blks[i+1]);
         blks[i].set = 0;
         blks[i].way = i;

         // Associate a data chunk to the block
         blks[i].data = &dataBlks[blkSize*i];
     }
-    assert(j == numCaches);
-    assert(index == numBlocks);
-    //assert(check());
+
+    tail = &(blks[numBlocks - 1]);
+    tail->prev = &(blks[numBlocks - 2]);
+    tail->next = nullptr;
+    tail->set = 0;
+    tail->way = numBlocks - 1;
+    tail->data = &dataBlks[(numBlocks - 1) * blkSize];
+
+    cacheTracking.init(head, tail);
 }

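The constructor change replaces the hand-rolled numCaches/cacheBoundaries/cacheMask bookkeeping with a CacheTracking member that is handed min_tracked_cache_size, the actual cache size, and the block size, and whose init() walks the freshly built LRU list once to place the initial boundaries. A standalone sketch of the size/mask arithmetic this implies (the values and the counting loop are illustrative assumptions, not the gem5 API):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        // Hypothetical configuration: 1 MiB cache, tracked from 128 KiB up.
        const uint64_t minTrackedSize = 128 * 1024;  // p->min_tracked_cache_size
        const uint64_t cacheSize = 1024 * 1024;      // the actual cache
        unsigned numTrackedCaches = 0;
        for (uint64_t s = minTrackedSize; s < cacheSize; s <<= 1)
            numTrackedCaches++;                      // 128K, 256K, 512K -> 3
        // Bit i of a block's inCachesMask means "still resident in the
        // (minTrackedSize << i)-byte cache"; all bits set at the MRU end.
        const uint32_t inAllCachesMask = (1U << numTrackedCaches) - 1;
        assert(numTrackedCaches == 3 && inAllCachesMask == 0x7);
        return 0;
    }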
 FALRU::~FALRU()
 {
-    if (numCaches)
-        delete[] cacheBoundaries;
-
     delete[] blks;
 }

 void
 FALRU::regStats()
 {
     BaseTags::regStats();
-    hits
-        .init(numCaches+1)
-        .name(name() + ".falru_hits")
-        .desc("The number of hits in each cache size.")
-        ;
-    misses
-        .init(numCaches+1)
-        .name(name() + ".falru_misses")
-        .desc("The number of misses in each cache size.")
-        ;
-    accesses
-        .name(name() + ".falru_accesses")
-        .desc("The number of accesses to the FA LRU cache.")
-        ;
-
-    for (unsigned i = 0; i <= numCaches; ++i) {
-        std::stringstream size_str;
-        if (i < 3){
-            size_str << (1<<(i+7)) <<"K";
-        } else {
-            size_str << (1<<(i-3)) <<"M";
-        }
-
-        hits.subname(i, size_str.str());
-        hits.subdesc(i, "Hits in a " + size_str.str() +" cache");
-        misses.subname(i, size_str.str());
-        misses.subdesc(i, "Misses in a " + size_str.str() +" cache");
-    }
+    cacheTracking.regStats(name());
 }

 FALRUBlk *
 FALRU::hashLookup(Addr addr) const
 {
     tagIterator iter = tagHash.find(addr);
     if (iter != tagHash.end()) {
         return (*iter).second;

--- 15 unchanged lines hidden (view full) ---


 CacheBlk*
 FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat)
 {
     return accessBlock(addr, is_secure, lat, 0);
 }

 CacheBlk*
-FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int *inCache)
+FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat,
+                   CachesMask *in_caches_mask)
 {
-    accesses++;
-    int tmp_in_cache = 0;
+    CachesMask mask = 0;
     Addr blkAddr = blkAlign(addr);
     FALRUBlk* blk = hashLookup(blkAddr);

     if (blk && blk->isValid()) {
         // If a cache hit
         lat = accessLatency;
         // Check if the block to be accessed is available. If not,
         // apply the accessLatency on top of block->whenReady.
         if (blk->whenReady > curTick() &&
             cache->ticksToCycles(blk->whenReady - curTick()) >
             accessLatency) {
             lat = cache->ticksToCycles(blk->whenReady - curTick()) +
                 accessLatency;
         }
         assert(blk->tag == blkAddr);
-        tmp_in_cache = blk->inCache;
-        for (unsigned i = 0; i < numCaches; i++) {
-            if (1<<i & blk->inCache) {
-                hits[i]++;
-            } else {
-                misses[i]++;
-            }
-        }
-        hits[numCaches]++;
-        if (blk != head){
-            moveToHead(blk);
-        }
+        mask = blk->inCachesMask;
+        moveToHead(blk);
     } else {
         // If a cache miss
         lat = lookupLatency;
         blk = nullptr;
-        for (unsigned i = 0; i <= numCaches; ++i) {
-            misses[i]++;
-        }
     }
-    if (inCache) {
-        *inCache = tmp_in_cache;
+    if (in_caches_mask) {
+        *in_caches_mask = mask;
     }

-    //assert(check());
+    cacheTracking.recordAccess(blk);
+
     return blk;
 }


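The old int *inCache out-parameter becomes a typed CachesMask: bit i reports whether this access would also have hit in the i-th tracked (smaller) cache size, while the return value still answers for the actual cache. A hedged sketch of how a caller might decode the mask (the helper is hypothetical; CachesMask itself is presumably declared next to FALRUBlk in fa_lru.hh):

    #include <cstdint>
    #include <cstdio>

    using CachesMask = uint32_t;  // assumption: a plain integer bit mask

    void reportStackDistance(CachesMask mask, unsigned numTrackedCaches,
                             uint64_t minTrackedSize)
    {
        for (unsigned i = 0; i < numTrackedCaches; i++) {
            std::printf("%llu kB: %s\n",
                        (unsigned long long)((minTrackedSize << i) >> 10),
                        (mask & (1U << i)) ? "hit" : "miss");
        }
    }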
 CacheBlk*
 FALRU::findBlock(Addr addr, bool is_secure) const
 {
     Addr blkAddr = blkAlign(addr);

--- 21 unchanged lines hidden (view full) ---

 }

 void
 FALRU::insertBlock(PacketPtr pkt, CacheBlk *blk)
 {
     FALRUBlk* falruBlk = static_cast<FALRUBlk*>(blk);

     // Make sure block is not present in the cache
-    assert(falruBlk->inCache == 0);
+    assert(falruBlk->inCachesMask == 0);

     // Do common block insertion functionality
     BaseTags::insertBlock(pkt, blk);

     // New block is the MRU
     moveToHead(falruBlk);

     // Insert new block in the hash table
     tagHash[falruBlk->tag] = falruBlk;
-
-    //assert(check());
 }

 void
 FALRU::moveToHead(FALRUBlk *blk)
 {
     // If block is not already head, do the moving
     if (blk != head) {
-        // Get all caches that this block does not reside in
-        int updateMask = blk->inCache ^ cacheMask;
-
-        // Update boundaries for all cache sizes
-        for (unsigned i = 0; i < numCaches; i++){
-            // If block has been moved to a place before this boundary,
-            // all blocks in it will be pushed towards the LRU position,
-            // making one leave the boundary
-            if ((1U<<i) & updateMask) {
-                cacheBoundaries[i]->inCache &= ~(1U<<i);
-                cacheBoundaries[i] = cacheBoundaries[i]->prev;
-            // If the block resides exactly at this boundary, the previous
-            // block is pushed to its position
-            } else if (cacheBoundaries[i] == blk) {
-                cacheBoundaries[i] = blk->prev;
-            }
-        }
-
-        // Make block reside in all caches
-        blk->inCache = cacheMask;
-
+        cacheTracking.moveBlockToHead(blk);
         // If block is tail, set previous block as new tail
         if (blk == tail){
             assert(blk->next == nullptr);
             tail = blk->prev;
             tail->next = nullptr;
         // Inform block's surrounding blocks that it has been moved
         } else {
             blk->prev->next = blk->next;
             blk->next->prev = blk->prev;
         }

         // Swap pointers
         blk->next = head;
         blk->prev = nullptr;
         head->prev = blk;
         head = blk;
+
+        cacheTracking.check(head, tail);
     }
 }

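Both LRU-order moves now funnel through CacheTracking, which keeps one boundary pointer per tracked size: boundaries[i] is the last list node that still fits in the i-th (smaller) cache. The update that moveBlockToHead performs, reduced to a toy doubly-linked list (Node and onMoveToHead are illustrative, not gem5 types):

    #include <cstdint>

    struct Node { Node *prev; Node *next; uint32_t inMask; };

    // Promoting blk to MRU pushes one extra block down past the boundary
    // of every cache blk did NOT fit in, so those boundaries slide one
    // node toward the MRU end; afterwards blk fits in every tracked size.
    void onMoveToHead(Node *blk, Node **boundaries, int numTracked,
                      uint32_t allMask)
    {
        const uint32_t update = allMask ^ blk->inMask;
        for (int i = 0; i < numTracked; i++) {
            const uint32_t bit = 1U << i;
            if (update & bit) {
                boundaries[i]->inMask &= ~bit;  // pushed out of cache i
                boundaries[i] = boundaries[i]->prev;
            } else if (boundaries[i] == blk) {
                boundaries[i] = blk->prev;      // don't track the mover
            }
        }
        blk->inMask = allMask;
    }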
 void
 FALRU::moveToTail(FALRUBlk *blk)
 {
     // If block is not already tail, do the moving
     if (blk != tail) {
-        // Update boundaries for all cache sizes
-        for (unsigned i = 0; i < numCaches; i++){
-            // If block has been moved to a place after this boundary,
-            // all blocks in it will be pushed towards the MRU position,
-            // making one enter the boundary
-            if ((1U<<i) & blk->inCache) {
-                // If the first block after the boundary is the block,
-                // get its successor
-                if (cacheBoundaries[i]->next == blk){
-                    cacheBoundaries[i] = cacheBoundaries[i]->next->next;
-                } else {
-                    cacheBoundaries[i] = cacheBoundaries[i]->next;
-                }
-                cacheBoundaries[i]->inCache |= (1U<<i);
-            }
-        }
-
-        // Make block reside in the last cache only
-        blk->inCache = 0;
-
+        cacheTracking.moveBlockToTail(blk);
         // If block is head, set next block as new head
         if (blk == head){
             assert(blk->prev == nullptr);
             head = blk->next;
             head->prev = nullptr;
         // Inform block's surrounding blocks that it has been moved
         } else {
             blk->prev->next = blk->next;
             blk->next->prev = blk->prev;
         }

         // Swap pointers
         blk->prev = tail;
         blk->next = nullptr;
         tail->next = blk;
         tail = blk;
+
+        cacheTracking.check(head, tail);
     }
 }

-bool
-FALRU::check()
+FALRU *
+FALRUParams::create()
 {
+    return new FALRU(this);
+}
+
+void
+FALRU::CacheTracking::check(FALRUBlk *head, FALRUBlk *tail)
+{
+#ifdef FALRU_DEBUG
     FALRUBlk* blk = head;
-    int tot_size = 0;
-    int boundary = 1<<17;
+    unsigned curr_size = 0;
+    unsigned tracked_cache_size = minTrackedSize;
+    CachesMask in_caches_mask = inAllCachesMask;
     int j = 0;
-    int flags = cacheMask;
+
     while (blk) {
-        tot_size += blkSize;
-        if (blk->inCache != flags) {
-            return false;
+        panic_if(blk->inCachesMask != in_caches_mask, "Expected cache mask "
+                 "%x found %x", blk->inCachesMask, in_caches_mask);
+
+        curr_size += blkSize;
+        if (curr_size == tracked_cache_size && blk != tail) {
+            panic_if(boundaries[j] != blk, "Unexpected boundary for the %d-th "
+                     "cache", j);
+            tracked_cache_size <<= 1;
+            // from this point, blocks fit only in the larger caches
+            in_caches_mask &= ~(1U << j);
+            ++j;
         }
-        if (tot_size == boundary && blk != tail) {
-            if (cacheBoundaries[j] != blk) {
-                return false;
-            }
-            flags &= ~(1 << j);
-            boundary = boundary << 1;
+        blk = blk->next;
+    }
+#endif // FALRU_DEBUG
+}
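Note that the rewritten consistency walk compiles away entirely unless FALRU_DEBUG is defined, and it now panics on the first inconsistency (via panic_if) instead of returning a bool that the callers used to hide behind commented-out asserts; with the macro defined, every moveToHead/moveToTail pays an O(numBlocks) verification pass, so it is strictly a debug-build tool.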
+
+void
+FALRU::CacheTracking::init(FALRUBlk *head, FALRUBlk *tail)
+{
+    // early exit if we are not tracking any extra caches
+    FALRUBlk* blk = numTrackedCaches ? head : nullptr;
+    unsigned curr_size = 0;
+    unsigned tracked_cache_size = minTrackedSize;
+    CachesMask in_caches_mask = inAllCachesMask;
+    int j = 0;
+
+    while (blk) {
+        blk->inCachesMask = in_caches_mask;
+
+        curr_size += blkSize;
+        if (curr_size == tracked_cache_size && blk != tail) {
+            boundaries[j] = blk;
+
+            tracked_cache_size <<= 1;
+            // from this point, blocks fit only in the larger caches
+            in_caches_mask &= ~(1U << j);
             ++j;
         }
         blk = blk->next;
     }
-    return true;
 }

-FALRU *
-FALRUParams::create()
+
+void
+FALRU::CacheTracking::moveBlockToHead(FALRUBlk *blk)
 {
-    return new FALRU(this);
+    // Get the mask of all caches, in which the block didn't fit
+    // before moving it to the head
+    CachesMask update_caches_mask = inAllCachesMask ^ blk->inCachesMask;
+
+    for (int i = 0; i < numTrackedCaches; i++) {
+        CachesMask current_cache_mask = 1U << i;
+        if (current_cache_mask & update_caches_mask) {
+            // if the ith cache didn't fit the block (before it is moved to
+            // the head), move the ith boundary 1 block closer to the
+            // MRU
+            boundaries[i]->inCachesMask &= ~current_cache_mask;
+            boundaries[i] = boundaries[i]->prev;
+        } else if (boundaries[i] == blk) {
+            // Make sure the boundary doesn't point to the block
+            // we are about to move
+            boundaries[i] = blk->prev;
+        }
+    }
+
+    // Make block reside in all caches
+    blk->inCachesMask = inAllCachesMask;
 }

+void
+FALRU::CacheTracking::moveBlockToTail(FALRUBlk *blk)
+{
+    CachesMask update_caches_mask = blk->inCachesMask;
+
+    for (int i = 0; i < numTrackedCaches; i++) {
+        CachesMask current_cache_mask = 1U << i;
+        if (current_cache_mask & update_caches_mask) {
+            // if the ith cache fitted the block (before it is moved to
+            // the tail), move the ith boundary 1 block closer to the
+            // LRU
+            boundaries[i] = boundaries[i]->next;
+            if (boundaries[i] == blk) {
+                // Make sure the boundary doesn't point to the block
+                // we are about to move
+                boundaries[i] = blk->next;
+            }
+            boundaries[i]->inCachesMask |= current_cache_mask;
+        }
+    }
+
+    // The block now fits only in the actual cache
+    blk->inCachesMask = 0;
+}
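moveBlockToTail is the mirror image: every cache the block used to fit in now has one block fewer above its boundary, so that boundary slides one node toward the LRU end and the node it lands on gains the corresponding bit. In the same toy terms as the earlier sketch:

    // Counterpart to onMoveToHead above (same illustrative Node type).
    void onMoveToTail(Node *blk, Node **boundaries, int numTracked)
    {
        const uint32_t update = blk->inMask;  // caches it used to fit in
        for (int i = 0; i < numTracked; i++) {
            const uint32_t bit = 1U << i;
            if (update & bit) {
                boundaries[i] = boundaries[i]->next;
                if (boundaries[i] == blk)     // skip the departing node
                    boundaries[i] = blk->next;
                boundaries[i]->inMask |= bit; // newly fits in cache i
            }
        }
        blk->inMask = 0;  // the LRU block fits in no tracked smaller cache
    }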
+
+void
+FALRU::CacheTracking::recordAccess(FALRUBlk *blk)
+{
+    for (int i = 0; i < numTrackedCaches; i++) {
+        if (blk && ((1U << i) & blk->inCachesMask)) {
+            hits[i]++;
+        } else {
+            misses[i]++;
+        }
+    }
+
+    // Record stats for the actual cache too
+    if (blk) {
+        hits[numTrackedCaches]++;
+    } else {
+        misses[numTrackedCaches]++;
+    }
+
+    accesses++;
+}
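recordAccess gives the per-size counters their final shape: index i counts for the hypothetical cache of size minTrackedSize << i, and index numTrackedCaches counts the actual cache, so misses[i] / accesses is one point of a miss-ratio curve. For example, 400 misses at index 0 out of 1000 accesses puts the 128 kB point of the curve at a miss ratio of 0.4.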
+
+void
+printSize(std::ostream &stream, size_t size)
+{
+    static const char *SIZES[] = { "B", "kB", "MB", "GB", "TB", "ZB" };
+    int div = 0;
+    while (size >= 1024 && div < (sizeof SIZES / sizeof *SIZES)) {
+        div++;
+        size >>= 10;
+    }
+    stream << size << SIZES[div];
+}
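printSize is a new file-local helper used below to generate the stat subnames. Two details worth flagging, hedged since they may be intentional in the committed code: the loop bound lets div step one slot past the SIZES table for inputs of 2^60 bytes and beyond (an out-of-bounds read in principle, though unreachable for realistic cache sizes), and the unit ladder jumps from TB straight to ZB, skipping PB and EB. A bounds-safe variant for comparison (illustrative only, not the committed code):

    #include <cstddef>
    #include <ostream>

    void printSizeBounded(std::ostream &stream, std::size_t size)
    {
        static const char *SIZES[] = { "B", "kB", "MB", "GB", "TB", "PB" };
        const std::size_t last = sizeof(SIZES) / sizeof(*SIZES) - 1;
        std::size_t div = 0;
        while (size >= 1024 && div < last) {  // never step past the table
            div++;
            size >>= 10;
        }
        stream << size << SIZES[div];
    }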
+
+void
+FALRU::CacheTracking::regStats(std::string name)
+{
+    hits
+        .init(numTrackedCaches + 1)
+        .name(name + ".falru_hits")
+        .desc("The number of hits in each cache size.")
+        ;
+    misses
+        .init(numTrackedCaches + 1)
+        .name(name + ".falru_misses")
+        .desc("The number of misses in each cache size.")
+        ;
+    accesses
+        .name(name + ".falru_accesses")
+        .desc("The number of accesses to the FA LRU cache.")
+        ;
+
+    for (unsigned i = 0; i < numTrackedCaches + 1; ++i) {
+        std::stringstream size_str;
+        printSize(size_str, minTrackedSize << i);
+        hits.subname(i, size_str.str());
+        hits.subdesc(i, "Hits in a " + size_str.str() + " cache");
+        misses.subname(i, size_str.str());
+        misses.subdesc(i, "Misses in a " + size_str.str() + " cache");
+    }
+}