fa_lru.cc revision 13216:6ae030076b29
/*
 * Copyright (c) 2018 Inria
 * Copyright (c) 2013,2016-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 *          Daniel Carvalho
 */

/**
 * @file
 * Definitions of a fully associative LRU tagstore.
 */

#include "mem/cache/tags/fa_lru.hh"

#include <cassert>
#include <sstream>

#include "base/intmath.hh"
#include "base/logging.hh"
#include "mem/cache/base.hh"

FALRU::FALRU(const Params *p)
    : BaseTags(p),
      cacheTracking(p->min_tracked_cache_size, size, blkSize)
{
    if (!isPowerOf2(blkSize))
        fatal("cache block size (in bytes) `%d' must be a power of two",
              blkSize);
    if (!isPowerOf2(size))
        fatal("cache size (in bytes) `%d' must be a power of two for now",
              size);

    blks = new FALRUBlk[numBlocks];
}

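// Sizing example for the constructor above: numBlocks is derived in
// BaseTags as size / blkSize, so e.g. a 32kB fully associative cache with
// 64B lines allocates 512 FALRUBlk entries.
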
FALRU::~FALRU()
{
    delete[] blks;
}

void
FALRU::init(BaseCache* cache)
{
    // Set parent cache
    setCache(cache);

    head = &(blks[0]);
    head->prev = nullptr;
    head->next = &(blks[1]);
    head->set = 0;
    head->way = 0;
    head->data = &dataBlks[0];

    for (unsigned i = 1; i < numBlocks - 1; i++) {
        blks[i].prev = &(blks[i-1]);
        blks[i].next = &(blks[i+1]);
        blks[i].set = 0;
        blks[i].way = i;

        // Associate a data chunk to the block
        blks[i].data = &dataBlks[blkSize*i];
    }

    tail = &(blks[numBlocks - 1]);
    tail->prev = &(blks[numBlocks - 2]);
    tail->next = nullptr;
    tail->set = 0;
    tail->way = numBlocks - 1;
    tail->data = &dataBlks[(numBlocks - 1) * blkSize];

    cacheTracking.init(head, tail);
}

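// Resulting list layout after init() above (illustrative sketch for four
// blocks):
//
//   head (MRU)                              tail (LRU)
//   blks[0] <-> blks[1] <-> blks[2] <-> blks[3]
//
// All blocks share set 0; a block's way is its array index, and its data
// pointer is offset blkSize * way into the shared dataBlks buffer.
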
void
FALRU::regStats()
{
    BaseTags::regStats();
    cacheTracking.regStats(name());
}

void
FALRU::invalidate(CacheBlk *blk)
{
    // Erase block entry reference in the hash table
    auto num_erased = tagHash.erase(std::make_pair(blk->tag, blk->isSecure()));

    // Sanity check; only one block reference should be erased
    assert(num_erased == 1);

    // Invalidate block entry. Must be done after the hash is erased
    BaseTags::invalidate(blk);

    // Decrease the number of tags in use
    tagsInUse--;

    // Move the block to the tail to make it the next victim
    moveToTail(static_cast<FALRUBlk*>(blk));
}

CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat)
{
    return accessBlock(addr, is_secure, lat, nullptr);
}

CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat,
                   CachesMask *in_caches_mask)
{
    CachesMask mask = 0;
    FALRUBlk* blk = static_cast<FALRUBlk*>(findBlock(addr, is_secure));

    if (blk && blk->isValid()) {
        // If a cache hit
        lat = accessLatency;
        // Check if the block to be accessed is available. If not,
        // apply the accessLatency on top of block->whenReady.
        if (blk->whenReady > curTick() &&
            cache->ticksToCycles(blk->whenReady - curTick()) >
            accessLatency) {
            lat = cache->ticksToCycles(blk->whenReady - curTick()) +
                accessLatency;
        }
        mask = blk->inCachesMask;

        moveToHead(blk);
    } else {
        // If a cache miss
        lat = lookupLatency;
    }
    if (in_caches_mask) {
        *in_caches_mask = mask;
    }

    cacheTracking.recordAccess(blk);

    return blk;
}

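// Worked example of the hit-latency rule in accessBlock() above (cycle
// counts are illustrative): if a hit block only becomes ready 10 cycles
// from now and accessLatency is 4 cycles, lat is reported as 10 + 4 = 14;
// if the block would be ready within accessLatency anyway, lat stays at
// accessLatency.
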
CacheBlk*
FALRU::findBlock(Addr addr, bool is_secure) const
{
    FALRUBlk* blk = nullptr;

    Addr tag = extractTag(addr);
    auto iter = tagHash.find(std::make_pair(tag, is_secure));
    if (iter != tagHash.end()) {
        blk = iter->second;
    }

    if (blk && blk->isValid()) {
        assert(blk->tag == tag);
        assert(blk->isSecure() == is_secure);
    }

    return blk;
}

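// Lookup-key note: the hash above is keyed on the pair (tag, secure bit),
// so the same tag can be cached simultaneously in secure and non-secure
// state. The hash keeps lookups O(1); without it, a fully associative
// search would have to walk the entire LRU list on every access.
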
ReplaceableEntry*
FALRU::findBlockBySetAndWay(int set, int way) const
{
    assert(set == 0);
    return &blks[way];
}

CacheBlk*
FALRU::findVictim(Addr addr, const bool is_secure,
                  std::vector<CacheBlk*>& evict_blks) const
{
    // The victim is always stored on the tail for the FALRU
    FALRUBlk* victim = tail;

    // There is only one eviction for this replacement
    evict_blks.push_back(victim);

    return victim;
}

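// Victim-selection note: unlike the set associative tag stores, no
// replacement policy object is consulted in findVictim() above; keeping
// the list in strict LRU order makes the tail the victim by construction.
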
void
FALRU::insertBlock(const Addr addr, const bool is_secure,
                   const int src_master_ID, const uint32_t task_ID,
                   CacheBlk *blk)
{
    FALRUBlk* falruBlk = static_cast<FALRUBlk*>(blk);

    // Make sure block is not present in the cache
    assert(falruBlk->inCachesMask == 0);

    // Do common block insertion functionality
    BaseTags::insertBlock(addr, is_secure, src_master_ID, task_ID, blk);

    // Increment tag counter
    tagsInUse++;

    // New block is the MRU
    moveToHead(falruBlk);

    // Insert new block in the hash table
    tagHash[std::make_pair(blk->tag, blk->isSecure())] = falruBlk;
}

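// Ordering note for insertBlock() above: the hash entry is added only
// after BaseTags::insertBlock has set the block's tag, since
// (tag, secure bit) is the key under which the block must later be found.
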
void
FALRU::moveToHead(FALRUBlk *blk)
{
    // If block is not already head, do the moving
    if (blk != head) {
        cacheTracking.moveBlockToHead(blk);
        // If block is tail, set previous block as new tail
        if (blk == tail) {
            assert(blk->next == nullptr);
            tail = blk->prev;
            tail->next = nullptr;
        // Inform block's surrounding blocks that it has been moved
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }

        // Swap pointers
        blk->next = head;
        blk->prev = nullptr;
        head->prev = blk;
        head = blk;

        cacheTracking.check(head, tail);
    }
}

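// Splice example (sketch): promoting B in head -> A <-> B <-> C <- tail
// first relinks A <-> C around B, then prepends B, giving
// head -> B <-> A <-> C <- tail. moveToTail below mirrors this at the
// other end of the list.
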
void
FALRU::moveToTail(FALRUBlk *blk)
{
    // If block is not already tail, do the moving
    if (blk != tail) {
        cacheTracking.moveBlockToTail(blk);
        // If block is head, set next block as new head
        if (blk == head) {
            assert(blk->prev == nullptr);
            head = blk->next;
            head->prev = nullptr;
        // Inform block's surrounding blocks that it has been moved
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }

        // Swap pointers
        blk->prev = tail;
        blk->next = nullptr;
        tail->next = blk;
        tail = blk;

        cacheTracking.check(head, tail);
    }
}

FALRU *
FALRUParams::create()
{
    return new FALRU(this);
}

void
FALRU::CacheTracking::check(const FALRUBlk *head, const FALRUBlk *tail) const
{
#ifdef FALRU_DEBUG
    const FALRUBlk* blk = head;
    unsigned curr_size = 0;
    unsigned tracked_cache_size = minTrackedSize;
    CachesMask in_caches_mask = inAllCachesMask;
    int j = 0;

    while (blk) {
        panic_if(blk->inCachesMask != in_caches_mask, "Expected cache mask "
                 "%x found %x", in_caches_mask, blk->inCachesMask);

        curr_size += blkSize;
        if (curr_size == tracked_cache_size && blk != tail) {
            panic_if(boundaries[j] != blk, "Unexpected boundary for the %d-th "
                     "cache", j);
            tracked_cache_size <<= 1;
            // from this point, blocks fit only in the larger caches
            in_caches_mask &= ~(1U << j);
            ++j;
        }
        blk = blk->next;
    }
#endif // FALRU_DEBUG
}

void
FALRU::CacheTracking::init(FALRUBlk *head, FALRUBlk *tail)
{
    // early exit if we are not tracking any extra caches
    FALRUBlk* blk = numTrackedCaches ? head : nullptr;
    unsigned curr_size = 0;
    unsigned tracked_cache_size = minTrackedSize;
    CachesMask in_caches_mask = inAllCachesMask;
    int j = 0;

    while (blk) {
        blk->inCachesMask = in_caches_mask;

        curr_size += blkSize;
        if (curr_size == tracked_cache_size && blk != tail) {
            boundaries[j] = blk;

            tracked_cache_size <<= 1;
            // from this point, blocks fit only in the larger caches
            in_caches_mask &= ~(1U << j);
            ++j;
        }
        blk = blk->next;
    }
}

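// Boundary illustration (sketch, assuming minTrackedSize = 4kB and an
// actual cache of 16kB): boundaries[0] points at the last block that
// would still fit in a 4kB cache, boundaries[1] at the last block of the
// hypothetical 8kB cache, and so on. Blocks past boundaries[i] have bit i
// cleared in their inCachesMask, meaning they only "hit" in the larger
// tracked caches.
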
void
FALRU::CacheTracking::moveBlockToHead(FALRUBlk *blk)
{
    // Get the mask of all caches in which the block didn't fit
    // before moving it to the head
    CachesMask update_caches_mask = inAllCachesMask ^ blk->inCachesMask;

    for (int i = 0; i < numTrackedCaches; i++) {
        CachesMask current_cache_mask = 1U << i;
        if (current_cache_mask & update_caches_mask) {
            // if the ith cache didn't fit the block (before it is moved to
            // the head), move the ith boundary 1 block closer to the
            // MRU
            boundaries[i]->inCachesMask &= ~current_cache_mask;
            boundaries[i] = boundaries[i]->prev;
        } else if (boundaries[i] == blk) {
            // Make sure the boundary doesn't point to the block
            // we are about to move
            boundaries[i] = blk->prev;
        }
    }

    // Make block reside in all caches
    blk->inCachesMask = inAllCachesMask;
}

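// Boundary-update example (sketch): when a block that previously fit only
// in the two largest tracked caches is promoted to MRU, every smaller
// cache's boundary retreats one block toward the MRU, because the
// promoted block now displaces one block from each of those smaller
// hypothetical caches.
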
void
FALRU::CacheTracking::moveBlockToTail(FALRUBlk *blk)
{
    CachesMask update_caches_mask = blk->inCachesMask;

    for (int i = 0; i < numTrackedCaches; i++) {
        CachesMask current_cache_mask = 1U << i;
        if (current_cache_mask & update_caches_mask) {
            // if the ith cache fitted the block (before it is moved to
            // the tail), move the ith boundary 1 block closer to the
            // LRU
            boundaries[i] = boundaries[i]->next;
            if (boundaries[i] == blk) {
                // Make sure the boundary doesn't point to the block
                // we are about to move
                boundaries[i] = blk->next;
            }
            boundaries[i]->inCachesMask |= current_cache_mask;
        }
    }

    // The block now fits only in the actual cache
    blk->inCachesMask = 0;
}

void
FALRU::CacheTracking::recordAccess(FALRUBlk *blk)
{
    for (int i = 0; i < numTrackedCaches; i++) {
        if (blk && ((1U << i) & blk->inCachesMask)) {
            hits[i]++;
        } else {
            misses[i]++;
        }
    }

    // Record stats for the actual cache too
    if (blk && blk->isValid()) {
        hits[numTrackedCaches]++;
    } else {
        misses[numTrackedCaches]++;
    }

    accesses++;
}

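// Stats example (sketch, tracked sizes 4kB and 8kB, actual cache last):
// an access to a valid block whose inCachesMask is 0b10 records a miss
// for the 4kB cache, a hit for the 8kB cache, and a hit for the actual
// cache, so a single simulation reports hit rates for several
// hypothetical cache sizes at once.
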
void
printSize(std::ostream &stream, size_t size)
{
    static const char *SIZES[] = { "B", "kB", "MB", "GB", "TB", "PB" };
    static const size_t NUM_SIZES = sizeof(SIZES) / sizeof(*SIZES);
    size_t div = 0;
    // Stop at the last unit so SIZES[div] can never be indexed out of
    // bounds
    while (size >= 1024 && div < NUM_SIZES - 1) {
        div++;
        size >>= 10;
    }
    stream << size << SIZES[div];
}

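// Usage sketch: printSize(std::cout, 32768) prints "32kB". Sizes are
// integer-truncated toward the next-lower unit, so 1536 bytes print as
// "1kB".
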
void
FALRU::CacheTracking::regStats(std::string name)
{
    hits
        .init(numTrackedCaches + 1)
        .name(name + ".falru_hits")
        .desc("The number of hits in each cache size.")
        ;
    misses
        .init(numTrackedCaches + 1)
        .name(name + ".falru_misses")
        .desc("The number of misses in each cache size.")
        ;
    accesses
        .name(name + ".falru_accesses")
        .desc("The number of accesses to the FA LRU cache.")
        ;

    for (unsigned i = 0; i < numTrackedCaches + 1; ++i) {
        std::stringstream size_str;
        printSize(size_str, minTrackedSize << i);
        hits.subname(i, size_str.str());
        hits.subdesc(i, "Hits in a " + size_str.str() + " cache");
        misses.subname(i, size_str.str());
        misses.subdesc(i, "Misses in a " + size_str.str() + " cache");
    }
}
466