/*
 * Copyright (c) 2013,2016-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definitions of a fully associative LRU tag store.
 */

#include "mem/cache/tags/fa_lru.hh"

#include <cassert>
#include <sstream>

#include "base/intmath.hh"
#include "base/logging.hh"
#include "mem/cache/base.hh"

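// Build the tag store as a single doubly-linked list covering all
// blocks, ordered from MRU (head) to LRU (tail), with each block
// bound to its chunk of the data array.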
FALRU::FALRU(const Params *p)
    : BaseTags(p),
      cacheTracking(p->min_tracked_cache_size, size, blkSize)
{
    if (!isPowerOf2(blkSize))
        fatal("cache block size (in bytes) `%d' must be a power of two",
              blkSize);
    if (!isPowerOf2(size))
        fatal("cache size (in bytes) `%d' must be a power of two for now",
              size);

    blks = new FALRUBlk[numBlocks];

    head = &(blks[0]);
    head->prev = nullptr;
    head->next = &(blks[1]);
    head->set = 0;
    head->way = 0;
    head->data = &dataBlks[0];

    for (unsigned i = 1; i < numBlocks - 1; i++) {
        blks[i].prev = &(blks[i-1]);
        blks[i].next = &(blks[i+1]);
        blks[i].set = 0;
        blks[i].way = i;

        // Associate a data chunk to the block
        blks[i].data = &dataBlks[blkSize*i];
    }

    tail = &(blks[numBlocks - 1]);
    tail->prev = &(blks[numBlocks - 2]);
    tail->next = nullptr;
    tail->set = 0;
    tail->way = numBlocks - 1;
    tail->data = &dataBlks[(numBlocks - 1) * blkSize];

    cacheTracking.init(head, tail);
}

FALRU::~FALRU()
{
    delete[] blks;
}

void
FALRU::regStats()
{
    BaseTags::regStats();
    cacheTracking.regStats(name());
}

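// Look up a block in the hash table to avoid walking the MRU->LRU
// list on every access. Note that despite the parameter name,
// callers pass an already-extracted tag.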
FALRUBlk *
FALRU::hashLookup(Addr addr) const
{
    tagIterator iter = tagHash.find(addr);
    if (iter != tagHash.end()) {
        return iter->second;
    }
    return nullptr;
}

void
FALRU::invalidate(CacheBlk *blk)
{
    BaseTags::invalidate(blk);

    // Decrease the number of tags in use
    tagsInUse--;

    // Move the block to the tail to make it the next victim
    moveToTail(static_cast<FALRUBlk*>(blk));

    // Erase block entry in the hash table
    tagHash.erase(blk->tag);
}

CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat)
{
    return accessBlock(addr, is_secure, lat, nullptr);
}

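// A hit is charged the access latency, extended if the block is
// still being filled (whenReady is in the future), and promotes the
// block to the MRU head; a miss is charged only the lookup latency.
// If in_caches_mask is non-null it returns which of the tracked
// cache sizes would also have hit.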
CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat,
                   CachesMask *in_caches_mask)
{
    CachesMask mask = 0;
    FALRUBlk* blk = static_cast<FALRUBlk*>(findBlock(addr, is_secure));

    if (blk != nullptr) {
        // If a cache hit
        lat = accessLatency;
        // Check if the block to be accessed is available. If not,
        // apply the accessLatency on top of block->whenReady.
        if (blk->whenReady > curTick() &&
            cache->ticksToCycles(blk->whenReady - curTick()) >
            accessLatency) {
            lat = cache->ticksToCycles(blk->whenReady - curTick()) +
                accessLatency;
        }
        mask = blk->inCachesMask;

        moveToHead(blk);
    } else {
        // If a cache miss
        lat = lookupLatency;
    }
    if (in_caches_mask) {
        *in_caches_mask = mask;
    }

    cacheTracking.recordAccess(blk);

    return blk;
}

CacheBlk*
FALRU::findBlock(Addr addr, bool is_secure) const
{
    Addr tag = extractTag(addr);
    FALRUBlk* blk = hashLookup(tag);

    if (blk && blk->isValid()) {
        assert(blk->tag == tag);
        assert(blk->isSecure() == is_secure);
    } else {
        blk = nullptr;
    }
    return blk;
}

ReplaceableEntry*
FALRU::findBlockBySetAndWay(int set, int way) const
{
    assert(set == 0);
    return &blks[way];
}

CacheBlk*
FALRU::findVictim(Addr addr, std::vector<CacheBlk*>& evict_blks) const
{
    // The victim is always the LRU block, stored at the tail
    FALRUBlk* victim = tail;

    // There is only one eviction for this replacement
    evict_blks.push_back(victim);

    return victim;
}

void
FALRU::insertBlock(PacketPtr pkt, CacheBlk *blk)
{
    FALRUBlk* falruBlk = static_cast<FALRUBlk*>(blk);

    // Make sure block is not present in the cache
    assert(falruBlk->inCachesMask == 0);

    // Do common block insertion functionality
    BaseTags::insertBlock(pkt, blk);

    // Increment tag counter
    tagsInUse++;

    // New block is the MRU
    moveToHead(falruBlk);

    // Insert new block in the hash table
    tagHash[falruBlk->tag] = falruBlk;
}

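// Unlink the block from its current position in the list and relink
// it at the head, making it the MRU block.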
void
FALRU::moveToHead(FALRUBlk *blk)
{
    // If block is not already head, do the moving
    if (blk != head) {
        cacheTracking.moveBlockToHead(blk);
        // If block is tail, set previous block as new tail
        if (blk == tail) {
            assert(blk->next == nullptr);
            tail = blk->prev;
            tail->next = nullptr;
        // Inform block's surrounding blocks that it has been moved
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }

        // Relink the block at the head of the list
        blk->next = head;
        blk->prev = nullptr;
        head->prev = blk;
        head = blk;

        cacheTracking.check(head, tail);
    }
}

void
FALRU::moveToTail(FALRUBlk *blk)
{
    // If block is not already tail, do the moving
    if (blk != tail) {
        cacheTracking.moveBlockToTail(blk);
        // If block is head, set next block as new head
        if (blk == head) {
            assert(blk->prev == nullptr);
            head = blk->next;
            head->prev = nullptr;
        // Inform block's surrounding blocks that it has been moved
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }

        // Relink the block at the tail of the list
        blk->prev = tail;
        blk->next = nullptr;
        tail->next = blk;
        tail = blk;

        cacheTracking.check(head, tail);
    }
}

FALRU *
FALRUParams::create()
{
    return new FALRU(this);
}

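// The CacheTracking methods below simulate several smaller caches at
// once: boundaries[i] points at the last (least recently used) block
// that still fits in the i-th tracked cache size, and a block's
// inCachesMask has bit i set iff the block sits at or above that
// boundary. check() verifies these invariants when FALRU_DEBUG is
// defined.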
void
FALRU::CacheTracking::check(FALRUBlk *head, FALRUBlk *tail)
{
#ifdef FALRU_DEBUG
    FALRUBlk* blk = head;
    unsigned curr_size = 0;
    unsigned tracked_cache_size = minTrackedSize;
    CachesMask in_caches_mask = inAllCachesMask;
    int j = 0;

    while (blk) {
        panic_if(blk->inCachesMask != in_caches_mask, "Expected cache mask "
                 "%x found %x", blk->inCachesMask, in_caches_mask);

        curr_size += blkSize;
        if (curr_size == tracked_cache_size && blk != tail) {
            panic_if(boundaries[j] != blk, "Unexpected boundary for the %d-th "
                     "cache", j);
            tracked_cache_size <<= 1;
            // from this point, blocks fit only in the larger caches
            in_caches_mask &= ~(1U << j);
            ++j;
        }
        blk = blk->next;
    }
#endif // FALRU_DEBUG
}

void
FALRU::CacheTracking::init(FALRUBlk *head, FALRUBlk *tail)
{
    // early exit if we are not tracking any extra caches
    FALRUBlk* blk = numTrackedCaches ? head : nullptr;
    unsigned curr_size = 0;
    unsigned tracked_cache_size = minTrackedSize;
    CachesMask in_caches_mask = inAllCachesMask;
    int j = 0;

    while (blk) {
        blk->inCachesMask = in_caches_mask;

        curr_size += blkSize;
        if (curr_size == tracked_cache_size && blk != tail) {
            boundaries[j] = blk;

            tracked_cache_size <<= 1;
            // from this point, blocks fit only in the larger caches
            in_caches_mask &= ~(1U << j);
            ++j;
        }
        blk = blk->next;
    }
}

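// Update the tracking state for a block about to become the MRU:
// every tracked cache that did not hold the block gains it, so that
// cache's boundary slides one block towards the MRU end and its old
// boundary block no longer fits.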
void
FALRU::CacheTracking::moveBlockToHead(FALRUBlk *blk)
{
    // Get the mask of all caches in which the block didn't fit
    // before moving it to the head
    CachesMask update_caches_mask = inAllCachesMask ^ blk->inCachesMask;

    for (int i = 0; i < numTrackedCaches; i++) {
        CachesMask current_cache_mask = 1U << i;
        if (current_cache_mask & update_caches_mask) {
            // if the ith cache didn't fit the block (before it is moved
            // to the head), move the ith boundary 1 block closer to the
            // MRU
            boundaries[i]->inCachesMask &= ~current_cache_mask;
            boundaries[i] = boundaries[i]->prev;
        } else if (boundaries[i] == blk) {
            // Make sure the boundary doesn't point to the block
            // we are about to move
            boundaries[i] = blk->prev;
        }
    }

    // Make block reside in all caches
    blk->inCachesMask = inAllCachesMask;
}

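// Mirror of moveBlockToHead: every tracked cache that held the block
// loses it, so its boundary slides one block towards the LRU end and
// the block crossing the boundary gains that cache's mask bit.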
void
FALRU::CacheTracking::moveBlockToTail(FALRUBlk *blk)
{
    CachesMask update_caches_mask = blk->inCachesMask;

    for (int i = 0; i < numTrackedCaches; i++) {
        CachesMask current_cache_mask = 1U << i;
        if (current_cache_mask & update_caches_mask) {
            // if the block fit in the ith cache (before it is moved to
            // the tail), move the ith boundary 1 block closer to the
            // LRU
            boundaries[i] = boundaries[i]->next;
            if (boundaries[i] == blk) {
                // Make sure the boundary doesn't point to the block
                // we are about to move
                boundaries[i] = blk->next;
            }
            boundaries[i]->inCachesMask |= current_cache_mask;
        }
    }

    // The block now fits only in the actual cache
    blk->inCachesMask = 0;
}

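// Count the access against every tracked cache size: a hit for each
// size whose bit is set in the block's mask, a miss otherwise. Slot
// numTrackedCaches holds the stats for the actual cache.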
void
FALRU::CacheTracking::recordAccess(FALRUBlk *blk)
{
    for (int i = 0; i < numTrackedCaches; i++) {
        if (blk && ((1U << i) & blk->inCachesMask)) {
            hits[i]++;
        } else {
            misses[i]++;
        }
    }

    // Record stats for the actual cache too
    if (blk) {
        hits[numTrackedCaches]++;
    } else {
        misses[numTrackedCaches]++;
    }

    accesses++;
}

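// Print a size using the largest power-of-1024 unit that keeps the
// value at or above one, e.g. 32768 becomes "32kB".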
void
printSize(std::ostream &stream, size_t size)
{
    static const char *SIZES[] = { "B", "kB", "MB", "GB", "TB", "PB" };
    int div = 0;
    // Stop before running past the last entry in SIZES
    while (size >= 1024 &&
           div < int(sizeof(SIZES) / sizeof(SIZES[0])) - 1) {
        div++;
        size >>= 10;
    }
    stream << size << SIZES[div];
}

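// Register hit/miss/access stats with one subname per tracked cache
// size plus one for the actual cache; the i-th entry corresponds to
// a cache of minTrackedSize << i bytes. For example (illustrative
// values, not defaults), minTrackedSize = 4kB with numTrackedCaches
// = 3 gives subnames 4kB, 8kB and 16kB, plus 32kB for the actual
// cache.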
void
FALRU::CacheTracking::regStats(std::string name)
{
    hits
        .init(numTrackedCaches + 1)
        .name(name + ".falru_hits")
        .desc("The number of hits in each cache size.")
        ;
    misses
        .init(numTrackedCaches + 1)
        .name(name + ".falru_misses")
        .desc("The number of misses in each cache size.")
        ;
    accesses
        .name(name + ".falru_accesses")
        .desc("The number of accesses to the FA LRU cache.")
        ;

    for (unsigned i = 0; i < numTrackedCaches + 1; ++i) {
        std::stringstream size_str;
        printSize(size_str, minTrackedSize << i);
        hits.subname(i, size_str.str());
        hits.subdesc(i, "Hits in a " + size_str.str() + " cache");
        misses.subname(i, size_str.str());
        misses.subdesc(i, "Misses in a " + size_str.str() + " cache");
    }
}