fa_lru.cc revision 13225
/*
 * Copyright (c) 2018 Inria
 * Copyright (c) 2013,2016-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 *          Daniel Carvalho
 */

/**
 * @file
 * Definitions of a fully associative LRU tagstore.
 */

#include "mem/cache/tags/fa_lru.hh"

#include <cassert>
#include <sstream>

#include "base/intmath.hh"
#include "base/logging.hh"
#include "mem/cache/base.hh"
#include "mem/cache/replacement_policies/replaceable_entry.hh"

std::string
FALRUBlk::print() const
{
    return csprintf("%s inCachesMask: %#x", CacheBlk::print(), inCachesMask);
}

FALRU::FALRU(const Params *p)
    : BaseTags(p),
      cacheTracking(p->min_tracked_cache_size, size, blkSize)
{
    if (!isPowerOf2(blkSize))
        fatal("cache block size (in bytes) `%d' must be a power of two",
              blkSize);
    if (!isPowerOf2(size))
        fatal("cache size (in bytes) `%d' must be a power of two", size);

    blks = new FALRUBlk[numBlocks];
}

FALRU::~FALRU()
{
    delete[] blks;
}

void
FALRU::init(BaseCache* cache)
{
    // Set parent cache
    setCache(cache);

    head = &(blks[0]);
    head->prev = nullptr;
    head->next = &(blks[1]);
    head->setPosition(0, 0);
    head->data = &dataBlks[0];

    for (unsigned i = 1; i < numBlocks - 1; i++) {
        blks[i].prev = &(blks[i-1]);
        blks[i].next = &(blks[i+1]);
        blks[i].setPosition(0, i);

        // Associate a data chunk with the block
        blks[i].data = &dataBlks[blkSize*i];
    }

    tail = &(blks[numBlocks - 1]);
    tail->prev = &(blks[numBlocks - 2]);
    tail->next = nullptr;
    tail->setPosition(0, numBlocks - 1);
    tail->data = &dataBlks[(numBlocks - 1) * blkSize];

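    // At this point the recency list is fully linked from MRU to LRU:
    //   head = blks[0] <-> blks[1] <-> ... <-> blks[numBlocks - 1] = tail
    // with each block's way index matching its index into blks, so the
    // cache-size tracking can be seeded from it.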
    cacheTracking.init(head, tail);
}

void
FALRU::regStats()
{
    BaseTags::regStats();
    cacheTracking.regStats(name());
}

void
FALRU::invalidate(CacheBlk *blk)
{
    // Erase block entry reference in the hash table
    auto num_erased = tagHash.erase(std::make_pair(blk->tag, blk->isSecure()));

    // Sanity check; only one block reference should be erased
    assert(num_erased == 1);

    // Invalidate block entry. Must be done after the hash is erased
    BaseTags::invalidate(blk);

    // Decrease the number of tags in use
    tagsInUse--;

    // Move the block to the tail to make it the next victim
    moveToTail(static_cast<FALRUBlk*>(blk));
}

CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat)
{
    return accessBlock(addr, is_secure, lat, nullptr);
}

CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat,
                   CachesMask *in_caches_mask)
{
    CachesMask mask = 0;
    FALRUBlk* blk = static_cast<FALRUBlk*>(findBlock(addr, is_secure));

    if (blk && blk->isValid()) {
        // Cache hit
        lat = accessLatency;
        // Check if the block to be accessed is available. If not,
        // apply the accessLatency on top of block->whenReady.
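        // e.g., a block whose fill completes 5 cycles from now with an
        // accessLatency of 2 cycles sees lat = 5 + 2 = 7 cycles.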
        if (blk->whenReady > curTick() &&
            cache->ticksToCycles(blk->whenReady - curTick()) >
                accessLatency) {
            lat = cache->ticksToCycles(blk->whenReady - curTick()) +
                accessLatency;
        }
        mask = blk->inCachesMask;

        moveToHead(blk);
    } else {
        // Cache miss
        lat = lookupLatency;
    }
    if (in_caches_mask) {
        *in_caches_mask = mask;
    }

    cacheTracking.recordAccess(blk);

    return blk;
}

CacheBlk*
FALRU::findBlock(Addr addr, bool is_secure) const
{
    FALRUBlk* blk = nullptr;

    Addr tag = extractTag(addr);
    auto iter = tagHash.find(std::make_pair(tag, is_secure));
    if (iter != tagHash.end()) {
        blk = iter->second;
    }

    if (blk && blk->isValid()) {
        assert(blk->tag == tag);
        assert(blk->isSecure() == is_secure);
    }

    return blk;
}

ReplaceableEntry*
FALRU::findBlockBySetAndWay(int set, int way) const
{
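    // A fully associative cache has a single set (0); the way index is
    // simply the block's index in the blks array.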
    assert(set == 0);
    return &blks[way];
}

CacheBlk*
FALRU::findVictim(Addr addr, const bool is_secure,
                  std::vector<CacheBlk*>& evict_blks) const
{
    // The victim is always the block at the tail (LRU position)
    FALRUBlk* victim = tail;

    // There is only one eviction for this replacement
    evict_blks.push_back(victim);

    return victim;
}

void
FALRU::insertBlock(const Addr addr, const bool is_secure,
                   const int src_master_ID, const uint32_t task_ID,
                   CacheBlk *blk)
{
    FALRUBlk* falruBlk = static_cast<FALRUBlk*>(blk);

    // Make sure block is not present in the cache
    assert(falruBlk->inCachesMask == 0);

    // Do common block insertion functionality
    BaseTags::insertBlock(addr, is_secure, src_master_ID, task_ID, blk);

    // Increment tag counter
    tagsInUse++;

    // New block is the MRU
    moveToHead(falruBlk);

    // Insert new block in the hash table
    tagHash[std::make_pair(blk->tag, blk->isSecure())] = falruBlk;
}

void
FALRU::moveToHead(FALRUBlk *blk)
{
    // If block is not already head, do the moving
    if (blk != head) {
        cacheTracking.moveBlockToHead(blk);
        // If block is tail, set previous block as new tail
        if (blk == tail) {
            assert(blk->next == nullptr);
            tail = blk->prev;
            tail->next = nullptr;
        // Inform block's surrounding blocks that it has been moved
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }

        // Splice the block in as the new head
        blk->next = head;
        blk->prev = nullptr;
        head->prev = blk;
        head = blk;

        cacheTracking.check(head, tail);
    }
}

void
FALRU::moveToTail(FALRUBlk *blk)
{
    // If block is not already tail, do the moving
    if (blk != tail) {
        cacheTracking.moveBlockToTail(blk);
        // If block is head, set next block as new head
        if (blk == head) {
            assert(blk->prev == nullptr);
            head = blk->next;
            head->prev = nullptr;
        // Inform block's surrounding blocks that it has been moved
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }

        // Splice the block in as the new tail
        blk->prev = tail;
        blk->next = nullptr;
        tail->next = blk;
        tail = blk;

        cacheTracking.check(head, tail);
    }
}

FALRU *
FALRUParams::create()
{
    return new FALRU(this);
}

void
FALRU::CacheTracking::check(const FALRUBlk *head, const FALRUBlk *tail) const
{
#ifdef FALRU_DEBUG
    const FALRUBlk* blk = head;
    unsigned curr_size = 0;
    unsigned tracked_cache_size = minTrackedSize;
    CachesMask in_caches_mask = inAllCachesMask;
    int j = 0;

    while (blk) {
        panic_if(blk->inCachesMask != in_caches_mask, "Expected cache mask "
                 "%x found %x", in_caches_mask, blk->inCachesMask);

        curr_size += blkSize;
        if (curr_size == tracked_cache_size && blk != tail) {
            panic_if(boundaries[j] != blk, "Unexpected boundary for the %d-th "
                     "cache", j);
            tracked_cache_size <<= 1;
            // from this point, blocks fit only in the larger caches
            in_caches_mask &= ~(1U << j);
            ++j;
        }
        blk = blk->next;
    }
#endif // FALRU_DEBUG
}

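// CacheTracking keeps one boundary pointer per tracked cache size.
// Walking the recency list from the head, the first minTrackedSize
// bytes of blocks would fit in the smallest tracked cache, the first
// 2 * minTrackedSize bytes in the next one, and so on; boundaries[j]
// points at the last block that still fits in the j-th tracked cache,
// and a block's inCachesMask has bit j set iff the block currently
// fits in the j-th cache.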
void
FALRU::CacheTracking::init(FALRUBlk *head, FALRUBlk *tail)
{
    // early exit if we are not tracking any extra caches
    FALRUBlk* blk = numTrackedCaches ? head : nullptr;
    unsigned curr_size = 0;
    unsigned tracked_cache_size = minTrackedSize;
    CachesMask in_caches_mask = inAllCachesMask;
    int j = 0;

    while (blk) {
        blk->inCachesMask = in_caches_mask;

        curr_size += blkSize;
        if (curr_size == tracked_cache_size && blk != tail) {
            boundaries[j] = blk;

            tracked_cache_size <<= 1;
            // from this point, blocks fit only in the larger caches
            in_caches_mask &= ~(1U << j);
            ++j;
        }
        blk = blk->next;
    }
}

void
FALRU::CacheTracking::moveBlockToHead(FALRUBlk *blk)
{
    // Get the mask of all caches in which the block didn't fit
    // before moving it to the head
    CachesMask update_caches_mask = inAllCachesMask ^ blk->inCachesMask;
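    // e.g., with three tracked caches and blk->inCachesMask == 0b001,
    // the block fit only in the smallest cache, so update_caches_mask
    // is 0b110 and the boundaries of caches 1 and 2 each shift one
    // block towards the MRU end.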

    for (int i = 0; i < numTrackedCaches; i++) {
        CachesMask current_cache_mask = 1U << i;
        if (current_cache_mask & update_caches_mask) {
            // if the ith cache didn't fit the block (before it is moved
            // to the head), move the ith boundary 1 block closer to the
            // MRU
            boundaries[i]->inCachesMask &= ~current_cache_mask;
            boundaries[i] = boundaries[i]->prev;
        } else if (boundaries[i] == blk) {
            // Make sure the boundary doesn't point to the block
            // we are about to move
            boundaries[i] = blk->prev;
        }
    }

    // Make block reside in all caches
    blk->inCachesMask = inAllCachesMask;
}

void
FALRU::CacheTracking::moveBlockToTail(FALRUBlk *blk)
{
    CachesMask update_caches_mask = blk->inCachesMask;
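    // e.g., if blk->inCachesMask == 0b011, the block fit in caches 0
    // and 1, so those boundaries each shift one block towards the LRU
    // end and absorb the block that previously sat just past them.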

    for (int i = 0; i < numTrackedCaches; i++) {
        CachesMask current_cache_mask = 1U << i;
        if (current_cache_mask & update_caches_mask) {
            // if the ith cache fit the block (before it is moved to
            // the tail), move the ith boundary 1 block closer to the
            // LRU
            boundaries[i] = boundaries[i]->next;
            if (boundaries[i] == blk) {
                // Make sure the boundary doesn't point to the block
                // we are about to move
                boundaries[i] = blk->next;
            }
            boundaries[i]->inCachesMask |= current_cache_mask;
        }
    }

    // The block now fits only in the actual cache
    blk->inCachesMask = 0;
}

void
FALRU::CacheTracking::recordAccess(FALRUBlk *blk)
{
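    // An access hits in the i-th tracked cache iff the block is
    // present and currently fits in that cache (mask bit i set); the
    // last entry of each stat vector covers the actual cache.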
    for (int i = 0; i < numTrackedCaches; i++) {
        if (blk && ((1U << i) & blk->inCachesMask)) {
            hits[i]++;
        } else {
            misses[i]++;
        }
    }

    // Record stats for the actual cache too
    if (blk && blk->isValid()) {
        hits[numTrackedCaches]++;
    } else {
        misses[numTrackedCaches]++;
    }

    accesses++;
}

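// Helper to pretty-print a byte count using power-of-two units,
// e.g. 32768 becomes "32kB".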
void
printSize(std::ostream &stream, size_t size)
{
    static const char *SIZES[] = { "B", "kB", "MB", "GB", "TB", "PB" };
    const int num_sizes = sizeof(SIZES) / sizeof(SIZES[0]);
    int div = 0;
    // Scale down by 1024 until the value fits the largest known unit
    while (size >= 1024 && div < num_sizes - 1) {
        div++;
        size >>= 10;
    }
    stream << size << SIZES[div];
}


void
FALRU::CacheTracking::regStats(std::string name)
{
    hits
        .init(numTrackedCaches + 1)
        .name(name + ".falru_hits")
        .desc("The number of hits in each cache size.")
        ;
    misses
        .init(numTrackedCaches + 1)
        .name(name + ".falru_misses")
        .desc("The number of misses in each cache size.")
        ;
    accesses
        .name(name + ".falru_accesses")
        .desc("The number of accesses to the FA LRU cache.")
        ;

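    // Give each vector entry a human-readable subname so the stats are
    // reported per tracked size, e.g. falru_hits::4kB and
    // falru_hits::8kB for a 4kB minimum tracked size.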
    for (unsigned i = 0; i < numTrackedCaches + 1; ++i) {
        std::stringstream size_str;
        printSize(size_str, minTrackedSize << i);
        hits.subname(i, size_str.str());
        hits.subdesc(i, "Hits in a " + size_str.str() + " cache");
        misses.subname(i, size_str.str());
        misses.subdesc(i, "Misses in a " + size_str.str() + " cache");
    }
}