/*
 * Copyright (c) 2018 Inria
 * Copyright (c) 2013,2016-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 *          Daniel Carvalho
 */

/**
 * @file
 * Definitions of a fully associative LRU tagstore.
 */

#include "mem/cache/tags/fa_lru.hh"

#include <cassert>
#include <sstream>

#include "base/intmath.hh"
#include "base/logging.hh"
#include "mem/cache/base.hh"
#include "mem/cache/replacement_policies/replaceable_entry.hh"

std::string
FALRUBlk::print() const
{
    return csprintf("%s inCachesMask: %#x", CacheBlk::print(), inCachesMask);
}

FALRU::FALRU(const Params *p)
    : BaseTags(p),
      cacheTracking(p->min_tracked_cache_size, size, blkSize)
{
    if (!isPowerOf2(blkSize))
        fatal("cache block size (in bytes) `%d' must be a power of two",
              blkSize);
    if (!isPowerOf2(size))
        fatal("cache size (in bytes) `%d' must be a power of two", size);

    blks = new FALRUBlk[numBlocks];
}

FALRU::~FALRU()
{
    delete[] blks;
}

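// tagsInit() links all blocks into a doubly-linked list ordered by
// recency: head is the most recently used (MRU) entry, tail the least
// recently used (LRU) one, and each block is bound to its data chunk.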
void
FALRU::tagsInit()
{
    head = &(blks[0]);
    head->prev = nullptr;
    head->next = &(blks[1]);
    head->setPosition(0, 0);
    head->data = &dataBlks[0];

    for (unsigned i = 1; i < numBlocks - 1; i++) {
        blks[i].prev = &(blks[i-1]);
        blks[i].next = &(blks[i+1]);
        blks[i].setPosition(0, i);

        // Associate a data chunk with the block
        blks[i].data = &dataBlks[blkSize*i];
    }

    tail = &(blks[numBlocks - 1]);
    tail->prev = &(blks[numBlocks - 2]);
    tail->next = nullptr;
    tail->setPosition(0, numBlocks - 1);
    tail->data = &dataBlks[(numBlocks - 1) * blkSize];

    cacheTracking.init(head, tail);
}

void
FALRU::regStats()
{
    BaseTags::regStats();
    cacheTracking.regStats(name());
}

void
FALRU::invalidate(CacheBlk *blk)
{
    // Erase block entry reference in the hash table
    auto num_erased M5_VAR_USED =
        tagHash.erase(std::make_pair(blk->tag, blk->isSecure()));

    // Sanity check; only one block reference should be erased
    assert(num_erased == 1);

    // Invalidate block entry. Must be done after the hash is erased
    BaseTags::invalidate(blk);

    // Decrease the number of tags in use
    tagsInUse--;

    // Move the block to the tail to make it the next victim
    moveToTail((FALRUBlk*)blk);
}

CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat)
{
    return accessBlock(addr, is_secure, lat, nullptr);
}

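// Variant that additionally reports, via in_caches_mask, which of the
// tracked cache sizes the block would have hit in. A hit moves the
// block to the MRU (head) position; the lookup latency is charged
// either way.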
CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat,
                   CachesMask *in_caches_mask)
{
    CachesMask mask = 0;
    FALRUBlk* blk = static_cast<FALRUBlk*>(findBlock(addr, is_secure));

    // On a hit, record which tracked caches the block fits in and make
    // it the MRU entry
    if (blk && blk->isValid()) {
        mask = blk->inCachesMask;

        moveToHead(blk);
    }

    if (in_caches_mask) {
        *in_caches_mask = mask;
    }

    cacheTracking.recordAccess(blk);

    // The tag lookup latency is the same for a hit or a miss
    lat = lookupLatency;

    return blk;
}

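// Lookups go through the tag hash table, giving constant-time access
// instead of a linear walk over the LRU list.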
CacheBlk*
FALRU::findBlock(Addr addr, bool is_secure) const
{
    FALRUBlk* blk = nullptr;

    Addr tag = extractTag(addr);
    auto iter = tagHash.find(std::make_pair(tag, is_secure));
    if (iter != tagHash.end()) {
        blk = (*iter).second;
    }

    if (blk && blk->isValid()) {
        assert(blk->tag == tag);
        assert(blk->isSecure() == is_secure);
    }

    return blk;
}

ReplaceableEntry*
FALRU::findBlockBySetAndWay(int set, int way) const
{
    assert(set == 0);
    return &blks[way];
}

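// In a fully associative cache there is a single set, so the victim is
// always the LRU (tail) entry; no per-set replacement search is needed.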
CacheBlk*
FALRU::findVictim(Addr addr, const bool is_secure, const std::size_t size,
                  std::vector<CacheBlk*>& evict_blks) const
{
    // The victim is always stored on the tail for the FALRU
    FALRUBlk* victim = tail;

    // There is only one eviction for this replacement
    evict_blks.push_back(victim);

    return victim;
}

void
FALRU::insertBlock(const PacketPtr pkt, CacheBlk *blk)
{
    FALRUBlk* falruBlk = static_cast<FALRUBlk*>(blk);

    // Make sure block is not present in the cache
    assert(falruBlk->inCachesMask == 0);

    // Do common block insertion functionality
    BaseTags::insertBlock(pkt, blk);

    // Increment tag counter
    tagsInUse++;

    // New block is the MRU
    moveToHead(falruBlk);

    // Insert new block in the hash table
    tagHash[std::make_pair(blk->tag, blk->isSecure())] = falruBlk;
}

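// moveToHead() and moveToTail() splice a block out of the list and
// reinsert it at the MRU or LRU end, keeping the boundary pointers of
// the tracked caches consistent via CacheTracking.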
void
FALRU::moveToHead(FALRUBlk *blk)
{
    // If block is not already head, do the moving
    if (blk != head) {
        cacheTracking.moveBlockToHead(blk);
        // If block is tail, set previous block as new tail
        if (blk == tail) {
            assert(blk->next == nullptr);
            tail = blk->prev;
            tail->next = nullptr;
        // Inform block's surrounding blocks that it has been moved
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }

        // Splice the block in at the head of the list
        blk->next = head;
        blk->prev = nullptr;
        head->prev = blk;
        head = blk;

        cacheTracking.check(head, tail);
    }
}

void
FALRU::moveToTail(FALRUBlk *blk)
{
    // If block is not already tail, do the moving
    if (blk != tail) {
        cacheTracking.moveBlockToTail(blk);
        // If block is head, set next block as new head
        if (blk == head) {
            assert(blk->prev == nullptr);
            head = blk->next;
            head->prev = nullptr;
        // Inform block's surrounding blocks that it has been moved
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }

        // Splice the block in at the tail of the list
        blk->prev = tail;
        blk->next = nullptr;
        tail->next = blk;
        tail = blk;

        cacheTracking.check(head, tail);
    }
}

FALRU *
FALRUParams::create()
{
    return new FALRU(this);
}

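// Walk the LRU list and verify that every block's inCachesMask and
// every tracked-cache boundary are consistent. Only compiled in when
// FALRU_DEBUG is defined.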
void
FALRU::CacheTracking::check(const FALRUBlk *head, const FALRUBlk *tail) const
{
#ifdef FALRU_DEBUG
    const FALRUBlk* blk = head;
    unsigned curr_size = 0;
    unsigned tracked_cache_size = minTrackedSize;
    CachesMask in_caches_mask = inAllCachesMask;
    int j = 0;

    while (blk) {
        panic_if(blk->inCachesMask != in_caches_mask, "Expected cache mask "
                 "%x, found %x", in_caches_mask, blk->inCachesMask);

        curr_size += blkSize;
        if (curr_size == tracked_cache_size && blk != tail) {
            panic_if(boundaries[j] != blk, "Unexpected boundary for the %d-th "
                     "cache", j);
            tracked_cache_size <<= 1;
            // From this point on, blocks fit only in the larger caches
            in_caches_mask &= ~(1U << j);
            ++j;
        }
        blk = blk->next;
    }
#endif // FALRU_DEBUG
}

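// Set up the tracked-cache boundaries. Tracked cache sizes double from
// minTrackedSize upwards; boundaries[i] points at the last (coldest)
// block that still fits in the i-th tracked cache. For example, with a
// minimum tracked size of 4kB, boundaries[0] marks the 4kB point,
// boundaries[1] the 8kB point, and so on.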
void
FALRU::CacheTracking::init(FALRUBlk *head, FALRUBlk *tail)
{
    // Early exit if we are not tracking any extra caches
    FALRUBlk* blk = numTrackedCaches ? head : nullptr;
    unsigned curr_size = 0;
    unsigned tracked_cache_size = minTrackedSize;
    CachesMask in_caches_mask = inAllCachesMask;
    int j = 0;

    while (blk) {
        blk->inCachesMask = in_caches_mask;

        curr_size += blkSize;
        if (curr_size == tracked_cache_size && blk != tail) {
            boundaries[j] = blk;

            tracked_cache_size <<= 1;
            // From this point on, blocks fit only in the larger caches
            in_caches_mask &= ~(1U << j);
            ++j;
        }
        blk = blk->next;
    }
}

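// When a block becomes the MRU, every tracked cache it did not fit in
// effectively loses its coldest block: each such boundary moves one
// block toward the MRU end.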
void
FALRU::CacheTracking::moveBlockToHead(FALRUBlk *blk)
{
    // Get the mask of all caches in which the block did not fit
    // before moving it to the head
    CachesMask update_caches_mask = inAllCachesMask ^ blk->inCachesMask;

    for (int i = 0; i < numTrackedCaches; i++) {
        CachesMask current_cache_mask = 1U << i;
        if (current_cache_mask & update_caches_mask) {
            // If the block did not fit in the i-th cache (before it is
            // moved to the head), move the i-th boundary one block
            // closer to the MRU
            boundaries[i]->inCachesMask &= ~current_cache_mask;
            boundaries[i] = boundaries[i]->prev;
        } else if (boundaries[i] == blk) {
            // Make sure the boundary doesn't point to the block
            // we are about to move
            boundaries[i] = blk->prev;
        }
    }

    // Make block reside in all caches
    blk->inCachesMask = inAllCachesMask;
}

void
FALRU::CacheTracking::moveBlockToTail(FALRUBlk *blk)
{
    CachesMask update_caches_mask = blk->inCachesMask;

    for (int i = 0; i < numTrackedCaches; i++) {
        CachesMask current_cache_mask = 1U << i;
        if (current_cache_mask & update_caches_mask) {
            // If the block fit in the i-th cache (before it is moved to
            // the tail), move the i-th boundary one block closer to the
            // LRU
            boundaries[i] = boundaries[i]->next;
            if (boundaries[i] == blk) {
                // Make sure the boundary doesn't point to the block
                // we are about to move
                boundaries[i] = blk->next;
            }
            boundaries[i]->inCachesMask |= current_cache_mask;
        }
    }

    // The block now fits only in the actual cache
    blk->inCachesMask = 0;
}

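// Score a single access against every tracked cache size at once: the
// access counts as a hit in cache i iff the block currently fits in it.
// The last counter pair tracks the actual cache.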
void
FALRU::CacheTracking::recordAccess(FALRUBlk *blk)
{
    for (int i = 0; i < numTrackedCaches; i++) {
        if (blk && ((1U << i) & blk->inCachesMask)) {
            hits[i]++;
        } else {
            misses[i]++;
        }
    }

    // Record stats for the actual cache too
    if (blk && blk->isValid()) {
        hits[numTrackedCaches]++;
    } else {
        misses[numTrackedCaches]++;
    }

    accesses++;
}

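// Format a size in bytes as a human-readable string with a binary unit
// suffix (e.g., 32768 -> "32kB").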
void
printSize(std::ostream &stream, size_t size)
{
    // Binary unit suffixes; the loop bound below keeps div in range
    // even for very large sizes
    static const char *SIZES[] = { "B", "kB", "MB", "GB", "TB", "PB" };
    int div = 0;
    while (size >= 1024 && div < int(sizeof(SIZES) / sizeof(*SIZES)) - 1) {
        div++;
        size >>= 10;
    }
    stream << size << SIZES[div];
}

void
FALRU::CacheTracking::regStats(std::string name)
{
    hits
        .init(numTrackedCaches + 1)
        .name(name + ".falru_hits")
        .desc("The number of hits in each cache size.")
        ;
    misses
        .init(numTrackedCaches + 1)
        .name(name + ".falru_misses")
        .desc("The number of misses in each cache size.")
        ;
    accesses
        .name(name + ".falru_accesses")
        .desc("The number of accesses to the FA LRU cache.")
        ;

    for (unsigned i = 0; i < numTrackedCaches + 1; ++i) {
        std::stringstream size_str;
        printSize(size_str, minTrackedSize << i);
        hits.subname(i, size_str.str());
        hits.subdesc(i, "Hits in a " + size_str.str() + " cache");
        misses.subname(i, size_str.str());
        misses.subdesc(i, "Misses in a " + size_str.str() + " cache");
    }
}