/*
 * Copyright (c) 2012-2013,2016,2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declaration of a fully associative LRU tag store.
 */

49#ifndef __MEM_CACHE_TAGS_FA_LRU_HH__
50#define __MEM_CACHE_TAGS_FA_LRU_HH__
51
52#include <list>
53#include <unordered_map>
54
55#include "base/intmath.hh"
56#include "mem/cache/base.hh"
57#include "mem/cache/blk.hh"
58#include "mem/cache/tags/base.hh"
59#include "mem/packet.hh"
60#include "params/FALRU.hh"
61
62// Uncomment to enable sanity checks for the FALRU cache and the
63// TrackedCaches class
64//#define FALRU_DEBUG
65
66// A bitmask of the caches we are keeping track of. Currently the
67// lowest bit is the smallest cache we are tracking, as it is
68// specified by the corresponding parameter. The rest of the bits are
69// for exponentially growing cache sizes.
70typedef uint32_t CachesMask;
71
72/**
73 * A fully associative cache block.
74 */
75class FALRUBlk : public CacheBlk
76{
77 public:
78 /** The previous block in LRU order. */
79 FALRUBlk *prev;
80 /** The next block in LRU order. */
81 FALRUBlk *next;
82
83 /** A bit mask of the caches that fit this block. */
84 CachesMask inCachesMask;
85};
87/**
88 * A fully associative LRU cache. Keeps statistics for accesses to a number of
89 * cache sizes at once.
90 */
91class FALRU : public BaseTags
92{
93 public:
94 /** Typedef the block type used in this class. */
95 typedef FALRUBlk BlkType;
96
97 protected:
98 /** The cache blocks. */
99 FALRUBlk *blks;
100
101 /** The MRU block. */
102 FALRUBlk *head;
103 /** The LRU block. */
104 FALRUBlk *tail;
105
106 /** Hash table type mapping addresses to cache block pointers. */
107 typedef std::unordered_map<Addr, FALRUBlk *, std::hash<Addr> > hash_t;
108 /** Iterator into the address hash table. */
109 typedef hash_t::const_iterator tagIterator;
110
111 /** The address hash table. */
112 hash_t tagHash;
113
114 /**
115 * Find the cache block for the given address.
116 * @param addr The address to find.
117 * @return The cache block of the address, if any.
118 */
119 FALRUBlk * hashLookup(Addr addr) const;
120
121 /**
122 * Move a cache block to the MRU position.
123 *
124 * @param blk The block to promote.
125 */
126 void moveToHead(FALRUBlk *blk);
127
128 /**
129 * Move a cache block to the LRU position.
130 *
131 * @param blk The block to demote.
132 */
133 void moveToTail(FALRUBlk *blk);
134
135 public:
136 typedef FALRUParams Params;
137
138 /**
139 * Construct and initialize this cache tagstore.
140 */
141 FALRU(const Params *p);
142 ~FALRU();
143
144 /**
145 * Register the stats for this object.
146 */
147 void regStats() override;
148
149 /**
150 * Invalidate a cache block.
151 * @param blk The block to invalidate.
152 */
153 void invalidate(CacheBlk *blk) override;
154
155 /**
156 * Access block and update replacement data. May not succeed, in which
157 * case nullptr pointer is returned. This has all the implications of a
158 * cache access and should only be used as such.
159 * Returns the access latency and inCachesMask flags as a side effect.
160 * @param addr The address to look for.
161 * @param is_secure True if the target memory space is secure.
162 * @param lat The latency of the access.
163 * @param in_cache_mask Mask indicating the caches in which the blk fits.
164 * @return Pointer to the cache block.
165 */
166 CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
167 CachesMask *in_cache_mask);
168
169 /**
170 * Just a wrapper of above function to conform with the base interface.
171 */
172 CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) override;
173
174 /**
175 * Find the block in the cache, do not update the replacement data.
176 * @param addr The address to look for.
177 * @param is_secure True if the target memory space is secure.
178 * @param asid The address space ID.
179 * @return Pointer to the cache block.
180 */
181 CacheBlk* findBlock(Addr addr, bool is_secure) const override;
182
183 /**
184 * Find replacement victim based on address.
185 *
186 * @param addr Address to find a victim for.
187 * @return Cache block to be replaced.
188 */
189 CacheBlk* findVictim(Addr addr) override;
190
191 /**
192 * Insert the new block into the cache and update replacement data.
193 *
194 * @param pkt Packet holding the address to update
195 * @param blk The block to update.
196 */
197 void insertBlock(PacketPtr pkt, CacheBlk *blk) override;
198
199 /**
200 * Find the cache block given set and way
201 * @param set The set of the block.
202 * @param way The way of the block.
203 * @return The cache block.
204 */
205 CacheBlk* findBlockBySetAndWay(int set, int way) const override;
206
207 /**
208 * Generate the tag from the addres. For fully associative this is just the
209 * block address.
210 * @param addr The address to get the tag from.
211 * @return The tag.
212 */
213 Addr extractTag(Addr addr) const override
214 {
215 return blkAlign(addr);
216 }
217
218 /**
219 * Return the set of an address. Only one set in a fully associative cache.
220 * @param addr The address to get the set from.
221 * @return 0.
222 */
223 int extractSet(Addr addr) const override
224 {
225 return 0;
226 }
227
228 /**
229 * Regenerate the block address from the tag.
230 *
231 * @param block The block.
232 * @return the block address.
233 */
234 Addr regenerateBlkAddr(const CacheBlk* blk) const override
235 {
236 return blk->tag;
237 }
238
239 /**
240 * @todo Implement as in lru. Currently not used
241 */
242 virtual std::string print() const override { return ""; }
243
244 /**
245 * Visit each block in the tag store and apply a visitor to the
246 * block.
247 *
248 * The visitor should be a function (or object that behaves like a
249 * function) that takes a cache block reference as its parameter
250 * and returns a bool. A visitor can request the traversal to be
251 * stopped by returning false, returning true causes it to be
252 * called for the next block in the tag store.
253 *
254 * \param visitor Visitor to call on each block.
255 */
256 void forEachBlk(CacheBlkVisitor &visitor) override {
257 for (int i = 0; i < numBlocks; i++) {
258 if (!visitor(blks[i]))
259 return;
260 }
261 }
262
263 private:
264 /**
265 * Mechanism that allows us to simultaneously collect miss
266 * statistics for multiple caches. Currently, we keep track of
267 * caches from a set minimum size of interest up to the actual
268 * cache size.
269 */
270 class CacheTracking
271 {
272 public:
273 CacheTracking(unsigned min_size, unsigned max_size,
274 unsigned block_size)
275 : blkSize(block_size),
276 minTrackedSize(min_size),
277 numTrackedCaches(max_size > min_size ?
278 floorLog2(max_size) - floorLog2(min_size) : 0),
279 inAllCachesMask(mask(numTrackedCaches)),
280 boundaries(new FALRUBlk *[numTrackedCaches])
281 {
282 fatal_if(numTrackedCaches > sizeof(CachesMask) * 8,
283 "Not enough bits (%s) in type CachesMask type to keep "
284 "track of %d caches\n", sizeof(CachesMask),
285 numTrackedCaches);
286 }
287
288 ~CacheTracking()
289 {
290 delete[] boundaries;
291 }
292
293 /**
294 * Initialiaze cache blocks and the tracking mechanism
295 *
296 * All blocks in the cache need to be initialized once.
297 *
298 * @param blk the MRU block
299 * @param blk the LRU block
300 */
301 void init(FALRUBlk *head, FALRUBlk *tail);
302
303 /**
304 * Update boundaries as a block will be moved to the MRU.
305 *
306 * For all caches that didn't fit the block before moving it,
307 * we move their boundaries one block closer to the MRU. We
308 * also update InCacheMasks as neccessary.
309 *
310 * @param blk the block that will be moved to the head
311 */
312 void moveBlockToHead(FALRUBlk *blk);
313
314 /**
315 * Update boundaries as a block will be moved to the LRU.
316 *
317 * For all caches that fitted the block before moving it, we
318 * move their boundaries one block closer to the LRU. We
319 * also update InCacheMasks as neccessary.
320 *
321 * @param blk the block that will be moved to the head
322 */
323 void moveBlockToTail(FALRUBlk *blk);
324
325 /**
326 * Notify of a block access.
327 *
328 * This should be called every time a block is accessed and it
329 * updates statistics. If the input block is nullptr then we
330 * treat the access as a miss. The block's InCacheMask
331 * determines the caches in which the block fits.
332 *
333 * @param blk the block to record the access for
334 */
335 void recordAccess(FALRUBlk *blk);
336
337 /**
338 * Check that the tracking mechanism is in consistent state.
339 *
340 * Iterate from the head (MRU) to the tail (LRU) of the list
341 * of blocks and assert the inCachesMask and the boundaries
342 * are in consistent state.
343 *
344 * @param head the MRU block of the actual cache
345 * @param head the LRU block of the actual cache
346 */
347 void check(FALRUBlk *head, FALRUBlk *tail);
348
349 /**
350 * Register the stats for this object.
351 */
352 void regStats(std::string name);
353
354 private:
355 /** The size of the cache block */
356 const unsigned blkSize;
357 /** The smallest cache we are tracking */
358 const unsigned minTrackedSize;
359 /** The number of different size caches being tracked. */
360 const int numTrackedCaches;
361 /** A mask for all cache being tracked. */
362 const CachesMask inAllCachesMask;
363 /** Array of pointers to blocks at the cache boundaries. */
364 FALRUBlk** boundaries;
365
366 protected:
367 /**
368 * @defgroup FALRUStats Fully Associative LRU specific statistics
369 * The FA lru stack lets us track multiple cache sizes at once. These
370 * statistics track the hits and misses for different cache sizes.
371 * @{
372 */
373
374 /** Hits in each cache */
375 Stats::Vector hits;
376 /** Misses in each cache */
377 Stats::Vector misses;
378 /** Total number of accesses */
379 Stats::Scalar accesses;
380
381 /**
382 * @}
383 */
384 };
385 CacheTracking cacheTracking;
386};

#endif // __MEM_CACHE_TAGS_FA_LRU_HH__