1/*
2 * Copyright (c) 2012-2014,2016-2018 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2003-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Erik Hallnor
41 * Ron Dreslinski
42 */
43
44/**
45 * @file
46 * Declaration of a common base class for cache tagstore objects.
47 */
48
49#ifndef __MEM_CACHE_TAGS_BASE_HH__
50#define __MEM_CACHE_TAGS_BASE_HH__
51
#include <cassert>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <vector>

#include "base/callback.hh"
#include "base/logging.hh"
#include "base/statistics.hh"
#include "base/types.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/packet.hh"
#include "params/BaseTags.hh"
#include "sim/clocked_object.hh"

class System;
class BaseIndexingPolicy;
class IndexingPolicy;
class ReplaceableEntry;
69
/**
 * A common base class of Cache tagstore objects.
 *
 * Concrete tag stores (set-associative, fully-associative, sector, ...)
 * derive from this class and implement block lookup, victim selection and
 * address regeneration. This class owns the backing data storage and the
 * common statistics.
 */
class BaseTags : public ClockedObject
{
  protected:
    /** The block size of the cache. */
    const unsigned blkSize;
    /** Mask out all bits that aren't part of the block offset. */
    const Addr blkMask;
    /** The size of the cache. */
    const unsigned size;
    /** The tag lookup latency of the cache. */
    const Cycles lookupLatency;

    /** System we are currently operating in. */
    System *system;

    /** Indexing policy used to map addresses to sets/ways. */
    BaseIndexingPolicy *indexingPolicy;

    /**
     * The number of tags that need to be touched to meet the warmup
     * percentage.
     */
    const unsigned warmupBound;
    /** Marked true when the cache is warmed up. */
    bool warmedUp;

    /** the number of blocks in the cache */
    const unsigned numBlocks;

    /** The data blocks, 1 per cache block. */
    std::unique_ptr<uint8_t[]> dataBlks;

    // Statistics
    /**
     * TODO: It would be good if these stats were acquired after warmup.
     * @addtogroup CacheStatistics
     * @{
     */

    /** Per cycle average of the number of tags that hold valid data. */
    Stats::Average tagsInUse;

    /** The total number of references to a block before it is replaced. */
    Stats::Scalar totalRefs;

    /**
     * The number of reference counts sampled. This is different from
     * replacements because we sample all the valid blocks when the simulator
     * exits.
     */
    Stats::Scalar sampledRefs;

    /**
     * Average number of references to a block before it was replaced.
     * @todo This should change to an average stat once we have them.
     */
    Stats::Formula avgRefs;

    /** The cycle that the warmup percentage was hit. 0 on failure. */
    Stats::Scalar warmupCycle;

    /** Average occupancy of each requestor using the cache */
    Stats::AverageVector occupancies;

    /** Average occ % of each requestor using the cache */
    Stats::Formula avgOccs;

    /** Occupancy of each context/cpu using the cache */
    Stats::Vector occupanciesTaskId;

    /** Occupancy of each context/cpu using the cache */
    Stats::Vector2d ageTaskId;

    /** Occ % of each context/cpu using the cache */
    Stats::Formula percentOccsTaskId;

    /** Number of tags consulted over all accesses. */
    Stats::Scalar tagAccesses;
    /** Number of data blocks consulted over all accesses. */
    Stats::Scalar dataAccesses;

    /**
     * @}
     */

  public:
    typedef BaseTagsParams Params;
    BaseTags(const Params *p);

    /**
     * Destructor.
     */
    virtual ~BaseTags() {}

    /**
     * Initialize blocks. Must be overridden by every subclass that uses
     * a block type different from its parent's, as the current Python
     * code generation does not allow templates.
     */
    virtual void tagsInit() = 0;

    /**
     * Register local statistics.
     */
    void regStats();

    /**
     * Average in the reference count for valid blocks when the simulation
     * exits.
     */
    void cleanupRefs();

    /**
     * Computes stats just prior to dump event
     */
    void computeStats();

    /**
     * Print all tags used
     */
    std::string print();

    /**
     * Finds the block in the cache without touching it.
     *
     * Unlike accessBlock(), this does not update replacement data and has
     * no side effects on the block's state.
     *
     * @param addr The address to look for.
     * @param is_secure True if the target memory space is secure.
     * @return Pointer to the cache block.
     */
    virtual CacheBlk *findBlock(Addr addr, bool is_secure) const;

    /**
     * Find a block given set and way.
     *
     * @param set The set of the block.
     * @param way The way of the block.
     * @return The block.
     */
    virtual ReplaceableEntry* findBlockBySetAndWay(int set, int way) const;

    /**
     * Align an address to the block size.
     * @param addr the address to align.
     * @return The block address.
     */
    Addr blkAlign(Addr addr) const
    {
        return addr & ~blkMask;
    }

    /**
     * Calculate the block offset of an address.
     * @param addr the address to get the offset of.
     * @return the block offset.
     */
    int extractBlkOffset(Addr addr) const
    {
        return (addr & blkMask);
    }

    /**
     * Limit the allocation for the cache ways.
     * Panics unless a subclass overrides it with a real implementation.
     * @param ways The maximum number of ways available for replacement.
     */
    virtual void setWayAllocationMax(int ways)
    {
        panic("This tag class does not implement way allocation limit!\n");
    }

    /**
     * Get the way allocation mask limit.
     * Panics unless a subclass overrides it with a real implementation.
     * @return The maximum number of ways available for replacement.
     */
    virtual int getWayAllocationMax() const
    {
        panic("This tag class does not implement way allocation limit!\n");
        return -1;
    }

    /**
     * This function updates the tags when a block is invalidated
     *
     * @param blk A valid block to invalidate.
     */
    virtual void invalidate(CacheBlk *blk)
    {
        assert(blk);
        assert(blk->isValid());

        // Accumulate replacement stats before the block's metadata is
        // cleared by invalidate() below
        occupancies[blk->srcMasterId]--;
        totalRefs += blk->refCount;
        sampledRefs++;

        blk->invalidate();
    }

    /**
     * Find replacement victim based on address. If the address requires
     * blocks to be evicted, their locations are listed for eviction. If a
     * conventional cache is being used, the list only contains the victim.
     * However, if using sector or compressed caches, the victim is one of
     * the blocks to be evicted, but its location is the only one that will
     * be assigned to the newly allocated block associated to this address.
     * @sa insertBlock
     *
     * @param addr Address to find a victim for.
     * @param is_secure True if the target memory space is secure.
     * @param size Size, in bits, of new block to allocate.
     * @param evict_blks Cache blocks to be evicted.
     * @return Cache block to be replaced.
     */
    virtual CacheBlk* findVictim(Addr addr, const bool is_secure,
                                 const std::size_t size,
                                 std::vector<CacheBlk*>& evict_blks) const = 0;

    /**
     * Access block and update replacement data. May not succeed, in which case
     * nullptr is returned. This has all the implications of a cache access and
     * should only be used as such. Returns the tag lookup latency as a side
     * effect.
     *
     * @param addr The address to find.
     * @param is_secure True if the target memory space is secure.
     * @param lat The latency of the tag lookup.
     * @return Pointer to the cache block if found.
     */
    virtual CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) = 0;

    /**
     * Generate the tag from the given address.
     *
     * @param addr The address to get the tag from.
     * @return The tag of the address.
     */
    virtual Addr extractTag(const Addr addr) const;

    /**
     * Insert the new block into the cache and update stats.
     *
     * @param pkt Packet holding the address to update
     * @param blk The block to update.
     */
    virtual void insertBlock(const PacketPtr pkt, CacheBlk *blk);

    /**
     * Regenerate the block address.
     *
     * @param blk The block.
     * @return the block address.
     */
    virtual Addr regenerateBlkAddr(const CacheBlk* blk) const = 0;

    /**
     * Visit each block in the tags and apply a visitor
     *
     * The visitor should be a std::function that takes a cache block
     * reference as its parameter.
     *
     * @param visitor Visitor to call on each block.
     */
    virtual void forEachBlk(std::function<void(CacheBlk &)> visitor) = 0;

    /**
     * Find if any of the blocks satisfies a condition
     *
     * The visitor should be a std::function that takes a cache block
     * reference as its parameter. The visitor will terminate the
     * traversal early if the condition is satisfied.
     *
     * @param visitor Visitor to call on each block.
     */
    virtual bool anyBlk(std::function<bool(CacheBlk &)> visitor) = 0;

  private:
    /**
     * Update the reference stats using data from the input block
     *
     * @param blk The input block
     */
    void cleanupRefsVisitor(CacheBlk &blk);

    /**
     * Update the occupancy and age stats using data from the input block
     *
     * @param blk The input block
     */
    void computeStatsVisitor(CacheBlk &blk);
};
361
362class BaseTagsCallback : public Callback
363{
364 BaseTags *tags;
365 public:
366 BaseTagsCallback(BaseTags *t) : tags(t) {}
367 virtual void process() { tags->cleanupRefs(); };
368};
369
370class BaseTagsDumpCallback : public Callback
371{
372 BaseTags *tags;
373 public:
374 BaseTagsDumpCallback(BaseTags *t) : tags(t) {}
375 virtual void process() { tags->computeStats(); };
376};
377
378#endif //__MEM_CACHE_TAGS_BASE_HH__