/*
 * Copyright (c) 2012-2014,2016-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Ron Dreslinski
 */

/**
 * @file
 * Declaration of a common base class for cache tagstore objects.
 */

#ifndef __MEM_CACHE_TAGS_BASE_HH__
#define __MEM_CACHE_TAGS_BASE_HH__

#include <cassert>
#include <functional>
#include <memory>
#include <string>
#include <vector>

#include "base/callback.hh"
#include "base/logging.hh"
#include "base/statistics.hh"
#include "base/types.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/packet.hh"
#include "params/BaseTags.hh"
#include "sim/clocked_object.hh"

class System;
class BaseIndexingPolicy;
class ReplaceableEntry;
69/**
70 * A common base class of Cache tagstore objects.
71 */
72class BaseTags : public ClockedObject
73{
74 protected:
75 /** The block size of the cache. */
76 const unsigned blkSize;
77 /** Mask out all bits that aren't part of the block offset. */
78 const Addr blkMask;
79 /** The size of the cache. */
80 const unsigned size;
81 /** The tag lookup latency of the cache. */
82 const Cycles lookupLatency;
83
84 /** System we are currently operating in. */
85 System *system;
86
87 /** Indexing policy */
88 BaseIndexingPolicy *indexingPolicy;
89
90 /**
91 * The number of tags that need to be touched to meet the warmup
92 * percentage.
93 */
94 const unsigned warmupBound;
95 /** Marked true when the cache is warmed up. */
96 bool warmedUp;
97
98 /** the number of blocks in the cache */
99 const unsigned numBlocks;
100
101 /** The data blocks, 1 per cache block. */
102 std::unique_ptr<uint8_t[]> dataBlks;
103
104 // Statistics
105 /**
106 * TODO: It would be good if these stats were acquired after warmup.
107 * @addtogroup CacheStatistics
108 * @{
109 */
110
111 /** Per cycle average of the number of tags that hold valid data. */
112 Stats::Average tagsInUse;
113
114 /** The total number of references to a block before it is replaced. */
115 Stats::Scalar totalRefs;
116
117 /**
118 * The number of reference counts sampled. This is different from
119 * replacements because we sample all the valid blocks when the simulator
120 * exits.
121 */
122 Stats::Scalar sampledRefs;
123
124 /**
     * Average number of references to a block before it was replaced.
     * @todo This should change to an average stat once we have them.
     */
    Stats::Formula avgRefs;

    /** The cycle that the warmup percentage was hit. 0 on failure. */
    Stats::Scalar warmupCycle;

    /** Average occupancy of each requestor using the cache */
    Stats::AverageVector occupancies;

    /** Average occ % of each requestor using the cache */
    Stats::Formula avgOccs;

    /** Occupancy of each context/cpu using the cache */
    Stats::Vector occupanciesTaskId;

    /** Occupancy of each context/cpu using the cache */
    Stats::Vector2d ageTaskId;

    /** Occ % of each context/cpu using the cache */
    Stats::Formula percentOccsTaskId;

    /** Number of tags consulted over all accesses. */
    Stats::Scalar tagAccesses;
    /** Number of data blocks consulted over all accesses. */
    Stats::Scalar dataAccesses;

    /**
     * @}
     */

  public:
    typedef BaseTagsParams Params;
    BaseTags(const Params *p);

    /**
     * Destructor.
     */
    virtual ~BaseTags() {}

    /**
     * Initialize blocks. Must be overridden by every subclass that uses
     * a block type different from its parent's, as the current Python
     * code generation does not allow templates.
     */
    virtual void tagsInit() = 0;

    /**
     * Register local statistics.
     */
    void regStats();

    /**
     * Average in the reference count for valid blocks when the simulation
     * exits.
     */
    void cleanupRefs();

    /**
     * Computes stats just prior to dump event.
     */
    void computeStats();

    /**
     * Print all tags used.
     */
    std::string print();

    /**
     * Finds the block in the cache without touching it.
     *
     * @param addr The address to look for.
     * @param is_secure True if the target memory space is secure.
     * @return Pointer to the cache block.
     */
    virtual CacheBlk *findBlock(Addr addr, bool is_secure) const;

    /**
     * Find a block given set and way.
     *
     * @param set The set of the block.
     * @param way The way of the block.
     * @return The block.
     */
    virtual ReplaceableEntry* findBlockBySetAndWay(int set, int way) const;

    /**
     * Align an address to the block size.
     * @param addr the address to align.
     * @return The block address.
     */
    Addr blkAlign(Addr addr) const
    {
        return addr & ~blkMask;
    }

    /**
     * Calculate the block offset of an address.
     * @param addr the address to get the offset of.
     * @return the block offset.
     */
    int extractBlkOffset(Addr addr) const
    {
        return (addr & blkMask);
    }
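
    // Worked example: assuming a 64-byte block size (an assumption for
    // illustration only), blkMask == 0x3f, so for Addr a = 0x12345:
    //   blkAlign(a)         == 0x12340   (block-aligned address)
    //   extractBlkOffset(a) == 0x5       (offset within the block)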

    /**
     * Limit the allocation for the cache ways.
     * @param ways The maximum number of ways available for replacement.
     */
    virtual void setWayAllocationMax(int ways)
    {
        panic("This tag class does not implement way allocation limit!\n");
    }

    /**
     * Get the way allocation mask limit.
     * @return The maximum number of ways available for replacement.
     */
    virtual int getWayAllocationMax() const
    {
        panic("This tag class does not implement way allocation limit!\n");
        return -1;
    }

    /**
     * This function updates the tags when a block is invalidated.
     *
     * @param blk A valid block to invalidate.
     */
    virtual void invalidate(CacheBlk *blk)
    {
        assert(blk);
        assert(blk->isValid());

        occupancies[blk->srcMasterId]--;
        totalRefs += blk->refCount;
        sampledRefs++;

        blk->invalidate();
    }

    /**
     * Find replacement victim based on address. If the address requires
     * blocks to be evicted, their locations are listed for eviction. If a
     * conventional cache is being used, the list only contains the victim.
     * However, if using sector or compressed caches, the victim is one of
     * the blocks to be evicted, but its location is the only one that will
     * be assigned to the newly allocated block associated with this address.
     * @sa insertBlock
     *
     * @param addr Address to find a victim for.
     * @param is_secure True if the target memory space is secure.
     * @param evict_blks Cache blocks to be evicted.
     * @return Cache block to be replaced.
     */
    virtual CacheBlk* findVictim(Addr addr, const bool is_secure,
                                 std::vector<CacheBlk*>& evict_blks) const = 0;
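
    // A sketch of how a caller might drive findVictim() together with
    // insertBlock() (variable names here are illustrative, not part of this
    // interface):
    //
    //   std::vector<CacheBlk*> evict_blks;
    //   CacheBlk *victim = findVictim(addr, is_secure, evict_blks);
    //   if (victim) {
    //       // evict every block listed in evict_blks, then hand the
    //       // victim's location to the newly allocated block
    //       insertBlock(pkt, victim);
    //   }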

    /**
     * Access block and update replacement data. May not succeed, in which
     * case nullptr is returned. This has all the implications of a cache
     * access and should only be used as such. Returns the tag lookup latency
     * as a side effect.
     *
     * @param addr The address to find.
     * @param is_secure True if the target memory space is secure.
     * @param lat The latency of the tag lookup.
     * @return Pointer to the cache block if found.
     */
    virtual CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) = 0;
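
    // Usage sketch (illustrative): on the cache's hit/miss path one would
    // typically do something like
    //
    //   Cycles lat;
    //   CacheBlk *blk = accessBlock(pkt->getAddr(), pkt->isSecure(), lat);
    //   if (blk) {
    //       // hit: replacement data has been updated and lat holds the
    //       // tag lookup latency to charge for this access
    //   }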

    /**
     * Generate the tag from the given address.
     *
     * @param addr The address to get the tag from.
     * @return The tag of the address.
     */
    virtual Addr extractTag(const Addr addr) const;

    /**
     * Insert the new block into the cache and update stats.
     *
     * @param pkt Packet holding the address to update
     * @param blk The block to update.
     */
    virtual void insertBlock(const PacketPtr pkt, CacheBlk *blk);

    /**
     * Regenerate the block address.
     *
     * @param blk The block.
     * @return The block address.
     */
    virtual Addr regenerateBlkAddr(const CacheBlk* blk) const = 0;

    /**
     * Visit each block in the tags and apply a visitor.
     *
     * The visitor should be a std::function that takes a cache block
     * reference as its parameter.
     *
     * @param visitor Visitor to call on each block.
     */
    virtual void forEachBlk(std::function<void(CacheBlk &)> visitor) = 0;

    /**
     * Find if any of the blocks satisfies a condition.
     *
     * The visitor should be a std::function that takes a cache block
     * reference as its parameter. The traversal terminates early as soon
     * as the visitor returns true.
     *
     * @param visitor Visitor to call on each block.
     * @return True if the visitor returned true for any block.
     */
    virtual bool anyBlk(std::function<bool(CacheBlk &)> visitor) = 0;
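
    // Example visitors (illustrative), relying on CacheBlk's isValid() and
    // isDirty() state queries:
    //
    //   unsigned valid_blocks = 0;
    //   forEachBlk([&valid_blocks](CacheBlk &blk) {
    //       if (blk.isValid()) valid_blocks++;
    //   });
    //   bool any_dirty = anyBlk([](CacheBlk &blk) { return blk.isDirty(); });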

  private:
    /**
     * Update the reference stats using data from the input block.
     *
     * @param blk The input block
     */
    void cleanupRefsVisitor(CacheBlk &blk);

    /**
     * Update the occupancy and age stats using data from the input block.
     *
     * @param blk The input block
     */
    void computeStatsVisitor(CacheBlk &blk);
};

class BaseTagsCallback : public Callback
{
    BaseTags *tags;
  public:
    BaseTagsCallback(BaseTags *t) : tags(t) {}
    virtual void process() { tags->cleanupRefs(); }
};

class BaseTagsDumpCallback : public Callback
{
    BaseTags *tags;
  public:
    BaseTagsDumpCallback(BaseTags *t) : tags(t) {}
    virtual void process() { tags->computeStats(); }
};
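
// These callbacks are intended to be registered with the simulator's exit
// and stats-dump hooks so that cleanupRefs() and computeStats() run at the
// right time. A sketch of how that registration typically looks (e.g. from
// BaseTags::regStats(); the exact calls may differ between gem5 versions):
//
//   registerExitCallback(new BaseTagsCallback(this));
//   Stats::registerDumpCallback(new BaseTagsDumpCallback(this));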

#endif //__MEM_CACHE_TAGS_BASE_HH__