1/*
2 * Copyright (c) 2012-2014 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2003-2005,2014 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Erik Hallnor
41 */
42
43/**
44 * @file
45 * Declaration of a base set associative tag store.
46 */
47
48#ifndef __MEM_CACHE_TAGS_BASESETASSOC_HH__
49#define __MEM_CACHE_TAGS_BASESETASSOC_HH__
50
51#include <cassert>
52#include <cstring>
53#include <list>
54
55#include "mem/cache/base.hh"
56#include "mem/cache/blk.hh"
57#include "mem/cache/tags/base.hh"
58#include "mem/cache/tags/cacheset.hh"
59#include "mem/packet.hh"
60#include "params/BaseSetAssoc.hh"
61
62/**
63 * A BaseSetAssoc cache tag store.
64 * @sa \ref gem5MemorySystem "gem5 Memory System"
65 *
66 * The BaseSetAssoc tags provide a base, as well as the functionality
67 * common to any set associative tags. Any derived class must implement
 * the methods related to the specifics of the actual replacement policy.
69 * These are:
70 *
71 * BlkType* accessBlock();
72 * BlkType* findVictim();
73 * void insertBlock();
74 * void invalidate();
75 */
76class BaseSetAssoc : public BaseTags
77{
78 public:
79 /** Typedef the block type used in this tag store. */
80 typedef CacheBlk BlkType;
81 /** Typedef the set type used in this tag store. */
82 typedef CacheSet<CacheBlk> SetType;
83
84
85 protected:
86 /** The associativity of the cache. */
87 const unsigned assoc;
88 /** The allocatable associativity of the cache (alloc mask). */
89 unsigned allocAssoc;
90 /** The number of sets in the cache. */
91 const unsigned numSets;
92 /** Whether tags and data are accessed sequentially. */
93 const bool sequentialAccess;
94
95 /** The cache sets. */
96 SetType *sets;
97
98 /** The cache blocks. */
99 BlkType *blks;
100 /** The data blocks, 1 per cache block. */
101 uint8_t *dataBlks;
102
103 /** The amount to shift the address to get the set. */
104 int setShift;
105 /** The amount to shift the address to get the tag. */
106 int tagShift;
107 /** Mask out all bits that aren't part of the set index. */
108 unsigned setMask;
109 /** Mask out all bits that aren't part of the block offset. */
110 unsigned blkMask;
109
110public:
111
112 /** Convenience typedef. */
113 typedef BaseSetAssocParams Params;
114
115 /**
116 * Construct and initialize this tag store.
117 */
118 BaseSetAssoc(const Params *p);
119
120 /**
121 * Destructor
122 */
123 virtual ~BaseSetAssoc();
124
125 /**
126 * Find the cache block given set and way
127 * @param set The set of the block.
128 * @param way The way of the block.
129 * @return The cache block.
130 */
131 CacheBlk *findBlockBySetAndWay(int set, int way) const override;
132
133 /**
134 * Invalidate the given block.
135 * @param blk The block to invalidate.
136 */
137 void invalidate(CacheBlk *blk) override
138 {
139 assert(blk);
140 assert(blk->isValid());
141 tagsInUse--;
142 assert(blk->srcMasterId < cache->system->maxMasters());
143 occupancies[blk->srcMasterId]--;
144 blk->srcMasterId = Request::invldMasterId;
145 blk->task_id = ContextSwitchTaskId::Unknown;
146 blk->tickInserted = curTick();
147 }
148
149 /**
150 * Access block and update replacement data. May not succeed, in which case
151 * nullptr is returned. This has all the implications of a cache
152 * access and should only be used as such. Returns the access latency as a
153 * side effect.
154 * @param addr The address to find.
155 * @param is_secure True if the target memory space is secure.
156 * @param lat The access latency.
157 * @return Pointer to the cache block if found.
158 */
159 CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) override
160 {
161 Addr tag = extractTag(addr);
162 int set = extractSet(addr);
163 BlkType *blk = sets[set].findBlk(tag, is_secure);
164
165 // Access all tags in parallel, hence one in each way. The data side
166 // either accesses all blocks in parallel, or one block sequentially on
167 // a hit. Sequential access with a miss doesn't access data.
168 tagAccesses += allocAssoc;
169 if (sequentialAccess) {
170 if (blk != nullptr) {
171 dataAccesses += 1;
172 }
173 } else {
174 dataAccesses += allocAssoc;
175 }
176
177 if (blk != nullptr) {
178 // If a cache hit
179 lat = accessLatency;
180 // Check if the block to be accessed is available. If not,
181 // apply the accessLatency on top of block->whenReady.
182 if (blk->whenReady > curTick() &&
183 cache->ticksToCycles(blk->whenReady - curTick()) >
184 accessLatency) {
185 lat = cache->ticksToCycles(blk->whenReady - curTick()) +
186 accessLatency;
187 }
188 blk->refCount += 1;
189 } else {
190 // If a cache miss
191 lat = lookupLatency;
192 }
193
194 return blk;
195 }
196
197 /**
198 * Finds the given address in the cache, do not update replacement data.
199 * i.e. This is a no-side-effect find of a block.
200 * @param addr The address to find.
201 * @param is_secure True if the target memory space is secure.
202 * @param asid The address space ID.
203 * @return Pointer to the cache block if found.
204 */
205 CacheBlk* findBlock(Addr addr, bool is_secure) const override;
206
207 /**
208 * Find an invalid block to evict for the address provided.
209 * If there are no invalid blocks, this will return the block
210 * in the least-recently-used position.
211 * @param addr The addr to a find a replacement candidate for.
212 * @return The candidate block.
213 */
214 CacheBlk* findVictim(Addr addr) override
215 {
216 BlkType *blk = nullptr;
217 int set = extractSet(addr);
218
219 // prefer to evict an invalid block
220 for (int i = 0; i < allocAssoc; ++i) {
221 blk = sets[set].blks[i];
222 if (!blk->isValid())
223 break;
224 }
225
226 return blk;
227 }
228
229 /**
230 * Insert the new block into the cache.
231 * @param pkt Packet holding the address to update
232 * @param blk The block to update.
233 */
234 void insertBlock(PacketPtr pkt, CacheBlk *blk) override
235 {
236 Addr addr = pkt->getAddr();
237 MasterID master_id = pkt->req->masterId();
238 uint32_t task_id = pkt->req->taskId();
239
240 if (!blk->isTouched) {
241 tagsInUse++;
242 blk->isTouched = true;
243 if (!warmedUp && tagsInUse.value() >= warmupBound) {
244 warmedUp = true;
245 warmupCycle = curTick();
246 }
247 }
248
249 // If we're replacing a block that was previously valid update
250 // stats for it. This can't be done in findBlock() because a
251 // found block might not actually be replaced there if the
252 // coherence protocol says it can't be.
253 if (blk->isValid()) {
254 replacements[0]++;
255 totalRefs += blk->refCount;
256 ++sampledRefs;
257 blk->refCount = 0;
258
259 // deal with evicted block
260 assert(blk->srcMasterId < cache->system->maxMasters());
261 occupancies[blk->srcMasterId]--;
262
263 blk->invalidate();
264 }
265
266 blk->isTouched = true;
267
268 // Set tag for new block. Caller is responsible for setting status.
269 blk->tag = extractTag(addr);
270
271 // deal with what we are bringing in
272 assert(master_id < cache->system->maxMasters());
273 occupancies[master_id]++;
274 blk->srcMasterId = master_id;
275 blk->task_id = task_id;
276 blk->tickInserted = curTick();
277
278 // We only need to write into one tag and one data block.
279 tagAccesses += 1;
280 dataAccesses += 1;
281 }
282
283 /**
284 * Limit the allocation for the cache ways.
285 * @param ways The maximum number of ways available for replacement.
286 */
287 virtual void setWayAllocationMax(int ways) override
288 {
289 fatal_if(ways < 1, "Allocation limit must be greater than zero");
290 allocAssoc = ways;
291 }
292
293 /**
294 * Get the way allocation mask limit.
295 * @return The maximum number of ways available for replacement.
296 */
297 virtual int getWayAllocationMax() const override
298 {
299 return allocAssoc;
300 }
301
302 /**
303 * Generate the tag from the given address.
304 * @param addr The address to get the tag from.
305 * @return The tag of the address.
306 */
307 Addr extractTag(Addr addr) const override
308 {
309 return (addr >> tagShift);
310 }
311
312 /**
313 * Calculate the set index from the address.
314 * @param addr The address to get the set from.
315 * @return The set index of the address.
316 */
317 int extractSet(Addr addr) const override
318 {
319 return ((addr >> setShift) & setMask);
320 }
321
322 /**
325 * Align an address to the block size.
326 * @param addr the address to align.
327 * @return The block address.
328 */
329 Addr blkAlign(Addr addr) const
330 {
331 return (addr & ~(Addr)blkMask);
332 }
333
334 /**
323 * Regenerate the block address from the tag.
324 * @param tag The tag of the block.
325 * @param set The set of the block.
326 * @return The block address.
327 */
328 Addr regenerateBlkAddr(Addr tag, unsigned set) const override
329 {
330 return ((tag << tagShift) | ((Addr)set << setShift));
331 }
332
333 /**
334 * Called at end of simulation to complete average block reference stats.
335 */
336 void cleanupRefs() override;
337
338 /**
339 * Print all tags used
340 */
341 std::string print() const override;
342
343 /**
344 * Called prior to dumping stats to compute task occupancy
345 */
346 void computeStats() override;
347
348 /**
349 * Visit each block in the tag store and apply a visitor to the
350 * block.
351 *
352 * The visitor should be a function (or object that behaves like a
353 * function) that takes a cache block reference as its parameter
354 * and returns a bool. A visitor can request the traversal to be
355 * stopped by returning false, returning true causes it to be
356 * called for the next block in the tag store.
357 *
358 * \param visitor Visitor to call on each block.
359 */
360 void forEachBlk(CacheBlkVisitor &visitor) override {
361 for (unsigned i = 0; i < numSets * assoc; ++i) {
362 if (!visitor(blks[i]))
363 return;
364 }
365 }
366};
367
368#endif // __MEM_CACHE_TAGS_BASESETASSOC_HH__