CacheMemory.cc (13449:2f7efa89c58b)
/*
 * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/structures/CacheMemory.hh"

#include "base/intmath.hh"
#include "base/logging.hh"
#include "debug/RubyCache.hh"
#include "debug/RubyCacheTrace.hh"
#include "debug/RubyResourceStalls.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "mem/ruby/system/WeightedLRUPolicy.hh"

using namespace std;

ostream&
operator<<(ostream& out, const CacheMemory& obj)
{
    obj.print(out);
    out << flush;
    return out;
}

CacheMemory *
RubyCacheParams::create()
{
    return new CacheMemory(this);
}

CacheMemory::CacheMemory(const Params *p)
    : SimObject(p),
      dataArray(p->dataArrayBanks, p->dataAccessLatency,
                p->start_index_bit, p->ruby_system),
      tagArray(p->tagArrayBanks, p->tagAccessLatency,
               p->start_index_bit, p->ruby_system)
{
    m_cache_size = p->size;
    m_cache_assoc = p->assoc;
    m_replacementPolicy_ptr = p->replacement_policy;
    m_replacementPolicy_ptr->setCache(this);
    m_start_index_bit = p->start_index_bit;
    m_is_instruction_only_cache = p->is_icache;
    m_resource_stalls = p->resourceStalls;
    m_block_size = p->block_size;  // may be 0 at this point. Updated in init()
}

void
CacheMemory::init()
{
    if (m_block_size == 0) {
        m_block_size = RubySystem::getBlockSizeBytes();
    }
    m_cache_num_sets = (m_cache_size / m_cache_assoc) / m_block_size;
    assert(m_cache_num_sets > 1);
    m_cache_num_set_bits = floorLog2(m_cache_num_sets);
    assert(m_cache_num_set_bits > 0);

    m_cache.resize(m_cache_num_sets,
                   std::vector<AbstractCacheEntry*>(m_cache_assoc, nullptr));
}

CacheMemory::~CacheMemory()
{
    if (m_replacementPolicy_ptr)
        delete m_replacementPolicy_ptr;
    for (int i = 0; i < m_cache_num_sets; i++) {
        for (int j = 0; j < m_cache_assoc; j++) {
            delete m_cache[i][j];
        }
    }
}

// convert an Address to its location in the cache
int64_t
CacheMemory::addressToCacheSet(Addr address) const
{
    assert(address == makeLineAddress(address));
    return bitSelect(address, m_start_index_bit,
                     m_start_index_bit + m_cache_num_set_bits - 1);
}

// Given a cache index: returns the index of the tag in a set.
// returns -1 if the tag is not found.
int
CacheMemory::findTagInSet(int64_t cacheSet, Addr tag) const
{
    assert(tag == makeLineAddress(tag));
    // search the set for the tags
    auto it = m_tag_index.find(tag);
    if (it != m_tag_index.end())
        if (m_cache[cacheSet][it->second]->m_Permission !=
            AccessPermission_NotPresent)
            return it->second;
    return -1; // Not found
}

// Given a cache index: returns the index of the tag in a set.
// returns -1 if the tag is not found.
int
CacheMemory::findTagInSetIgnorePermissions(int64_t cacheSet,
                                           Addr tag) const
{
    assert(tag == makeLineAddress(tag));
    // search the set for the tags
    auto it = m_tag_index.find(tag);
    if (it != m_tag_index.end())
        return it->second;
    return -1; // Not found
}

// Given a unique cache block identifier (idx): return the valid address
// stored by the cache block. If the block is invalid/not present, the
// function returns the 0 address
Addr
CacheMemory::getAddressAtIdx(int idx) const
{
    Addr tmp(0);

    int set = idx / m_cache_assoc;
    assert(set < m_cache_num_sets);

    int way = idx - set * m_cache_assoc;
    assert (way < m_cache_assoc);

    AbstractCacheEntry* entry = m_cache[set][way];
    if (entry == NULL ||
        entry->m_Permission == AccessPermission_Invalid ||
        entry->m_Permission == AccessPermission_NotPresent) {
        return tmp;
    }
    return entry->m_Address;
}

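// Attempts a cache access for the given request type: on a tag match the
// replacement state is touched and data_ptr is set to the entry's data
// block; returns true only if the entry's permission allows the access.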
bool
CacheMemory::tryCacheAccess(Addr address, RubyRequestType type,
                            DataBlock*& data_ptr)
{
    assert(address == makeLineAddress(address));
    DPRINTF(RubyCache, "address: %#x\n", address);
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    if (loc != -1) {
        // Do we even have a tag match?
        AbstractCacheEntry* entry = m_cache[cacheSet][loc];
        m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
        data_ptr = &(entry->getDataBlk());

        if (entry->m_Permission == AccessPermission_Read_Write) {
            return true;
        }
        if ((entry->m_Permission == AccessPermission_Read_Only) &&
            (type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) {
            return true;
        }
        // The line must not be accessible
    }
    data_ptr = NULL;
    return false;
}

bool
CacheMemory::testCacheAccess(Addr address, RubyRequestType type,
                             DataBlock*& data_ptr)
{
    assert(address == makeLineAddress(address));
    DPRINTF(RubyCache, "address: %#x\n", address);
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);

    if (loc != -1) {
        // Do we even have a tag match?
        AbstractCacheEntry* entry = m_cache[cacheSet][loc];
        m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
        data_ptr = &(entry->getDataBlk());

        return m_cache[cacheSet][loc]->m_Permission !=
            AccessPermission_NotPresent;
    }

    data_ptr = NULL;
    return false;
}

// tests to see if an address is present in the cache
bool
CacheMemory::isTagPresent(Addr address) const
{
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);

    if (loc == -1) {
        // We didn't find the tag
        DPRINTF(RubyCache, "No tag match for address: %#x\n", address);
        return false;
    }
    DPRINTF(RubyCache, "address: %#x found\n", address);
    return true;
}

// Returns true if there is:
//   a) a tag match on this address, or
//   b) an unused line in the same cache set
bool
CacheMemory::cacheAvail(Addr address) const
{
    assert(address == makeLineAddress(address));

    int64_t cacheSet = addressToCacheSet(address);

    for (int i = 0; i < m_cache_assoc; i++) {
        AbstractCacheEntry* entry = m_cache[cacheSet][i];
        if (entry != NULL) {
            if (entry->m_Address == address ||
                entry->m_Permission == AccessPermission_NotPresent) {
                // Already in the cache or we found an empty entry
                return true;
            }
        } else {
            return true;
        }
    }
    return false;
}

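// Allocates the given entry in the first free way of the address' set and
// initializes its address, permission, and lock state. Callers must ensure
// the set has room (see cacheAvail()).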
AbstractCacheEntry*
CacheMemory::allocate(Addr address, AbstractCacheEntry *entry, bool touch)
{
    assert(address == makeLineAddress(address));
    assert(!isTagPresent(address));
    assert(cacheAvail(address));
    DPRINTF(RubyCache, "address: %#x\n", address);

    // Find the first open slot
    int64_t cacheSet = addressToCacheSet(address);
    std::vector<AbstractCacheEntry*> &set = m_cache[cacheSet];
    for (int i = 0; i < m_cache_assoc; i++) {
        if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) {
            if (set[i] && (set[i] != entry)) {
                warn_once("This protocol contains a cache entry handling bug: "
                    "Entries in the cache should never be NotPresent! If\n"
                    "this entry (%#x) is not tracked elsewhere, it will memory "
                    "leak here. Fix your protocol to eliminate these!",
                    address);
            }
            set[i] = entry;  // Init entry
            set[i]->m_Address = address;
            set[i]->m_Permission = AccessPermission_Invalid;
            DPRINTF(RubyCache, "Allocate clearing lock for addr: %x\n",
                    address);
            set[i]->m_locked = -1;
            m_tag_index[address] = i;
            entry->setSetIndex(cacheSet);
            entry->setWayIndex(i);

            if (touch) {
                m_replacementPolicy_ptr->touch(cacheSet, i, curTick());
            }

            return entry;
        }
    }
    panic("Allocate didn't find an available entry");
}

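// Deallocates the entry for this address: the entry object is deleted and
// removed from both the set and the tag index.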
void
CacheMemory::deallocate(Addr address)
{
    assert(address == makeLineAddress(address));
    assert(isTagPresent(address));
    DPRINTF(RubyCache, "address: %#x\n", address);
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    if (loc != -1) {
        delete m_cache[cacheSet][loc];
        m_cache[cacheSet][loc] = NULL;
        m_tag_index.erase(address);
    }
}

// Returns with the physical address of the conflicting cache line
Addr
CacheMemory::cacheProbe(Addr address) const
{
    assert(address == makeLineAddress(address));
    assert(!cacheAvail(address));

    int64_t cacheSet = addressToCacheSet(address);
    return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)]->
        m_Address;
}

// looks an address up in the cache
AbstractCacheEntry*
CacheMemory::lookup(Addr address)
{
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    if (loc == -1) return NULL;
    return m_cache[cacheSet][loc];
}

// looks an address up in the cache
const AbstractCacheEntry*
CacheMemory::lookup(Addr address) const
{
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    if (loc == -1) return NULL;
    return m_cache[cacheSet][loc];
}

// Sets the most recently used bit for a cache block
void
CacheMemory::setMRU(Addr address)
{
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);

    if (loc != -1)
        m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
}

void
CacheMemory::setMRU(const AbstractCacheEntry *e)
{
    uint32_t cacheSet = e->getSetIndex();
    uint32_t loc = e->getWayIndex();
    m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
}

void
CacheMemory::setMRU(Addr address, int occupancy)
{
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);

    if (loc != -1) {
        if (m_replacementPolicy_ptr->useOccupancy()) {
            (static_cast<WeightedLRUPolicy*>(m_replacementPolicy_ptr))->
                touch(cacheSet, loc, curTick(), occupancy);
        } else {
            m_replacementPolicy_ptr->
                touch(cacheSet, loc, curTick());
        }
    }
}

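// Returns the number of valid blocks held by the entry at this set/way;
// used as a replacement weight (e.g. by the occupancy-aware WeightedLRU
// policy above).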
int
CacheMemory::getReplacementWeight(int64_t set, int64_t loc)
{
    assert(set < m_cache_num_sets);
    assert(loc < m_cache_assoc);
    int ret = 0;
    if (m_cache[set][loc] != NULL) {
        ret = m_cache[set][loc]->getNumValidBlocks();
        assert(ret >= 0);
    }

    return ret;
}

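// Records every readable or writable block with the CacheRecorder so the
// warmed-up cache contents can be captured in a trace.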
void
CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
{
    uint64_t warmedUpBlocks = 0;
    uint64_t totalBlocks M5_VAR_USED = (uint64_t)m_cache_num_sets *
                                       (uint64_t)m_cache_assoc;

    for (int i = 0; i < m_cache_num_sets; i++) {
        for (int j = 0; j < m_cache_assoc; j++) {
            if (m_cache[i][j] != NULL) {
                AccessPermission perm = m_cache[i][j]->m_Permission;
                RubyRequestType request_type = RubyRequestType_NULL;
                if (perm == AccessPermission_Read_Only) {
                    if (m_is_instruction_only_cache) {
                        request_type = RubyRequestType_IFETCH;
                    } else {
                        request_type = RubyRequestType_LD;
                    }
                } else if (perm == AccessPermission_Read_Write) {
                    request_type = RubyRequestType_ST;
                }

                if (request_type != RubyRequestType_NULL) {
                    tr->addRecord(cntrl, m_cache[i][j]->m_Address,
                                  0, request_type,
                                  m_replacementPolicy_ptr->getLastAccess(i, j),
                                  m_cache[i][j]->getDataBlk());
                    warmedUpBlocks++;
                }
            }
        }
    }

    DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks "
            "recorded %.2f%%\n", name().c_str(), warmedUpBlocks,
            totalBlocks, (float(warmedUpBlocks) / float(totalBlocks)) * 100.0);
}

void
CacheMemory::print(ostream& out) const
{
    out << "Cache dump: " << name() << endl;
    for (int i = 0; i < m_cache_num_sets; i++) {
        for (int j = 0; j < m_cache_assoc; j++) {
            if (m_cache[i][j] != NULL) {
                out << "  Index: " << i
                    << " way: " << j
                    << " entry: " << *m_cache[i][j] << endl;
            } else {
                out << "  Index: " << i
                    << " way: " << j
                    << " entry: NULL" << endl;
            }
        }
    }
}

void
CacheMemory::printData(ostream& out) const
{
    out << "printData() not supported" << endl;
}

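// Per-line lock bookkeeping (e.g. for LL/SC-style accesses): a line is
// locked by a context id; later requests can test or clear the lock.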
456CacheMemory::setLocked(Addr address, int context)
457{
458 DPRINTF(RubyCache, "Setting Lock for addr: %#x to %d\n", address, context);
459 assert(address == makeLineAddress(address));
460 int64_t cacheSet = addressToCacheSet(address);
461 int loc = findTagInSet(cacheSet, address);
462 assert(loc != -1);
463 m_cache[cacheSet][loc]->setLocked(context);
464}
465
466void
467CacheMemory::clearLocked(Addr address)
468{
469 DPRINTF(RubyCache, "Clear Lock for addr: %#x\n", address);
470 assert(address == makeLineAddress(address));
471 int64_t cacheSet = addressToCacheSet(address);
472 int loc = findTagInSet(cacheSet, address);
473 assert(loc != -1);
474 m_cache[cacheSet][loc]->clearLocked();
475}
476
477bool
478CacheMemory::isLocked(Addr address, int context)
479{
480 assert(address == makeLineAddress(address));
481 int64_t cacheSet = addressToCacheSet(address);
482 int loc = findTagInSet(cacheSet, address);
483 assert(loc != -1);
484 DPRINTF(RubyCache, "Testing Lock for addr: %#llx cur %d con %d\n",
485 address, m_cache[cacheSet][loc]->m_locked, context);
486 return m_cache[cacheSet][loc]->isLocked(context);
487}
488
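// Registers this cache's statistics. demand_accesses and total_prefetches
// are formula stats derived from the hit/miss and prefetch counters.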
void
CacheMemory::regStats()
{
    SimObject::regStats();

    m_demand_hits
        .name(name() + ".demand_hits")
        .desc("Number of cache demand hits")
        ;

    m_demand_misses
        .name(name() + ".demand_misses")
        .desc("Number of cache demand misses")
        ;

    m_demand_accesses
        .name(name() + ".demand_accesses")
        .desc("Number of cache demand accesses")
        ;

    m_demand_accesses = m_demand_hits + m_demand_misses;

    m_sw_prefetches
        .name(name() + ".total_sw_prefetches")
        .desc("Number of software prefetches")
        .flags(Stats::nozero)
        ;

    m_hw_prefetches
        .name(name() + ".total_hw_prefetches")
        .desc("Number of hardware prefetches")
        .flags(Stats::nozero)
        ;

    m_prefetches
        .name(name() + ".total_prefetches")
        .desc("Number of prefetches")
        .flags(Stats::nozero)
        ;

    m_prefetches = m_sw_prefetches + m_hw_prefetches;

    m_accessModeType
        .init(RubyRequestType_NUM)
        .name(name() + ".access_mode")
        .flags(Stats::pdf | Stats::total)
        ;
    for (int i = 0; i < RubyAccessMode_NUM; i++) {
        m_accessModeType
            .subname(i, RubyAccessMode_to_string(RubyAccessMode(i)))
            .flags(Stats::nozero)
            ;
    }

    numDataArrayReads
        .name(name() + ".num_data_array_reads")
        .desc("number of data array reads")
        .flags(Stats::nozero)
        ;

    numDataArrayWrites
        .name(name() + ".num_data_array_writes")
        .desc("number of data array writes")
        .flags(Stats::nozero)
        ;

    numTagArrayReads
        .name(name() + ".num_tag_array_reads")
        .desc("number of tag array reads")
        .flags(Stats::nozero)
        ;

    numTagArrayWrites
        .name(name() + ".num_tag_array_writes")
        .desc("number of tag array writes")
        .flags(Stats::nozero)
        ;

    numTagArrayStalls
        .name(name() + ".num_tag_array_stalls")
        .desc("number of stalls caused by tag array")
        .flags(Stats::nozero)
        ;

    numDataArrayStalls
        .name(name() + ".num_data_array_stalls")
        .desc("number of stalls caused by data array")
        .flags(Stats::nozero)
        ;
}

// assumption: SLICC generated files will only call this function
// once **all** resources are granted
void
CacheMemory::recordRequestType(CacheRequestType requestType, Addr addr)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            CacheRequestType_to_string(requestType));
    switch(requestType) {
      case CacheRequestType_DataArrayRead:
        if (m_resource_stalls)
            dataArray.reserve(addressToCacheSet(addr));
        numDataArrayReads++;
        return;
      case CacheRequestType_DataArrayWrite:
        if (m_resource_stalls)
            dataArray.reserve(addressToCacheSet(addr));
        numDataArrayWrites++;
        return;
      case CacheRequestType_TagArrayRead:
        if (m_resource_stalls)
            tagArray.reserve(addressToCacheSet(addr));
        numTagArrayReads++;
        return;
      case CacheRequestType_TagArrayWrite:
        if (m_resource_stalls)
            tagArray.reserve(addressToCacheSet(addr));
        numTagArrayWrites++;
        return;
      default:
        warn("CacheMemory access_type not found: %s",
             CacheRequestType_to_string(requestType));
    }
}

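// Returns true if the requested tag/data array bank can accept an access
// to this address this cycle; otherwise counts a resource stall. Always
// succeeds when resource stall modeling is disabled.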
615CacheMemory::checkResourceAvailable(CacheResourceType res, Addr addr)
616{
617 if (!m_resource_stalls) {
618 return true;
619 }
620
621 if (res == CacheResourceType_TagArray) {
622 if (tagArray.tryAccess(addressToCacheSet(addr))) return true;
623 else {
624 DPRINTF(RubyResourceStalls,
625 "Tag array stall on addr %#x in set %d\n",
626 addr, addressToCacheSet(addr));
627 numTagArrayStalls++;
628 return false;
629 }
630 } else if (res == CacheResourceType_DataArray) {
631 if (dataArray.tryAccess(addressToCacheSet(addr))) return true;
632 else {
633 DPRINTF(RubyResourceStalls,
634 "Data array stall on addr %#x in set %d\n",
635 addr, addressToCacheSet(addr));
636 numDataArrayStalls++;
637 return false;
638 }
639 } else {
        panic("Unrecognized cache resource type.");
    }
}

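// Simple permission queries on a block identified by set and way index.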
bool
CacheMemory::isBlockInvalid(int64_t cache_set, int64_t loc)
{
    return (m_cache[cache_set][loc]->m_Permission == AccessPermission_Invalid);
}

bool
CacheMemory::isBlockNotBusy(int64_t cache_set, int64_t loc)
{
    return (m_cache[cache_set][loc]->m_Permission != AccessPermission_Busy);
}