CacheMemory.cc (11523:81332eb10367) CacheMemory.cc (11793:ef606668d247)
/*
 * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

30#include "mem/ruby/structures/CacheMemory.hh"
31
30#include "base/intmath.hh"
31#include "debug/RubyCache.hh"
32#include "debug/RubyCacheTrace.hh"
33#include "debug/RubyResourceStalls.hh"
34#include "debug/RubyStats.hh"
35#include "mem/protocol/AccessPermission.hh"
32#include "base/intmath.hh"
33#include "debug/RubyCache.hh"
34#include "debug/RubyCacheTrace.hh"
35#include "debug/RubyResourceStalls.hh"
36#include "debug/RubyStats.hh"
37#include "mem/protocol/AccessPermission.hh"
36#include "mem/ruby/structures/CacheMemory.hh"
37#include "mem/ruby/system/RubySystem.hh"
38#include "mem/ruby/system/WeightedLRUPolicy.hh"
39
using namespace std;

ostream&
operator<<(ostream& out, const CacheMemory& obj)
{
    obj.print(out);
    out << flush;
    return out;
}

CacheMemory *
RubyCacheParams::create()
{
    return new CacheMemory(this);
}

CacheMemory::CacheMemory(const Params *p)
    : SimObject(p),
      dataArray(p->dataArrayBanks, p->dataAccessLatency,
                p->start_index_bit, p->ruby_system),
      tagArray(p->tagArrayBanks, p->tagAccessLatency,
               p->start_index_bit, p->ruby_system)
{
    m_cache_size = p->size;
    m_cache_assoc = p->assoc;
    m_replacementPolicy_ptr = p->replacement_policy;
    m_replacementPolicy_ptr->setCache(this);
    m_start_index_bit = p->start_index_bit;
    m_is_instruction_only_cache = p->is_icache;
    m_resource_stalls = p->resourceStalls;
    m_block_size = p->block_size; // may be 0 at this point. Updated in init()
}

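// Finish the geometry setup once the block size is known: the number of
// sets is size / (associativity * block size), and the set index occupies
// floorLog2(num_sets) bits starting at m_start_index_bit.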
void
CacheMemory::init()
{
    if (m_block_size == 0) {
        m_block_size = RubySystem::getBlockSizeBytes();
    }
    m_cache_num_sets = (m_cache_size / m_cache_assoc) / m_block_size;
    assert(m_cache_num_sets > 1);
    m_cache_num_set_bits = floorLog2(m_cache_num_sets);
    assert(m_cache_num_set_bits > 0);

    m_cache.resize(m_cache_num_sets,
                   std::vector<AbstractCacheEntry*>(m_cache_assoc, nullptr));
}

CacheMemory::~CacheMemory()
{
    if (m_replacementPolicy_ptr)
        delete m_replacementPolicy_ptr;
    for (int i = 0; i < m_cache_num_sets; i++) {
        for (int j = 0; j < m_cache_assoc; j++) {
            delete m_cache[i][j];
        }
    }
}

// Convert an address to its set index in the cache
int64_t
CacheMemory::addressToCacheSet(Addr address) const
{
    assert(address == makeLineAddress(address));
    return bitSelect(address, m_start_index_bit,
                     m_start_index_bit + m_cache_num_set_bits - 1);
}

// Given a cache set: returns the way index of the tag within that set.
// Returns -1 if the tag is not found.
int
CacheMemory::findTagInSet(int64_t cacheSet, Addr tag) const
{
    assert(tag == makeLineAddress(tag));
    // look the tag up in the tag index
    auto it = m_tag_index.find(tag);
    if (it != m_tag_index.end())
        if (m_cache[cacheSet][it->second]->m_Permission !=
            AccessPermission_NotPresent)
            return it->second;
    return -1; // Not found
}

// Given a cache set: returns the way index of the tag within that set,
// ignoring access permissions. Returns -1 if the tag is not found.
int
CacheMemory::findTagInSetIgnorePermissions(int64_t cacheSet,
                                           Addr tag) const
{
    assert(tag == makeLineAddress(tag));
    // look the tag up in the tag index
    auto it = m_tag_index.find(tag);
    if (it != m_tag_index.end())
        return it->second;
    return -1; // Not found
}

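// A flat block identifier decomposes as set = idx / assoc, way = idx % assoc.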
// Given a unique cache block identifier (idx): return the valid address
// stored by the cache block. If the block is invalid/notpresent, the
// function returns the 0 address.
Addr
CacheMemory::getAddressAtIdx(int idx) const
{
    Addr tmp(0);

    int set = idx / m_cache_assoc;
    assert(set < m_cache_num_sets);

    int way = idx - set * m_cache_assoc;
    assert (way < m_cache_assoc);

    AbstractCacheEntry* entry = m_cache[set][way];
    if (entry == NULL ||
        entry->m_Permission == AccessPermission_Invalid ||
        entry->m_Permission == AccessPermission_NotPresent) {
        return tmp;
    }
    return entry->m_Address;
}

bool
CacheMemory::tryCacheAccess(Addr address, RubyRequestType type,
                            DataBlock*& data_ptr)
{
    assert(address == makeLineAddress(address));
    DPRINTF(RubyCache, "address: %#x\n", address);
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    if (loc != -1) {
        // Do we even have a tag match?
        AbstractCacheEntry* entry = m_cache[cacheSet][loc];
        m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
        data_ptr = &(entry->getDataBlk());

        if (entry->m_Permission == AccessPermission_Read_Write) {
            return true;
        }
        if ((entry->m_Permission == AccessPermission_Read_Only) &&
            (type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) {
            return true;
        }
        // The line must not be accessible
    }
    data_ptr = NULL;
    return false;
}

bool
CacheMemory::testCacheAccess(Addr address, RubyRequestType type,
                             DataBlock*& data_ptr)
{
    assert(address == makeLineAddress(address));
    DPRINTF(RubyCache, "address: %#x\n", address);
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);

    if (loc != -1) {
        // Do we even have a tag match?
        AbstractCacheEntry* entry = m_cache[cacheSet][loc];
        m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
        data_ptr = &(entry->getDataBlk());

        return m_cache[cacheSet][loc]->m_Permission !=
            AccessPermission_NotPresent;
    }

    data_ptr = NULL;
    return false;
}

// tests to see if an address is present in the cache
bool
CacheMemory::isTagPresent(Addr address) const
{
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);

    if (loc == -1) {
        // We didn't find the tag
        DPRINTF(RubyCache, "No tag match for address: %#x\n", address);
        return false;
    }
    DPRINTF(RubyCache, "address: %#x found\n", address);
    return true;
}

// Returns true if there is:
// a) a tag match on this address, or
// b) an unused way in the address's cache set
bool
CacheMemory::cacheAvail(Addr address) const
{
    assert(address == makeLineAddress(address));

    int64_t cacheSet = addressToCacheSet(address);

    for (int i = 0; i < m_cache_assoc; i++) {
        AbstractCacheEntry* entry = m_cache[cacheSet][i];
        if (entry != NULL) {
            if (entry->m_Address == address ||
                entry->m_Permission == AccessPermission_NotPresent) {
                // Already in the cache or we found an empty entry
                return true;
            }
        } else {
            return true;
        }
    }
    return false;
}

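// Install the controller-provided entry in the first free (or NotPresent)
// way of the address's set, reset its permission and lock state, record
// its way in the tag index, and optionally touch the replacement policy.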
AbstractCacheEntry*
CacheMemory::allocate(Addr address, AbstractCacheEntry *entry, bool touch)
{
    assert(address == makeLineAddress(address));
    assert(!isTagPresent(address));
    assert(cacheAvail(address));
    DPRINTF(RubyCache, "address: %#x\n", address);

    // Find the first open slot
    int64_t cacheSet = addressToCacheSet(address);
    std::vector<AbstractCacheEntry*> &set = m_cache[cacheSet];
    for (int i = 0; i < m_cache_assoc; i++) {
        if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) {
            if (set[i] && (set[i] != entry)) {
                warn_once("This protocol contains a cache entry handling bug: "
                    "Entries in the cache should never be NotPresent! If\n"
                    "this entry (%#x) is not tracked elsewhere, it will memory "
                    "leak here. Fix your protocol to eliminate these!",
                    address);
            }
            set[i] = entry;  // Init entry
            set[i]->m_Address = address;
            set[i]->m_Permission = AccessPermission_Invalid;
            DPRINTF(RubyCache, "Allocate clearing lock for addr: %x\n",
                    address);
            set[i]->m_locked = -1;
            m_tag_index[address] = i;
            entry->setSetIndex(cacheSet);
            entry->setWayIndex(i);

            if (touch) {
                m_replacementPolicy_ptr->touch(cacheSet, i, curTick());
            }

            return entry;
        }
    }
    panic("Allocate didn't find an available entry");
}

void
CacheMemory::deallocate(Addr address)
{
    assert(address == makeLineAddress(address));
    assert(isTagPresent(address));
    DPRINTF(RubyCache, "address: %#x\n", address);
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    if (loc != -1) {
        delete m_cache[cacheSet][loc];
        m_cache[cacheSet][loc] = NULL;
        m_tag_index.erase(address);
    }
}

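// The victim below is chosen by the replacement policy within the set the
// incoming address maps to; the caller must already have checked that the
// set is full (no tag match and no free way).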
// Returns the physical address of the conflicting cache line
Addr
CacheMemory::cacheProbe(Addr address) const
{
    assert(address == makeLineAddress(address));
    assert(!cacheAvail(address));

    int64_t cacheSet = addressToCacheSet(address);
    return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)]->
        m_Address;
}

// looks an address up in the cache
AbstractCacheEntry*
CacheMemory::lookup(Addr address)
{
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    if (loc == -1) return NULL;
    return m_cache[cacheSet][loc];
}

// looks an address up in the cache
const AbstractCacheEntry*
CacheMemory::lookup(Addr address) const
{
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    if (loc == -1) return NULL;
    return m_cache[cacheSet][loc];
}

// Sets the most recently used bit for a cache block
void
CacheMemory::setMRU(Addr address)
{
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);

    if (loc != -1)
        m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
}

void
CacheMemory::setMRU(const AbstractCacheEntry *e)
{
    uint32_t cacheSet = e->getSetIndex();
    uint32_t loc = e->getWayIndex();
    m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
}

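// Occupancy-aware MRU update: only WeightedLRUPolicy consumes the occupancy
// argument; any other policy falls back to a plain timestamp touch.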
void
CacheMemory::setMRU(Addr address, int occupancy)
{
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);

    if (loc != -1) {
        if (m_replacementPolicy_ptr->useOccupancy()) {
            (static_cast<WeightedLRUPolicy*>(m_replacementPolicy_ptr))->
                touch(cacheSet, loc, curTick(), occupancy);
        } else {
            m_replacementPolicy_ptr->
                touch(cacheSet, loc, curTick());
        }
    }
}

int
CacheMemory::getReplacementWeight(int64_t set, int64_t loc)
{
    assert(set < m_cache_num_sets);
    assert(loc < m_cache_assoc);
    int ret = 0;
    if (m_cache[set][loc] != NULL) {
        ret = m_cache[set][loc]->getNumValidBlocks();
        assert(ret >= 0);
    }

    return ret;
}

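// Hand the cache's warmed-up contents to the CacheRecorder: read-only lines
// are recorded as LD (IFETCH for an instruction-only cache), read-write
// lines as ST, and lines with any other permission are skipped.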
void
CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
{
    uint64_t warmedUpBlocks = 0;
    uint64_t totalBlocks M5_VAR_USED = (uint64_t)m_cache_num_sets *
                                       (uint64_t)m_cache_assoc;

    for (int i = 0; i < m_cache_num_sets; i++) {
        for (int j = 0; j < m_cache_assoc; j++) {
            if (m_cache[i][j] != NULL) {
                AccessPermission perm = m_cache[i][j]->m_Permission;
                RubyRequestType request_type = RubyRequestType_NULL;
                if (perm == AccessPermission_Read_Only) {
                    if (m_is_instruction_only_cache) {
                        request_type = RubyRequestType_IFETCH;
                    } else {
                        request_type = RubyRequestType_LD;
                    }
                } else if (perm == AccessPermission_Read_Write) {
                    request_type = RubyRequestType_ST;
                }

                if (request_type != RubyRequestType_NULL) {
                    tr->addRecord(cntrl, m_cache[i][j]->m_Address,
                                  0, request_type,
                                  m_replacementPolicy_ptr->getLastAccess(i, j),
                                  m_cache[i][j]->getDataBlk());
                    warmedUpBlocks++;
                }
            }
        }
    }

    DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks "
            "recorded %.2f%%\n", name().c_str(), warmedUpBlocks,
            totalBlocks, (float(warmedUpBlocks) / float(totalBlocks)) * 100.0);
}

void
CacheMemory::print(ostream& out) const
{
    out << "Cache dump: " << name() << endl;
    for (int i = 0; i < m_cache_num_sets; i++) {
        for (int j = 0; j < m_cache_assoc; j++) {
            if (m_cache[i][j] != NULL) {
                out << " Index: " << i
                    << " way: " << j
                    << " entry: " << *m_cache[i][j] << endl;
            } else {
                out << " Index: " << i
                    << " way: " << j
                    << " entry: NULL" << endl;
            }
        }
    }
}

void
CacheMemory::printData(ostream& out) const
{
    out << "printData() not supported" << endl;
}

void
CacheMemory::setLocked(Addr address, int context)
{
    DPRINTF(RubyCache, "Setting Lock for addr: %#x to %d\n", address, context);
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    assert(loc != -1);
    m_cache[cacheSet][loc]->setLocked(context);
}

void
CacheMemory::clearLocked(Addr address)
{
    DPRINTF(RubyCache, "Clear Lock for addr: %#x\n", address);
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    assert(loc != -1);
    m_cache[cacheSet][loc]->clearLocked();
}

bool
CacheMemory::isLocked(Addr address, int context)
{
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    assert(loc != -1);
    DPRINTF(RubyCache, "Testing Lock for addr: %#llx cur %d con %d\n",
            address, m_cache[cacheSet][loc]->m_locked, context);
    return m_cache[cacheSet][loc]->isLocked(context);
}

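// Register statistics. m_demand_accesses and m_prefetches are formula stats
// computed from their hit/miss and sw/hw components, respectively.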
void
CacheMemory::regStats()
{
    SimObject::regStats();

    m_demand_hits
        .name(name() + ".demand_hits")
        .desc("Number of cache demand hits")
        ;

    m_demand_misses
        .name(name() + ".demand_misses")
        .desc("Number of cache demand misses")
        ;

    m_demand_accesses
        .name(name() + ".demand_accesses")
        .desc("Number of cache demand accesses")
        ;

    m_demand_accesses = m_demand_hits + m_demand_misses;

    m_sw_prefetches
        .name(name() + ".total_sw_prefetches")
        .desc("Number of software prefetches")
        .flags(Stats::nozero)
        ;

    m_hw_prefetches
        .name(name() + ".total_hw_prefetches")
        .desc("Number of hardware prefetches")
        .flags(Stats::nozero)
        ;

    m_prefetches
        .name(name() + ".total_prefetches")
        .desc("Number of prefetches")
        .flags(Stats::nozero)
        ;

    m_prefetches = m_sw_prefetches + m_hw_prefetches;

    m_accessModeType
        .init(RubyRequestType_NUM)
        .name(name() + ".access_mode")
        .flags(Stats::pdf | Stats::total)
        ;
    for (int i = 0; i < RubyAccessMode_NUM; i++) {
        m_accessModeType
            .subname(i, RubyAccessMode_to_string(RubyAccessMode(i)))
            .flags(Stats::nozero)
            ;
    }

    numDataArrayReads
        .name(name() + ".num_data_array_reads")
        .desc("number of data array reads")
        .flags(Stats::nozero)
        ;

    numDataArrayWrites
        .name(name() + ".num_data_array_writes")
        .desc("number of data array writes")
        .flags(Stats::nozero)
        ;

    numTagArrayReads
        .name(name() + ".num_tag_array_reads")
        .desc("number of tag array reads")
        .flags(Stats::nozero)
        ;

    numTagArrayWrites
        .name(name() + ".num_tag_array_writes")
        .desc("number of tag array writes")
        .flags(Stats::nozero)
        ;

    numTagArrayStalls
        .name(name() + ".num_tag_array_stalls")
        .desc("number of stalls caused by tag array")
        .flags(Stats::nozero)
        ;

    numDataArrayStalls
        .name(name() + ".num_data_array_stalls")
        .desc("number of stalls caused by data array")
        .flags(Stats::nozero)
        ;
}

// assumption: SLICC-generated files will only call this function
// once **all** resources are granted
void
CacheMemory::recordRequestType(CacheRequestType requestType, Addr addr)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            CacheRequestType_to_string(requestType));
    switch(requestType) {
    case CacheRequestType_DataArrayRead:
        if (m_resource_stalls)
            dataArray.reserve(addressToCacheSet(addr));
        numDataArrayReads++;
        return;
    case CacheRequestType_DataArrayWrite:
        if (m_resource_stalls)
            dataArray.reserve(addressToCacheSet(addr));
        numDataArrayWrites++;
        return;
    case CacheRequestType_TagArrayRead:
        if (m_resource_stalls)
            tagArray.reserve(addressToCacheSet(addr));
        numTagArrayReads++;
        return;
    case CacheRequestType_TagArrayWrite:
        if (m_resource_stalls)
            tagArray.reserve(addressToCacheSet(addr));
        numTagArrayWrites++;
        return;
    default:
        warn("CacheMemory access_type not found: %s",
             CacheRequestType_to_string(requestType));
    }
}

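// With resource stalls enabled, an access succeeds only if the banked tag or
// data array can accept it this cycle (banks are reserved in
// recordRequestType); otherwise the matching stall counter is incremented and
// false is returned so the caller can stall and retry.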
bool
CacheMemory::checkResourceAvailable(CacheResourceType res, Addr addr)
{
    if (!m_resource_stalls) {
        return true;
    }

    if (res == CacheResourceType_TagArray) {
        if (tagArray.tryAccess(addressToCacheSet(addr))) return true;
        else {
            DPRINTF(RubyResourceStalls,
                    "Tag array stall on addr %#x in set %d\n",
                    addr, addressToCacheSet(addr));
            numTagArrayStalls++;
            return false;
        }
    } else if (res == CacheResourceType_DataArray) {
        if (dataArray.tryAccess(addressToCacheSet(addr))) return true;
        else {
            DPRINTF(RubyResourceStalls,
                    "Data array stall on addr %#x in set %d\n",
                    addr, addressToCacheSet(addr));
            numDataArrayStalls++;
            return false;
        }
    } else {
        assert(false);
        return true;
    }
}

bool
CacheMemory::isBlockInvalid(int64_t cache_set, int64_t loc)
{
    return (m_cache[cache_set][loc]->m_Permission == AccessPermission_Invalid);
}

bool
CacheMemory::isBlockNotBusy(int64_t cache_set, int64_t loc)
{
    return (m_cache[cache_set][loc]->m_Permission != AccessPermission_Busy);
}
38#include "mem/ruby/system/RubySystem.hh"
39#include "mem/ruby/system/WeightedLRUPolicy.hh"
40
41using namespace std;
42
43ostream&
44operator<<(ostream& out, const CacheMemory& obj)
45{
46 obj.print(out);
47 out << flush;
48 return out;
49}
50
51CacheMemory *
52RubyCacheParams::create()
53{
54 return new CacheMemory(this);
55}
56
57CacheMemory::CacheMemory(const Params *p)
58 : SimObject(p),
59 dataArray(p->dataArrayBanks, p->dataAccessLatency,
60 p->start_index_bit, p->ruby_system),
61 tagArray(p->tagArrayBanks, p->tagAccessLatency,
62 p->start_index_bit, p->ruby_system)
63{
64 m_cache_size = p->size;
65 m_cache_assoc = p->assoc;
66 m_replacementPolicy_ptr = p->replacement_policy;
67 m_replacementPolicy_ptr->setCache(this);
68 m_start_index_bit = p->start_index_bit;
69 m_is_instruction_only_cache = p->is_icache;
70 m_resource_stalls = p->resourceStalls;
71 m_block_size = p->block_size; // may be 0 at this point. Updated in init()
72}
73
74void
75CacheMemory::init()
76{
77 if (m_block_size == 0) {
78 m_block_size = RubySystem::getBlockSizeBytes();
79 }
80 m_cache_num_sets = (m_cache_size / m_cache_assoc) / m_block_size;
81 assert(m_cache_num_sets > 1);
82 m_cache_num_set_bits = floorLog2(m_cache_num_sets);
83 assert(m_cache_num_set_bits > 0);
84
85 m_cache.resize(m_cache_num_sets,
86 std::vector<AbstractCacheEntry*>(m_cache_assoc, nullptr));
87}
88
89CacheMemory::~CacheMemory()
90{
91 if (m_replacementPolicy_ptr)
92 delete m_replacementPolicy_ptr;
93 for (int i = 0; i < m_cache_num_sets; i++) {
94 for (int j = 0; j < m_cache_assoc; j++) {
95 delete m_cache[i][j];
96 }
97 }
98}
99
100// convert a Address to its location in the cache
101int64_t
102CacheMemory::addressToCacheSet(Addr address) const
103{
104 assert(address == makeLineAddress(address));
105 return bitSelect(address, m_start_index_bit,
106 m_start_index_bit + m_cache_num_set_bits - 1);
107}
108
109// Given a cache index: returns the index of the tag in a set.
110// returns -1 if the tag is not found.
111int
112CacheMemory::findTagInSet(int64_t cacheSet, Addr tag) const
113{
114 assert(tag == makeLineAddress(tag));
115 // search the set for the tags
116 auto it = m_tag_index.find(tag);
117 if (it != m_tag_index.end())
118 if (m_cache[cacheSet][it->second]->m_Permission !=
119 AccessPermission_NotPresent)
120 return it->second;
121 return -1; // Not found
122}
123
124// Given a cache index: returns the index of the tag in a set.
125// returns -1 if the tag is not found.
126int
127CacheMemory::findTagInSetIgnorePermissions(int64_t cacheSet,
128 Addr tag) const
129{
130 assert(tag == makeLineAddress(tag));
131 // search the set for the tags
132 auto it = m_tag_index.find(tag);
133 if (it != m_tag_index.end())
134 return it->second;
135 return -1; // Not found
136}
137
138// Given an unique cache block identifier (idx): return the valid address
139// stored by the cache block. If the block is invalid/notpresent, the
140// function returns the 0 address
141Addr
142CacheMemory::getAddressAtIdx(int idx) const
143{
144 Addr tmp(0);
145
146 int set = idx / m_cache_assoc;
147 assert(set < m_cache_num_sets);
148
149 int way = idx - set * m_cache_assoc;
150 assert (way < m_cache_assoc);
151
152 AbstractCacheEntry* entry = m_cache[set][way];
153 if (entry == NULL ||
154 entry->m_Permission == AccessPermission_Invalid ||
155 entry->m_Permission == AccessPermission_NotPresent) {
156 return tmp;
157 }
158 return entry->m_Address;
159}
160
161bool
162CacheMemory::tryCacheAccess(Addr address, RubyRequestType type,
163 DataBlock*& data_ptr)
164{
165 assert(address == makeLineAddress(address));
166 DPRINTF(RubyCache, "address: %#x\n", address);
167 int64_t cacheSet = addressToCacheSet(address);
168 int loc = findTagInSet(cacheSet, address);
169 if (loc != -1) {
170 // Do we even have a tag match?
171 AbstractCacheEntry* entry = m_cache[cacheSet][loc];
172 m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
173 data_ptr = &(entry->getDataBlk());
174
175 if (entry->m_Permission == AccessPermission_Read_Write) {
176 return true;
177 }
178 if ((entry->m_Permission == AccessPermission_Read_Only) &&
179 (type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) {
180 return true;
181 }
182 // The line must not be accessible
183 }
184 data_ptr = NULL;
185 return false;
186}
187
188bool
189CacheMemory::testCacheAccess(Addr address, RubyRequestType type,
190 DataBlock*& data_ptr)
191{
192 assert(address == makeLineAddress(address));
193 DPRINTF(RubyCache, "address: %#x\n", address);
194 int64_t cacheSet = addressToCacheSet(address);
195 int loc = findTagInSet(cacheSet, address);
196
197 if (loc != -1) {
198 // Do we even have a tag match?
199 AbstractCacheEntry* entry = m_cache[cacheSet][loc];
200 m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
201 data_ptr = &(entry->getDataBlk());
202
203 return m_cache[cacheSet][loc]->m_Permission !=
204 AccessPermission_NotPresent;
205 }
206
207 data_ptr = NULL;
208 return false;
209}
210
211// tests to see if an address is present in the cache
212bool
213CacheMemory::isTagPresent(Addr address) const
214{
215 assert(address == makeLineAddress(address));
216 int64_t cacheSet = addressToCacheSet(address);
217 int loc = findTagInSet(cacheSet, address);
218
219 if (loc == -1) {
220 // We didn't find the tag
221 DPRINTF(RubyCache, "No tag match for address: %#x\n", address);
222 return false;
223 }
224 DPRINTF(RubyCache, "address: %#x found\n", address);
225 return true;
226}
227
228// Returns true if there is:
229// a) a tag match on this address or there is
230// b) an unused line in the same cache "way"
231bool
232CacheMemory::cacheAvail(Addr address) const
233{
234 assert(address == makeLineAddress(address));
235
236 int64_t cacheSet = addressToCacheSet(address);
237
238 for (int i = 0; i < m_cache_assoc; i++) {
239 AbstractCacheEntry* entry = m_cache[cacheSet][i];
240 if (entry != NULL) {
241 if (entry->m_Address == address ||
242 entry->m_Permission == AccessPermission_NotPresent) {
243 // Already in the cache or we found an empty entry
244 return true;
245 }
246 } else {
247 return true;
248 }
249 }
250 return false;
251}
252
253AbstractCacheEntry*
254CacheMemory::allocate(Addr address, AbstractCacheEntry *entry, bool touch)
255{
256 assert(address == makeLineAddress(address));
257 assert(!isTagPresent(address));
258 assert(cacheAvail(address));
259 DPRINTF(RubyCache, "address: %#x\n", address);
260
261 // Find the first open slot
262 int64_t cacheSet = addressToCacheSet(address);
263 std::vector<AbstractCacheEntry*> &set = m_cache[cacheSet];
264 for (int i = 0; i < m_cache_assoc; i++) {
265 if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) {
266 if (set[i] && (set[i] != entry)) {
267 warn_once("This protocol contains a cache entry handling bug: "
268 "Entries in the cache should never be NotPresent! If\n"
269 "this entry (%#x) is not tracked elsewhere, it will memory "
270 "leak here. Fix your protocol to eliminate these!",
271 address);
272 }
273 set[i] = entry; // Init entry
274 set[i]->m_Address = address;
275 set[i]->m_Permission = AccessPermission_Invalid;
276 DPRINTF(RubyCache, "Allocate clearing lock for addr: %x\n",
277 address);
278 set[i]->m_locked = -1;
279 m_tag_index[address] = i;
280 entry->setSetIndex(cacheSet);
281 entry->setWayIndex(i);
282
283 if (touch) {
284 m_replacementPolicy_ptr->touch(cacheSet, i, curTick());
285 }
286
287 return entry;
288 }
289 }
290 panic("Allocate didn't find an available entry");
291}
292
293void
294CacheMemory::deallocate(Addr address)
295{
296 assert(address == makeLineAddress(address));
297 assert(isTagPresent(address));
298 DPRINTF(RubyCache, "address: %#x\n", address);
299 int64_t cacheSet = addressToCacheSet(address);
300 int loc = findTagInSet(cacheSet, address);
301 if (loc != -1) {
302 delete m_cache[cacheSet][loc];
303 m_cache[cacheSet][loc] = NULL;
304 m_tag_index.erase(address);
305 }
306}
307
308// Returns with the physical address of the conflicting cache line
309Addr
310CacheMemory::cacheProbe(Addr address) const
311{
312 assert(address == makeLineAddress(address));
313 assert(!cacheAvail(address));
314
315 int64_t cacheSet = addressToCacheSet(address);
316 return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)]->
317 m_Address;
318}
319
320// looks an address up in the cache
321AbstractCacheEntry*
322CacheMemory::lookup(Addr address)
323{
324 assert(address == makeLineAddress(address));
325 int64_t cacheSet = addressToCacheSet(address);
326 int loc = findTagInSet(cacheSet, address);
327 if (loc == -1) return NULL;
328 return m_cache[cacheSet][loc];
329}
330
331// looks an address up in the cache
332const AbstractCacheEntry*
333CacheMemory::lookup(Addr address) const
334{
335 assert(address == makeLineAddress(address));
336 int64_t cacheSet = addressToCacheSet(address);
337 int loc = findTagInSet(cacheSet, address);
338 if (loc == -1) return NULL;
339 return m_cache[cacheSet][loc];
340}
341
342// Sets the most recently used bit for a cache block
343void
344CacheMemory::setMRU(Addr address)
345{
346 int64_t cacheSet = addressToCacheSet(address);
347 int loc = findTagInSet(cacheSet, address);
348
349 if (loc != -1)
350 m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
351}
352
353void
354CacheMemory::setMRU(const AbstractCacheEntry *e)
355{
356 uint32_t cacheSet = e->getSetIndex();
357 uint32_t loc = e->getWayIndex();
358 m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
359}
360
361void
362CacheMemory::setMRU(Addr address, int occupancy)
363{
364 int64_t cacheSet = addressToCacheSet(address);
365 int loc = findTagInSet(cacheSet, address);
366
367 if (loc != -1) {
368 if (m_replacementPolicy_ptr->useOccupancy()) {
369 (static_cast<WeightedLRUPolicy*>(m_replacementPolicy_ptr))->
370 touch(cacheSet, loc, curTick(), occupancy);
371 } else {
372 m_replacementPolicy_ptr->
373 touch(cacheSet, loc, curTick());
374 }
375 }
376}
377
378int
379CacheMemory::getReplacementWeight(int64_t set, int64_t loc)
380{
381 assert(set < m_cache_num_sets);
382 assert(loc < m_cache_assoc);
383 int ret = 0;
384 if (m_cache[set][loc] != NULL) {
385 ret = m_cache[set][loc]->getNumValidBlocks();
386 assert(ret >= 0);
387 }
388
389 return ret;
390}
391
392void
393CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
394{
395 uint64_t warmedUpBlocks = 0;
396 uint64_t totalBlocks M5_VAR_USED = (uint64_t)m_cache_num_sets *
397 (uint64_t)m_cache_assoc;
398
399 for (int i = 0; i < m_cache_num_sets; i++) {
400 for (int j = 0; j < m_cache_assoc; j++) {
401 if (m_cache[i][j] != NULL) {
402 AccessPermission perm = m_cache[i][j]->m_Permission;
403 RubyRequestType request_type = RubyRequestType_NULL;
404 if (perm == AccessPermission_Read_Only) {
405 if (m_is_instruction_only_cache) {
406 request_type = RubyRequestType_IFETCH;
407 } else {
408 request_type = RubyRequestType_LD;
409 }
410 } else if (perm == AccessPermission_Read_Write) {
411 request_type = RubyRequestType_ST;
412 }
413
414 if (request_type != RubyRequestType_NULL) {
415 tr->addRecord(cntrl, m_cache[i][j]->m_Address,
416 0, request_type,
417 m_replacementPolicy_ptr->getLastAccess(i, j),
418 m_cache[i][j]->getDataBlk());
419 warmedUpBlocks++;
420 }
421 }
422 }
423 }
424
425 DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks"
426 "recorded %.2f%% \n", name().c_str(), warmedUpBlocks,
427 totalBlocks, (float(warmedUpBlocks) / float(totalBlocks)) * 100.0);
428}
429
430void
431CacheMemory::print(ostream& out) const
432{
433 out << "Cache dump: " << name() << endl;
434 for (int i = 0; i < m_cache_num_sets; i++) {
435 for (int j = 0; j < m_cache_assoc; j++) {
436 if (m_cache[i][j] != NULL) {
437 out << " Index: " << i
438 << " way: " << j
439 << " entry: " << *m_cache[i][j] << endl;
440 } else {
441 out << " Index: " << i
442 << " way: " << j
443 << " entry: NULL" << endl;
444 }
445 }
446 }
447}
448
449void
450CacheMemory::printData(ostream& out) const
451{
452 out << "printData() not supported" << endl;
453}
454
455void
456CacheMemory::setLocked(Addr address, int context)
457{
458 DPRINTF(RubyCache, "Setting Lock for addr: %#x to %d\n", address, context);
459 assert(address == makeLineAddress(address));
460 int64_t cacheSet = addressToCacheSet(address);
461 int loc = findTagInSet(cacheSet, address);
462 assert(loc != -1);
463 m_cache[cacheSet][loc]->setLocked(context);
464}
465
466void
467CacheMemory::clearLocked(Addr address)
468{
469 DPRINTF(RubyCache, "Clear Lock for addr: %#x\n", address);
470 assert(address == makeLineAddress(address));
471 int64_t cacheSet = addressToCacheSet(address);
472 int loc = findTagInSet(cacheSet, address);
473 assert(loc != -1);
474 m_cache[cacheSet][loc]->clearLocked();
475}
476
477bool
478CacheMemory::isLocked(Addr address, int context)
479{
480 assert(address == makeLineAddress(address));
481 int64_t cacheSet = addressToCacheSet(address);
482 int loc = findTagInSet(cacheSet, address);
483 assert(loc != -1);
484 DPRINTF(RubyCache, "Testing Lock for addr: %#llx cur %d con %d\n",
485 address, m_cache[cacheSet][loc]->m_locked, context);
486 return m_cache[cacheSet][loc]->isLocked(context);
487}
488
489void
490CacheMemory::regStats()
491{
492 SimObject::regStats();
493
494 m_demand_hits
495 .name(name() + ".demand_hits")
496 .desc("Number of cache demand hits")
497 ;
498
499 m_demand_misses
500 .name(name() + ".demand_misses")
501 .desc("Number of cache demand misses")
502 ;
503
504 m_demand_accesses
505 .name(name() + ".demand_accesses")
506 .desc("Number of cache demand accesses")
507 ;
508
509 m_demand_accesses = m_demand_hits + m_demand_misses;
510
511 m_sw_prefetches
512 .name(name() + ".total_sw_prefetches")
513 .desc("Number of software prefetches")
514 .flags(Stats::nozero)
515 ;
516
517 m_hw_prefetches
518 .name(name() + ".total_hw_prefetches")
519 .desc("Number of hardware prefetches")
520 .flags(Stats::nozero)
521 ;
522
523 m_prefetches
524 .name(name() + ".total_prefetches")
525 .desc("Number of prefetches")
526 .flags(Stats::nozero)
527 ;
528
529 m_prefetches = m_sw_prefetches + m_hw_prefetches;
530
531 m_accessModeType
532 .init(RubyRequestType_NUM)
533 .name(name() + ".access_mode")
534 .flags(Stats::pdf | Stats::total)
535 ;
536 for (int i = 0; i < RubyAccessMode_NUM; i++) {
537 m_accessModeType
538 .subname(i, RubyAccessMode_to_string(RubyAccessMode(i)))
539 .flags(Stats::nozero)
540 ;
541 }
542
543 numDataArrayReads
544 .name(name() + ".num_data_array_reads")
545 .desc("number of data array reads")
546 .flags(Stats::nozero)
547 ;
548
549 numDataArrayWrites
550 .name(name() + ".num_data_array_writes")
551 .desc("number of data array writes")
552 .flags(Stats::nozero)
553 ;
554
555 numTagArrayReads
556 .name(name() + ".num_tag_array_reads")
557 .desc("number of tag array reads")
558 .flags(Stats::nozero)
559 ;
560
561 numTagArrayWrites
562 .name(name() + ".num_tag_array_writes")
563 .desc("number of tag array writes")
564 .flags(Stats::nozero)
565 ;
566
567 numTagArrayStalls
568 .name(name() + ".num_tag_array_stalls")
569 .desc("number of stalls caused by tag array")
570 .flags(Stats::nozero)
571 ;
572
573 numDataArrayStalls
574 .name(name() + ".num_data_array_stalls")
575 .desc("number of stalls caused by data array")
576 .flags(Stats::nozero)
577 ;
578}
579
580// assumption: SLICC generated files will only call this function
581// once **all** resources are granted
582void
583CacheMemory::recordRequestType(CacheRequestType requestType, Addr addr)
584{
585 DPRINTF(RubyStats, "Recorded statistic: %s\n",
586 CacheRequestType_to_string(requestType));
587 switch(requestType) {
588 case CacheRequestType_DataArrayRead:
589 if (m_resource_stalls)
590 dataArray.reserve(addressToCacheSet(addr));
591 numDataArrayReads++;
592 return;
593 case CacheRequestType_DataArrayWrite:
594 if (m_resource_stalls)
595 dataArray.reserve(addressToCacheSet(addr));
596 numDataArrayWrites++;
597 return;
598 case CacheRequestType_TagArrayRead:
599 if (m_resource_stalls)
600 tagArray.reserve(addressToCacheSet(addr));
601 numTagArrayReads++;
602 return;
603 case CacheRequestType_TagArrayWrite:
604 if (m_resource_stalls)
605 tagArray.reserve(addressToCacheSet(addr));
606 numTagArrayWrites++;
607 return;
608 default:
609 warn("CacheMemory access_type not found: %s",
610 CacheRequestType_to_string(requestType));
611 }
612}
613
614bool
615CacheMemory::checkResourceAvailable(CacheResourceType res, Addr addr)
616{
617 if (!m_resource_stalls) {
618 return true;
619 }
620
621 if (res == CacheResourceType_TagArray) {
622 if (tagArray.tryAccess(addressToCacheSet(addr))) return true;
623 else {
624 DPRINTF(RubyResourceStalls,
625 "Tag array stall on addr %#x in set %d\n",
626 addr, addressToCacheSet(addr));
627 numTagArrayStalls++;
628 return false;
629 }
630 } else if (res == CacheResourceType_DataArray) {
631 if (dataArray.tryAccess(addressToCacheSet(addr))) return true;
632 else {
633 DPRINTF(RubyResourceStalls,
634 "Data array stall on addr %#x in set %d\n",
635 addr, addressToCacheSet(addr));
636 numDataArrayStalls++;
637 return false;
638 }
639 } else {
640 assert(false);
641 return true;
642 }
643}
644
645bool
646CacheMemory::isBlockInvalid(int64_t cache_set, int64_t loc)
647{
648 return (m_cache[cache_set][loc]->m_Permission == AccessPermission_Invalid);
649}
650
651bool
652CacheMemory::isBlockNotBusy(int64_t cache_set, int64_t loc)
653{
654 return (m_cache[cache_set][loc]->m_Permission != AccessPermission_Busy);
655}