/*
 * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/structures/CacheMemory.hh"

#include "base/intmath.hh"
#include "debug/RubyCache.hh"
#include "debug/RubyCacheTrace.hh"
#include "debug/RubyResourceStalls.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "mem/ruby/system/WeightedLRUPolicy.hh"

using namespace std;

ostream&
operator<<(ostream& out, const CacheMemory& obj)
{
    obj.print(out);
    out << flush;
    return out;
}

CacheMemory *
RubyCacheParams::create()
{
    return new CacheMemory(this);
}

CacheMemory::CacheMemory(const Params *p)
    : SimObject(p),
    dataArray(p->dataArrayBanks, p->dataAccessLatency,
              p->start_index_bit, p->ruby_system),
    tagArray(p->tagArrayBanks, p->tagAccessLatency,
             p->start_index_bit, p->ruby_system)
{
    m_cache_size = p->size;
    m_cache_assoc = p->assoc;
    m_replacementPolicy_ptr = p->replacement_policy;
    m_replacementPolicy_ptr->setCache(this);
    m_start_index_bit = p->start_index_bit;
    m_is_instruction_only_cache = p->is_icache;
    m_resource_stalls = p->resourceStalls;
    m_block_size = p->block_size; // may be 0 at this point. Updated in init()
}

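// Worked example of the set computation in init() below (illustrative
// configuration, not a default): a 64 KiB, 4-way cache with 64-byte blocks
// has 65536 / 4 / 64 = 256 sets, i.e. floorLog2(256) = 8 set-index bits.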
void
CacheMemory::init()
{
    if (m_block_size == 0) {
        m_block_size = RubySystem::getBlockSizeBytes();
    }
    m_cache_num_sets = (m_cache_size / m_cache_assoc) / m_block_size;
    assert(m_cache_num_sets > 1);
    m_cache_num_set_bits = floorLog2(m_cache_num_sets);
    assert(m_cache_num_set_bits > 0);

    m_cache.resize(m_cache_num_sets,
                   std::vector<AbstractCacheEntry*>(m_cache_assoc, nullptr));
}

CacheMemory::~CacheMemory()
{
    if (m_replacementPolicy_ptr)
        delete m_replacementPolicy_ptr;
    for (int i = 0; i < m_cache_num_sets; i++) {
        for (int j = 0; j < m_cache_assoc; j++) {
            delete m_cache[i][j];
        }
    }
}

// Convert an address to its set index in the cache
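// For example (illustrative values only): with m_start_index_bit = 6 and
// m_cache_num_set_bits = 8, bits [13:6] of the line address select the set.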
int64_t
CacheMemory::addressToCacheSet(Addr address) const
{
    assert(address == makeLineAddress(address));
    return bitSelect(address, m_start_index_bit,
                     m_start_index_bit + m_cache_num_set_bits - 1);
}

// Given a cache set: returns the way index of the matching tag within the
// set, or -1 if the tag is not found.
int
CacheMemory::findTagInSet(int64_t cacheSet, Addr tag) const
{
    assert(tag == makeLineAddress(tag));
    // search the set for the tags
    auto it = m_tag_index.find(tag);
    if (it != m_tag_index.end())
        if (m_cache[cacheSet][it->second]->m_Permission !=
            AccessPermission_NotPresent)
            return it->second;
    return -1; // Not found
}

// Given a cache set: returns the way index of the matching tag within the
// set regardless of its permissions, or -1 if the tag is not found.
int
CacheMemory::findTagInSetIgnorePermissions(int64_t cacheSet,
                                           Addr tag) const
{
    assert(tag == makeLineAddress(tag));
    // search the set for the tags
    auto it = m_tag_index.find(tag);
    if (it != m_tag_index.end())
        return it->second;
    return -1; // Not found
}

// Given a unique cache block identifier (idx): returns the address stored
// by that cache block. If the block is invalid or not present, the
// function returns address 0.
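// The identifier is decomposed as set = idx / m_cache_assoc and
// way = idx % m_cache_assoc, so idx ranges over [0, num_sets * assoc).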
Addr
CacheMemory::getAddressAtIdx(int idx) const
{
    Addr tmp(0);

    int set = idx / m_cache_assoc;
    assert(set < m_cache_num_sets);

    int way = idx - set * m_cache_assoc;
    assert (way < m_cache_assoc);

    AbstractCacheEntry* entry = m_cache[set][way];
    if (entry == NULL ||
        entry->m_Permission == AccessPermission_Invalid ||
        entry->m_Permission == AccessPermission_NotPresent) {
        return tmp;
    }
    return entry->m_Address;
}

bool
CacheMemory::tryCacheAccess(Addr address, RubyRequestType type,
                            DataBlock*& data_ptr)
{
    assert(address == makeLineAddress(address));
    DPRINTF(RubyCache, "address: %#x\n", address);
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    if (loc != -1) {
        // Do we even have a tag match?
        AbstractCacheEntry* entry = m_cache[cacheSet][loc];
        m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
        data_ptr = &(entry->getDataBlk());

        if (entry->m_Permission == AccessPermission_Read_Write) {
            return true;
        }
        if ((entry->m_Permission == AccessPermission_Read_Only) &&
            (type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) {
            return true;
        }
        // The line must not be accessible
    }
    data_ptr = NULL;
    return false;
}

bool
CacheMemory::testCacheAccess(Addr address, RubyRequestType type,
                             DataBlock*& data_ptr)
{
    assert(address == makeLineAddress(address));
    DPRINTF(RubyCache, "address: %#x\n", address);
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);

    if (loc != -1) {
        // Do we even have a tag match?
        AbstractCacheEntry* entry = m_cache[cacheSet][loc];
        m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
        data_ptr = &(entry->getDataBlk());

        return m_cache[cacheSet][loc]->m_Permission !=
            AccessPermission_NotPresent;
    }

    data_ptr = NULL;
    return false;
}

// tests to see if an address is present in the cache
bool
CacheMemory::isTagPresent(Addr address) const
{
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);

    if (loc == -1) {
        // We didn't find the tag
        DPRINTF(RubyCache, "No tag match for address: %#x\n", address);
        return false;
    }
    DPRINTF(RubyCache, "address: %#x found\n", address);
    return true;
}

// Returns true if either:
//   a) there is a tag match on this address, or
//   b) there is an unused way available in the corresponding cache set
bool
CacheMemory::cacheAvail(Addr address) const
{
    assert(address == makeLineAddress(address));

    int64_t cacheSet = addressToCacheSet(address);

    for (int i = 0; i < m_cache_assoc; i++) {
        AbstractCacheEntry* entry = m_cache[cacheSet][i];
        if (entry != NULL) {
            if (entry->m_Address == address ||
                entry->m_Permission == AccessPermission_NotPresent) {
                // Already in the cache or we found an empty entry
                return true;
            }
        } else {
            return true;
        }
    }
    return false;
}

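// Installs a new entry for 'address' in the first free way of its set
// (a NULL slot or one marked NotPresent), initializes it with Invalid
// permission and no lock holder, records it in the tag index, and
// optionally touches the replacement policy so the entry starts as MRU.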
AbstractCacheEntry*
CacheMemory::allocate(Addr address, AbstractCacheEntry *entry, bool touch)
{
    assert(address == makeLineAddress(address));
    assert(!isTagPresent(address));
    assert(cacheAvail(address));
    DPRINTF(RubyCache, "address: %#x\n", address);

    // Find the first open slot
    int64_t cacheSet = addressToCacheSet(address);
    std::vector<AbstractCacheEntry*> &set = m_cache[cacheSet];
    for (int i = 0; i < m_cache_assoc; i++) {
        if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) {
            if (set[i] && (set[i] != entry)) {
                warn_once("This protocol contains a cache entry handling bug: "
                    "Entries in the cache should never be NotPresent! If\n"
                    "this entry (%#x) is not tracked elsewhere, it will leak "
                    "memory here. Fix your protocol to eliminate these!",
                    address);
            }
            set[i] = entry; // Init entry
            set[i]->m_Address = address;
            set[i]->m_Permission = AccessPermission_Invalid;
            DPRINTF(RubyCache, "Allocate clearing lock for addr: %x\n",
                    address);
            set[i]->m_locked = -1;
            m_tag_index[address] = i;
            entry->setSetIndex(cacheSet);
            entry->setWayIndex(i);

            if (touch) {
                m_replacementPolicy_ptr->touch(cacheSet, i, curTick());
            }

            return entry;
        }
    }
    panic("Allocate didn't find an available entry");
}

void
CacheMemory::deallocate(Addr address)
{
    assert(address == makeLineAddress(address));
    assert(isTagPresent(address));
    DPRINTF(RubyCache, "address: %#x\n", address);
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    if (loc != -1) {
        delete m_cache[cacheSet][loc];
        m_cache[cacheSet][loc] = NULL;
        m_tag_index.erase(address);
    }
}

// Returns the physical address of the conflicting cache line
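// The victim way is selected by the replacement policy for the set this
// address maps to; callers typically evict that line before allocating
// the new address.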
Addr
CacheMemory::cacheProbe(Addr address) const
{
    assert(address == makeLineAddress(address));
    assert(!cacheAvail(address));

    int64_t cacheSet = addressToCacheSet(address);
    return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)]->
        m_Address;
}

// looks an address up in the cache
AbstractCacheEntry*
CacheMemory::lookup(Addr address)
{
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    if (loc == -1) return NULL;
    return m_cache[cacheSet][loc];
}

// looks an address up in the cache
const AbstractCacheEntry*
CacheMemory::lookup(Addr address) const
{
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    if (loc == -1) return NULL;
    return m_cache[cacheSet][loc];
}

// Sets the most recently used bit for a cache block
void
CacheMemory::setMRU(Addr address)
{
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);

    if (loc != -1)
        m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
}

void
CacheMemory::setMRU(const AbstractCacheEntry *e)
{
    uint32_t cacheSet = e->getSetIndex();
    uint32_t loc = e->getWayIndex();
    m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
}

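// This variant forwards an occupancy value to WeightedLRUPolicy::touch()
// when the configured replacement policy reports useOccupancy(); otherwise
// it falls back to the plain timestamp-only touch().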
void
CacheMemory::setMRU(Addr address, int occupancy)
{
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);

    if (loc != -1) {
        if (m_replacementPolicy_ptr->useOccupancy()) {
            (static_cast<WeightedLRUPolicy*>(m_replacementPolicy_ptr))->
                touch(cacheSet, loc, curTick(), occupancy);
        } else {
            m_replacementPolicy_ptr->
                touch(cacheSet, loc, curTick());
        }
    }
}

int
CacheMemory::getReplacementWeight(int64_t set, int64_t loc)
{
    assert(set < m_cache_num_sets);
    assert(loc < m_cache_assoc);
    int ret = 0;
    if (m_cache[set][loc] != NULL) {
        ret = m_cache[set][loc]->getNumValidBlocks();
        assert(ret >= 0);
    }

    return ret;
}

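// Walks every block in the cache and adds one record per valid block to the
// CacheRecorder (IFETCH or LD for Read_Only blocks, ST for Read_Write), so
// the cache contents can later be replayed to warm the cache back up.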
void
CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
{
    uint64_t warmedUpBlocks = 0;
    uint64_t totalBlocks M5_VAR_USED = (uint64_t)m_cache_num_sets *
                                       (uint64_t)m_cache_assoc;

    for (int i = 0; i < m_cache_num_sets; i++) {
        for (int j = 0; j < m_cache_assoc; j++) {
            if (m_cache[i][j] != NULL) {
                AccessPermission perm = m_cache[i][j]->m_Permission;
                RubyRequestType request_type = RubyRequestType_NULL;
                if (perm == AccessPermission_Read_Only) {
                    if (m_is_instruction_only_cache) {
                        request_type = RubyRequestType_IFETCH;
                    } else {
                        request_type = RubyRequestType_LD;
                    }
                } else if (perm == AccessPermission_Read_Write) {
                    request_type = RubyRequestType_ST;
                }

                if (request_type != RubyRequestType_NULL) {
                    tr->addRecord(cntrl, m_cache[i][j]->m_Address,
                                  0, request_type,
                                  m_replacementPolicy_ptr->getLastAccess(i, j),
                                  m_cache[i][j]->getDataBlk());
                    warmedUpBlocks++;
                }
            }
        }
    }

    DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks "
            "recorded %.2f%% \n", name().c_str(), warmedUpBlocks,
            totalBlocks, (float(warmedUpBlocks) / float(totalBlocks)) * 100.0);
}

void
CacheMemory::print(ostream& out) const
{
    out << "Cache dump: " << name() << endl;
    for (int i = 0; i < m_cache_num_sets; i++) {
        for (int j = 0; j < m_cache_assoc; j++) {
            if (m_cache[i][j] != NULL) {
                out << " Index: " << i
                    << " way: " << j
                    << " entry: " << *m_cache[i][j] << endl;
            } else {
                out << " Index: " << i
                    << " way: " << j
                    << " entry: NULL" << endl;
            }
        }
    }
}

void
CacheMemory::printData(ostream& out) const
{
    out << "printData() not supported" << endl;
}

void
CacheMemory::setLocked(Addr address, int context)
{
    DPRINTF(RubyCache, "Setting Lock for addr: %#x to %d\n", address, context);
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    assert(loc != -1);
    m_cache[cacheSet][loc]->setLocked(context);
}

void
CacheMemory::clearLocked(Addr address)
{
    DPRINTF(RubyCache, "Clear Lock for addr: %#x\n", address);
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    assert(loc != -1);
    m_cache[cacheSet][loc]->clearLocked();
}

bool
CacheMemory::isLocked(Addr address, int context)
{
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    assert(loc != -1);
    DPRINTF(RubyCache, "Testing Lock for addr: %#llx cur %d con %d\n",
            address, m_cache[cacheSet][loc]->m_locked, context);
    return m_cache[cacheSet][loc]->isLocked(context);
}

void
CacheMemory::regStats()
{
    SimObject::regStats();

    m_demand_hits
        .name(name() + ".demand_hits")
        .desc("Number of cache demand hits")
        ;

    m_demand_misses
        .name(name() + ".demand_misses")
        .desc("Number of cache demand misses")
        ;

    m_demand_accesses
        .name(name() + ".demand_accesses")
        .desc("Number of cache demand accesses")
        ;

    m_demand_accesses = m_demand_hits + m_demand_misses;

    m_sw_prefetches
        .name(name() + ".total_sw_prefetches")
        .desc("Number of software prefetches")
        .flags(Stats::nozero)
        ;

    m_hw_prefetches
        .name(name() + ".total_hw_prefetches")
        .desc("Number of hardware prefetches")
        .flags(Stats::nozero)
        ;

    m_prefetches
        .name(name() + ".total_prefetches")
        .desc("Number of prefetches")
        .flags(Stats::nozero)
        ;

    m_prefetches = m_sw_prefetches + m_hw_prefetches;

    m_accessModeType
        .init(RubyRequestType_NUM)
        .name(name() + ".access_mode")
        .flags(Stats::pdf | Stats::total)
        ;
    for (int i = 0; i < RubyAccessMode_NUM; i++) {
        m_accessModeType
            .subname(i, RubyAccessMode_to_string(RubyAccessMode(i)))
            .flags(Stats::nozero)
            ;
    }

    numDataArrayReads
        .name(name() + ".num_data_array_reads")
        .desc("number of data array reads")
        .flags(Stats::nozero)
        ;

    numDataArrayWrites
        .name(name() + ".num_data_array_writes")
        .desc("number of data array writes")
        .flags(Stats::nozero)
        ;

    numTagArrayReads
        .name(name() + ".num_tag_array_reads")
        .desc("number of tag array reads")
        .flags(Stats::nozero)
        ;

    numTagArrayWrites
        .name(name() + ".num_tag_array_writes")
        .desc("number of tag array writes")
        .flags(Stats::nozero)
        ;

    numTagArrayStalls
        .name(name() + ".num_tag_array_stalls")
        .desc("number of stalls caused by tag array")
        .flags(Stats::nozero)
        ;

    numDataArrayStalls
        .name(name() + ".num_data_array_stalls")
        .desc("number of stalls caused by data array")
        .flags(Stats::nozero)
        ;
}

// Assumption: SLICC-generated files will only call this function
// once **all** resources are granted.
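// When resource stalls are modeled, reserve() marks the bank that this
// set maps to in the banked array as busy, so subsequent accesses to that
// bank can be stalled.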
void
CacheMemory::recordRequestType(CacheRequestType requestType, Addr addr)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            CacheRequestType_to_string(requestType));
    switch(requestType) {
    case CacheRequestType_DataArrayRead:
        if (m_resource_stalls)
            dataArray.reserve(addressToCacheSet(addr));
        numDataArrayReads++;
        return;
    case CacheRequestType_DataArrayWrite:
        if (m_resource_stalls)
            dataArray.reserve(addressToCacheSet(addr));
        numDataArrayWrites++;
        return;
    case CacheRequestType_TagArrayRead:
        if (m_resource_stalls)
            tagArray.reserve(addressToCacheSet(addr));
        numTagArrayReads++;
        return;
    case CacheRequestType_TagArrayWrite:
        if (m_resource_stalls)
            tagArray.reserve(addressToCacheSet(addr));
        numTagArrayWrites++;
        return;
    default:
        warn("CacheMemory access_type not found: %s",
             CacheRequestType_to_string(requestType));
    }
}

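// Returns true if the bank serving this address's set in the requested
// array (tag or data) can accept an access right now; otherwise records
// the stall in the corresponding stall counter and returns false.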
bool
CacheMemory::checkResourceAvailable(CacheResourceType res, Addr addr)
{
    if (!m_resource_stalls) {
        return true;
    }

    if (res == CacheResourceType_TagArray) {
        if (tagArray.tryAccess(addressToCacheSet(addr))) return true;
        else {
            DPRINTF(RubyResourceStalls,
                    "Tag array stall on addr %#x in set %d\n",
                    addr, addressToCacheSet(addr));
            numTagArrayStalls++;
            return false;
        }
    } else if (res == CacheResourceType_DataArray) {
        if (dataArray.tryAccess(addressToCacheSet(addr))) return true;
        else {
            DPRINTF(RubyResourceStalls,
                    "Data array stall on addr %#x in set %d\n",
                    addr, addressToCacheSet(addr));
            numDataArrayStalls++;
            return false;
        }
    } else {
        assert(false);
        return true;
    }
}

bool
CacheMemory::isBlockInvalid(int64_t cache_set, int64_t loc)
{
    return (m_cache[cache_set][loc]->m_Permission == AccessPermission_Invalid);
}

bool
CacheMemory::isBlockNotBusy(int64_t cache_set, int64_t loc)
{
    return (m_cache[cache_set][loc]->m_Permission != AccessPermission_Busy);
}