// CacheMemory.cc revision 8937
16911SBrad.Beckmann@amd.com/*
26911SBrad.Beckmann@amd.com * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
36911SBrad.Beckmann@amd.com * All rights reserved.
46911SBrad.Beckmann@amd.com *
56911SBrad.Beckmann@amd.com * Redistribution and use in source and binary forms, with or without
66911SBrad.Beckmann@amd.com * modification, are permitted provided that the following conditions are
76911SBrad.Beckmann@amd.com * met: redistributions of source code must retain the above copyright
86911SBrad.Beckmann@amd.com * notice, this list of conditions and the following disclaimer;
96911SBrad.Beckmann@amd.com * redistributions in binary form must reproduce the above copyright
106911SBrad.Beckmann@amd.com * notice, this list of conditions and the following disclaimer in the
116911SBrad.Beckmann@amd.com * documentation and/or other materials provided with the distribution;
126911SBrad.Beckmann@amd.com * neither the name of the copyright holders nor the names of its
136911SBrad.Beckmann@amd.com * contributors may be used to endorse or promote products derived from
146911SBrad.Beckmann@amd.com * this software without specific prior written permission.
156911SBrad.Beckmann@amd.com *
166911SBrad.Beckmann@amd.com * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
176911SBrad.Beckmann@amd.com * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
186911SBrad.Beckmann@amd.com * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
196911SBrad.Beckmann@amd.com * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
206911SBrad.Beckmann@amd.com * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
216911SBrad.Beckmann@amd.com * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
226911SBrad.Beckmann@amd.com * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
236911SBrad.Beckmann@amd.com * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
246911SBrad.Beckmann@amd.com * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
256911SBrad.Beckmann@amd.com * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
266911SBrad.Beckmann@amd.com * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
276911SBrad.Beckmann@amd.com */
286911SBrad.Beckmann@amd.com
296911SBrad.Beckmann@amd.com#include "base/intmath.hh"
306911SBrad.Beckmann@amd.com#include "debug/RubyCache.hh"
316911SBrad.Beckmann@amd.com#include "debug/RubyCacheTrace.hh"
326911SBrad.Beckmann@amd.com#include "mem/protocol/AccessPermission.hh"
336911SBrad.Beckmann@amd.com#include "mem/ruby/system/CacheMemory.hh"
346911SBrad.Beckmann@amd.com#include "mem/ruby/system/System.hh"
356911SBrad.Beckmann@amd.com
366911SBrad.Beckmann@amd.comusing namespace std;
376911SBrad.Beckmann@amd.com
386911SBrad.Beckmann@amd.comostream&
396911SBrad.Beckmann@amd.comoperator<<(ostream& out, const CacheMemory& obj)
406911SBrad.Beckmann@amd.com{
416911SBrad.Beckmann@amd.com    obj.print(out);
426911SBrad.Beckmann@amd.com    out << flush;
436911SBrad.Beckmann@amd.com    return out;
446911SBrad.Beckmann@amd.com}
456911SBrad.Beckmann@amd.com
466911SBrad.Beckmann@amd.comCacheMemory *
476911SBrad.Beckmann@amd.comRubyCacheParams::create()
486911SBrad.Beckmann@amd.com{
496911SBrad.Beckmann@amd.com    return new CacheMemory(this);
506911SBrad.Beckmann@amd.com}
516911SBrad.Beckmann@amd.com
// Constructor: copies the raw configuration values from the python
// parameters.  Derived geometry (set count, index bits) and the
// replacement-policy object are computed later in init().
CacheMemory::CacheMemory(const Params *p)
    : SimObject(p)
{
    m_cache_size = p->size;
    m_latency = p->latency;
    m_cache_assoc = p->assoc;
    // Policy is selected by name; see init() for the accepted values
    // ("PSEUDO_LRU" or "LRU").
    m_policy = p->replacement_policy;
    m_profiler_ptr = new CacheProfiler(name());
    // Bit position where the set-index field begins within an address.
    m_start_index_bit = p->start_index_bit;
    // Used by recordCacheContents() to decide IFETCH vs. LD records.
    m_is_instruction_only_cache = p->is_icache;
}
636911SBrad.Beckmann@amd.com
646911SBrad.Beckmann@amd.comvoid
656911SBrad.Beckmann@amd.comCacheMemory::init()
666911SBrad.Beckmann@amd.com{
676911SBrad.Beckmann@amd.com    m_cache_num_sets = (m_cache_size / m_cache_assoc) /
686911SBrad.Beckmann@amd.com        RubySystem::getBlockSizeBytes();
696911SBrad.Beckmann@amd.com    assert(m_cache_num_sets > 1);
706911SBrad.Beckmann@amd.com    m_cache_num_set_bits = floorLog2(m_cache_num_sets);
716911SBrad.Beckmann@amd.com    assert(m_cache_num_set_bits > 0);
726911SBrad.Beckmann@amd.com
736911SBrad.Beckmann@amd.com    if (m_policy == "PSEUDO_LRU")
746911SBrad.Beckmann@amd.com        m_replacementPolicy_ptr =
756911SBrad.Beckmann@amd.com            new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc);
766911SBrad.Beckmann@amd.com    else if (m_policy == "LRU")
776911SBrad.Beckmann@amd.com        m_replacementPolicy_ptr =
786911SBrad.Beckmann@amd.com            new LRUPolicy(m_cache_num_sets, m_cache_assoc);
797015SBrad.Beckmann@amd.com    else
807015SBrad.Beckmann@amd.com        assert(false);
816911SBrad.Beckmann@amd.com
826911SBrad.Beckmann@amd.com    m_cache.resize(m_cache_num_sets);
836911SBrad.Beckmann@amd.com    for (int i = 0; i < m_cache_num_sets; i++) {
846911SBrad.Beckmann@amd.com        m_cache[i].resize(m_cache_assoc);
856911SBrad.Beckmann@amd.com        for (int j = 0; j < m_cache_assoc; j++) {
866911SBrad.Beckmann@amd.com            m_cache[i][j] = NULL;
876911SBrad.Beckmann@amd.com        }
886911SBrad.Beckmann@amd.com    }
896911SBrad.Beckmann@amd.com}
906911SBrad.Beckmann@amd.com
916911SBrad.Beckmann@amd.comCacheMemory::~CacheMemory()
926911SBrad.Beckmann@amd.com{
936911SBrad.Beckmann@amd.com    if (m_replacementPolicy_ptr != NULL)
946911SBrad.Beckmann@amd.com        delete m_replacementPolicy_ptr;
956911SBrad.Beckmann@amd.com    delete m_profiler_ptr;
966911SBrad.Beckmann@amd.com    for (int i = 0; i < m_cache_num_sets; i++) {
976911SBrad.Beckmann@amd.com        for (int j = 0; j < m_cache_assoc; j++) {
986911SBrad.Beckmann@amd.com            delete m_cache[i][j];
996911SBrad.Beckmann@amd.com        }
1006911SBrad.Beckmann@amd.com    }
1016911SBrad.Beckmann@amd.com}
1026911SBrad.Beckmann@amd.com
1036911SBrad.Beckmann@amd.comvoid
1046911SBrad.Beckmann@amd.comCacheMemory::printConfig(ostream& out)
1056911SBrad.Beckmann@amd.com{
1066911SBrad.Beckmann@amd.com    int block_size = RubySystem::getBlockSizeBytes();
1076911SBrad.Beckmann@amd.com
1086911SBrad.Beckmann@amd.com    out << "Cache config: " << m_cache_name << endl;
1096911SBrad.Beckmann@amd.com    out << "  cache_associativity: " << m_cache_assoc << endl;
1106911SBrad.Beckmann@amd.com    out << "  num_cache_sets_bits: " << m_cache_num_set_bits << endl;
1116911SBrad.Beckmann@amd.com    const int cache_num_sets = 1 << m_cache_num_set_bits;
1126911SBrad.Beckmann@amd.com    out << "  num_cache_sets: " << cache_num_sets << endl;
1136911SBrad.Beckmann@amd.com    out << "  cache_set_size_bytes: " << cache_num_sets * block_size << endl;
1146911SBrad.Beckmann@amd.com    out << "  cache_set_size_Kbytes: "
1156911SBrad.Beckmann@amd.com        << double(cache_num_sets * block_size) / (1<<10) << endl;
1166911SBrad.Beckmann@amd.com    out << "  cache_set_size_Mbytes: "
1176911SBrad.Beckmann@amd.com        << double(cache_num_sets * block_size) / (1<<20) << endl;
1186911SBrad.Beckmann@amd.com    out << "  cache_size_bytes: "
1196911SBrad.Beckmann@amd.com        << cache_num_sets * block_size * m_cache_assoc << endl;
1206911SBrad.Beckmann@amd.com    out << "  cache_size_Kbytes: "
1216911SBrad.Beckmann@amd.com        << double(cache_num_sets * block_size * m_cache_assoc) / (1<<10)
1226911SBrad.Beckmann@amd.com        << endl;
1236911SBrad.Beckmann@amd.com    out << "  cache_size_Mbytes: "
1246911SBrad.Beckmann@amd.com        << double(cache_num_sets * block_size * m_cache_assoc) / (1<<20)
1256911SBrad.Beckmann@amd.com        << endl;
1266911SBrad.Beckmann@amd.com}
1276911SBrad.Beckmann@amd.com
1286911SBrad.Beckmann@amd.com// convert a Address to its location in the cache
1296911SBrad.Beckmann@amd.comIndex
1306911SBrad.Beckmann@amd.comCacheMemory::addressToCacheSet(const Address& address) const
1316911SBrad.Beckmann@amd.com{
1326911SBrad.Beckmann@amd.com    assert(address == line_address(address));
1336911SBrad.Beckmann@amd.com    return address.bitSelect(m_start_index_bit,
1346911SBrad.Beckmann@amd.com                             m_start_index_bit + m_cache_num_set_bits - 1);
1356911SBrad.Beckmann@amd.com}
1366911SBrad.Beckmann@amd.com
1376911SBrad.Beckmann@amd.com// Given a cache index: returns the index of the tag in a set.
1386911SBrad.Beckmann@amd.com// returns -1 if the tag is not found.
1396911SBrad.Beckmann@amd.comint
1406911SBrad.Beckmann@amd.comCacheMemory::findTagInSet(Index cacheSet, const Address& tag) const
1416911SBrad.Beckmann@amd.com{
1426911SBrad.Beckmann@amd.com    assert(tag == line_address(tag));
1436911SBrad.Beckmann@amd.com    // search the set for the tags
1446911SBrad.Beckmann@amd.com    m5::hash_map<Address, int>::const_iterator it = m_tag_index.find(tag);
1456911SBrad.Beckmann@amd.com    if (it != m_tag_index.end())
1466911SBrad.Beckmann@amd.com        if (m_cache[cacheSet][it->second]->m_Permission !=
1476911SBrad.Beckmann@amd.com            AccessPermission_NotPresent)
1486911SBrad.Beckmann@amd.com            return it->second;
1496911SBrad.Beckmann@amd.com    return -1; // Not found
1506911SBrad.Beckmann@amd.com}
1516911SBrad.Beckmann@amd.com
1526911SBrad.Beckmann@amd.com// Given a cache index: returns the index of the tag in a set.
153// returns -1 if the tag is not found.
154int
155CacheMemory::findTagInSetIgnorePermissions(Index cacheSet,
156                                           const Address& tag) const
157{
158    assert(tag == line_address(tag));
159    // search the set for the tags
160    m5::hash_map<Address, int>::const_iterator it = m_tag_index.find(tag);
161    if (it != m_tag_index.end())
162        return it->second;
163    return -1; // Not found
164}
165
// Attempt an access of the given type.  On a permitted hit, returns
// true and sets data_ptr to the entry's data block; otherwise returns
// false with data_ptr == NULL.
bool
CacheMemory::tryCacheAccess(const Address& address, RubyRequestType type,
                            DataBlock*& data_ptr)
{
    assert(address == line_address(address));
    DPRINTF(RubyCache, "address: %s\n", address);
    Index cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    if (loc != -1) {
        // Do we even have a tag match?
        AbstractCacheEntry* entry = m_cache[cacheSet][loc];
        // Replacement state is touched on any tag match, even if the
        // access is ultimately denied below.
        m_replacementPolicy_ptr->
            touch(cacheSet, loc, g_eventQueue_ptr->getTime());
        data_ptr = &(entry->getDataBlk());

        // Read_Write permission allows any access type.
        if (entry->m_Permission == AccessPermission_Read_Write) {
            return true;
        }
        // Read_Only permits only loads and instruction fetches.
        if ((entry->m_Permission == AccessPermission_Read_Only) &&
            (type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) {
            return true;
        }
        // The line must not be accessible
    }
    // Miss or insufficient permission: clear data_ptr (this overwrites
    // the pointer set above in the tag-match-but-denied case).
    data_ptr = NULL;
    return false;
}
193
// Like tryCacheAccess(), but only checks that the line is present: the
// access 'type' parameter is not consulted here.
bool
CacheMemory::testCacheAccess(const Address& address, RubyRequestType type,
                             DataBlock*& data_ptr)
{
    assert(address == line_address(address));
    DPRINTF(RubyCache, "address: %s\n", address);
    Index cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);

    if (loc != -1) {
        // Do we even have a tag match?
        AbstractCacheEntry* entry = m_cache[cacheSet][loc];
        // Update replacement state on any tag match.
        m_replacementPolicy_ptr->
            touch(cacheSet, loc, g_eventQueue_ptr->getTime());
        data_ptr = &(entry->getDataBlk());

        // NOTE(review): findTagInSet() already filters out NotPresent
        // entries, so this comparison looks always true here -- confirm.
        return m_cache[cacheSet][loc]->m_Permission !=
            AccessPermission_NotPresent;
    }

    data_ptr = NULL;
    return false;
}
217
218// tests to see if an address is present in the cache
219bool
220CacheMemory::isTagPresent(const Address& address) const
221{
222    assert(address == line_address(address));
223    Index cacheSet = addressToCacheSet(address);
224    int loc = findTagInSet(cacheSet, address);
225
226    if (loc == -1) {
227        // We didn't find the tag
228        DPRINTF(RubyCache, "No tag match for address: %s\n", address);
229        return false;
230    }
231    DPRINTF(RubyCache, "address: %s found\n", address);
232    return true;
233}
234
235// Returns true if there is:
236//   a) a tag match on this address or there is
237//   b) an unused line in the same cache "way"
238bool
239CacheMemory::cacheAvail(const Address& address) const
240{
241    assert(address == line_address(address));
242
243    Index cacheSet = addressToCacheSet(address);
244
245    for (int i = 0; i < m_cache_assoc; i++) {
246        AbstractCacheEntry* entry = m_cache[cacheSet][i];
247        if (entry != NULL) {
248            if (entry->m_Address == address ||
249                entry->m_Permission == AccessPermission_NotPresent) {
250                // Already in the cache or we found an empty entry
251                return true;
252            }
253        } else {
254            return true;
255        }
256    }
257    return false;
258}
259
// Install 'entry' for 'address' in the first open way of its set.
// Caller must have checked cacheAvail() first; takes ownership of the
// entry (released later by deallocate()/the destructor).
AbstractCacheEntry*
CacheMemory::allocate(const Address& address, AbstractCacheEntry* entry)
{
    assert(address == line_address(address));
    assert(!isTagPresent(address));
    assert(cacheAvail(address));
    DPRINTF(RubyCache, "address: %s\n", address);

    // Find the first open slot
    Index cacheSet = addressToCacheSet(address);
    std::vector<AbstractCacheEntry*> &set = m_cache[cacheSet];
    for (int i = 0; i < m_cache_assoc; i++) {
        // A slot is open if it is NULL or holds a NotPresent entry.
        // NOTE(review): a non-NULL NotPresent entry is overwritten here
        // without being deleted -- confirm ownership / possible leak.
        if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) {
            set[i] = entry;  // Init entry
            set[i]->m_Address = address;
            set[i]->m_Permission = AccessPermission_Invalid;
            DPRINTF(RubyCache, "Allocate clearing lock for addr: %x\n",
                    address);
            // -1 marks the entry as unlocked (see setLocked/clearLocked).
            set[i]->m_locked = -1;
            // Record the way so findTagInSet() can locate this tag.
            m_tag_index[address] = i;

            // Mark the freshly installed entry as most recently used.
            m_replacementPolicy_ptr->
                touch(cacheSet, i, g_eventQueue_ptr->getTime());

            return entry;
        }
    }
    // Unreachable given the cacheAvail() assertion above.
    panic("Allocate didn't find an available entry");
}
289
// Remove the line for 'address' from the cache, freeing the entry and
// erasing its tag-map record.
void
CacheMemory::deallocate(const Address& address)
{
    assert(address == line_address(address));
    assert(isTagPresent(address));
    DPRINTF(RubyCache, "address: %s\n", address);
    Index cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    // Belt-and-braces guard: the assert above already implies loc != -1.
    if (loc != -1) {
        delete m_cache[cacheSet][loc];
        m_cache[cacheSet][loc] = NULL;
        m_tag_index.erase(address);
    }
}
304
305// Returns with the physical address of the conflicting cache line
306Address
307CacheMemory::cacheProbe(const Address& address) const
308{
309    assert(address == line_address(address));
310    assert(!cacheAvail(address));
311
312    Index cacheSet = addressToCacheSet(address);
313    return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)]->
314        m_Address;
315}
316
317// looks an address up in the cache
318AbstractCacheEntry*
319CacheMemory::lookup(const Address& address)
320{
321    assert(address == line_address(address));
322    Index cacheSet = addressToCacheSet(address);
323    int loc = findTagInSet(cacheSet, address);
324    if(loc == -1) return NULL;
325    return m_cache[cacheSet][loc];
326}
327
328// looks an address up in the cache
329const AbstractCacheEntry*
330CacheMemory::lookup(const Address& address) const
331{
332    assert(address == line_address(address));
333    Index cacheSet = addressToCacheSet(address);
334    int loc = findTagInSet(cacheSet, address);
335    if(loc == -1) return NULL;
336    return m_cache[cacheSet][loc];
337}
338
339// Sets the most recently used bit for a cache block
340void
341CacheMemory::setMRU(const Address& address)
342{
343    Index cacheSet = addressToCacheSet(address);
344    int loc = findTagInSet(cacheSet, address);
345
346    if(loc != -1)
347        m_replacementPolicy_ptr->
348             touch(cacheSet, loc, g_eventQueue_ptr->getTime());
349}
350
351void
352CacheMemory::profileMiss(const RubyRequest& msg)
353{
354    m_profiler_ptr->addCacheStatSample(msg.getType(),
355                                       msg.getAccessMode(),
356                                       msg.getPrefetch());
357}
358
359void
360CacheMemory::profileGenericRequest(GenericRequestType requestType,
361                                   RubyAccessMode accessType,
362                                   PrefetchBit pfBit)
363{
364    m_profiler_ptr->addGenericStatSample(requestType,
365                                         accessType,
366                                         pfBit);
367}
368
369void
370CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
371{
372    uint64 warmedUpBlocks = 0;
373    uint64 totalBlocks M5_VAR_USED = (uint64)m_cache_num_sets
374                                                  * (uint64)m_cache_assoc;
375
376    for (int i = 0; i < m_cache_num_sets; i++) {
377        for (int j = 0; j < m_cache_assoc; j++) {
378            if (m_cache[i][j] != NULL) {
379                AccessPermission perm = m_cache[i][j]->m_Permission;
380                RubyRequestType request_type = RubyRequestType_NULL;
381                if (perm == AccessPermission_Read_Only) {
382                    if (m_is_instruction_only_cache) {
383                        request_type = RubyRequestType_IFETCH;
384                    } else {
385                        request_type = RubyRequestType_LD;
386                    }
387                } else if (perm == AccessPermission_Read_Write) {
388                    request_type = RubyRequestType_ST;
389                }
390
391                if (request_type != RubyRequestType_NULL) {
392                    tr->addRecord(cntrl, m_cache[i][j]->m_Address.getAddress(),
393                                  0, request_type,
394                                  m_replacementPolicy_ptr->getLastAccess(i, j),
395                                  m_cache[i][j]->getDataBlk());
396                    warmedUpBlocks++;
397                }
398            }
399        }
400    }
401
402    DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks"
403            "recorded %.2f%% \n", name().c_str(), warmedUpBlocks,
404            (uint64)m_cache_num_sets * (uint64)m_cache_assoc,
405            (float(warmedUpBlocks)/float(totalBlocks))*100.0);
406}
407
408void
409CacheMemory::print(ostream& out) const
410{
411    out << "Cache dump: " << m_cache_name << endl;
412    for (int i = 0; i < m_cache_num_sets; i++) {
413        for (int j = 0; j < m_cache_assoc; j++) {
414            if (m_cache[i][j] != NULL) {
415                out << "  Index: " << i
416                    << " way: " << j
417                    << " entry: " << *m_cache[i][j] << endl;
418            } else {
419                out << "  Index: " << i
420                    << " way: " << j
421                    << " entry: NULL" << endl;
422            }
423        }
424    }
425}
426
// Per-block data dumping is not implemented for this cache; the method
// exists only to satisfy the interface.
void
CacheMemory::printData(ostream& out) const
{
    out << "printData() not supported" << endl;
}
432
// Reset only the profiler's counters; cache contents are untouched.
void
CacheMemory::clearStats() const
{
    m_profiler_ptr->clearStats();
}
438
// Statistics output is delegated entirely to the profiler.
void
CacheMemory::printStats(ostream& out) const
{
    m_profiler_ptr->printStats(out);
}
444
445void
446CacheMemory::setLocked(const Address& address, int context)
447{
448    DPRINTF(RubyCache, "Setting Lock for addr: %x to %d\n", address, context);
449    assert(address == line_address(address));
450    Index cacheSet = addressToCacheSet(address);
451    int loc = findTagInSet(cacheSet, address);
452    assert(loc != -1);
453    m_cache[cacheSet][loc]->m_locked = context;
454}
455
456void
457CacheMemory::clearLocked(const Address& address)
458{
459    DPRINTF(RubyCache, "Clear Lock for addr: %x\n", address);
460    assert(address == line_address(address));
461    Index cacheSet = addressToCacheSet(address);
462    int loc = findTagInSet(cacheSet, address);
463    assert(loc != -1);
464    m_cache[cacheSet][loc]->m_locked = -1;
465}
466
467bool
468CacheMemory::isLocked(const Address& address, int context)
469{
470    assert(address == line_address(address));
471    Index cacheSet = addressToCacheSet(address);
472    int loc = findTagInSet(cacheSet, address);
473    assert(loc != -1);
474    DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
475            address, m_cache[cacheSet][loc]->m_locked, context);
476    return m_cache[cacheSet][loc]->m_locked == context;
477}
478
479