// CacheRecorder.cc revision 10991:72781d410e48
11376Sbinkertn@umich.edu/* 21376Sbinkertn@umich.edu * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood 31376Sbinkertn@umich.edu * Copyright (c) 2010 Advanced Micro Devices, Inc. 41376Sbinkertn@umich.edu * All rights reserved. 51376Sbinkertn@umich.edu * 61376Sbinkertn@umich.edu * Redistribution and use in source and binary forms, with or without 71376Sbinkertn@umich.edu * modification, are permitted provided that the following conditions are 81376Sbinkertn@umich.edu * met: redistributions of source code must retain the above copyright 91376Sbinkertn@umich.edu * notice, this list of conditions and the following disclaimer; 101376Sbinkertn@umich.edu * redistributions in binary form must reproduce the above copyright 111376Sbinkertn@umich.edu * notice, this list of conditions and the following disclaimer in the 121376Sbinkertn@umich.edu * documentation and/or other materials provided with the distribution; 131376Sbinkertn@umich.edu * neither the name of the copyright holders nor the names of its 141376Sbinkertn@umich.edu * contributors may be used to endorse or promote products derived from 151376Sbinkertn@umich.edu * this software without specific prior written permission. 161376Sbinkertn@umich.edu * 171376Sbinkertn@umich.edu * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 181376Sbinkertn@umich.edu * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 191376Sbinkertn@umich.edu * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 201376Sbinkertn@umich.edu * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 211376Sbinkertn@umich.edu * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 221376Sbinkertn@umich.edu * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 231376Sbinkertn@umich.edu * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 241376Sbinkertn@umich.edu * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 251376Sbinkertn@umich.edu * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 261376Sbinkertn@umich.edu * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 271376Sbinkertn@umich.edu * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 281376Sbinkertn@umich.edu */ 291376Sbinkertn@umich.edu 301376Sbinkertn@umich.edu#include "debug/RubyCacheTrace.hh" 311385Sbinkertn@umich.edu#include "mem/ruby/system/CacheRecorder.hh" 321376Sbinkertn@umich.edu#include "mem/ruby/system/Sequencer.hh" 331816Sbinkertn@umich.edu#include "mem/ruby/system/System.hh" 341376Sbinkertn@umich.edu 351816Sbinkertn@umich.eduusing namespace std; 361376Sbinkertn@umich.edu 371385Sbinkertn@umich.eduvoid 381385Sbinkertn@umich.eduTraceRecord::print(ostream& out) const 391385Sbinkertn@umich.edu{ 401385Sbinkertn@umich.edu out << "[TraceRecord: Node, " << m_cntrl_id << ", " 411385Sbinkertn@umich.edu << m_data_address << ", " << m_pc_address << ", " 421385Sbinkertn@umich.edu << m_type << ", Time: " << m_time << "]"; 431385Sbinkertn@umich.edu} 441816Sbinkertn@umich.edu 451816Sbinkertn@umich.eduCacheRecorder::CacheRecorder() 461816Sbinkertn@umich.edu : m_uncompressed_trace(NULL), 471816Sbinkertn@umich.edu m_uncompressed_trace_size(0), 481816Sbinkertn@umich.edu m_block_size_bytes(RubySystem::getBlockSizeBytes()) 491816Sbinkertn@umich.edu{ 501816Sbinkertn@umich.edu} 511816Sbinkertn@umich.edu 521816Sbinkertn@umich.eduCacheRecorder::CacheRecorder(uint8_t* uncompressed_trace, 531816Sbinkertn@umich.edu uint64_t uncompressed_trace_size, 
541816Sbinkertn@umich.edu std::vector<Sequencer*>& seq_map, 551816Sbinkertn@umich.edu uint64_t block_size_bytes) 561816Sbinkertn@umich.edu : m_uncompressed_trace(uncompressed_trace), 571816Sbinkertn@umich.edu m_uncompressed_trace_size(uncompressed_trace_size), 581816Sbinkertn@umich.edu m_seq_map(seq_map), m_bytes_read(0), m_records_read(0), 591816Sbinkertn@umich.edu m_records_flushed(0), m_block_size_bytes(block_size_bytes) 601816Sbinkertn@umich.edu{ 611816Sbinkertn@umich.edu if (m_uncompressed_trace != NULL) { 621816Sbinkertn@umich.edu if (m_block_size_bytes < RubySystem::getBlockSizeBytes()) { 631816Sbinkertn@umich.edu // Block sizes larger than when the trace was recorded are not 641816Sbinkertn@umich.edu // supported, as we cannot reliably turn accesses to smaller blocks 651816Sbinkertn@umich.edu // into larger ones. 661816Sbinkertn@umich.edu panic("Recorded cache block size (%d) < current block size (%d) !!", 671816Sbinkertn@umich.edu m_block_size_bytes, RubySystem::getBlockSizeBytes()); 681816Sbinkertn@umich.edu } 691816Sbinkertn@umich.edu } 701816Sbinkertn@umich.edu} 711816Sbinkertn@umich.edu 721816Sbinkertn@umich.eduCacheRecorder::~CacheRecorder() 731816Sbinkertn@umich.edu{ 741816Sbinkertn@umich.edu if (m_uncompressed_trace != NULL) { 751816Sbinkertn@umich.edu delete [] m_uncompressed_trace; 761385Sbinkertn@umich.edu m_uncompressed_trace = NULL; 771376Sbinkertn@umich.edu } 781376Sbinkertn@umich.edu m_seq_map.clear(); 791376Sbinkertn@umich.edu} 801602Sbinkertn@umich.edu 811376Sbinkertn@umich.eduvoid 821376Sbinkertn@umich.eduCacheRecorder::enqueueNextFlushRequest() 831376Sbinkertn@umich.edu{ 841376Sbinkertn@umich.edu if (m_records_flushed < m_records.size()) { 851916Sbinkertn@umich.edu TraceRecord* rec = m_records[m_records_flushed]; 861376Sbinkertn@umich.edu m_records_flushed++; 871376Sbinkertn@umich.edu Request* req = new Request(rec->m_data_address, 881602Sbinkertn@umich.edu m_block_size_bytes, 0, 891916Sbinkertn@umich.edu Request::funcMasterId); 
901376Sbinkertn@umich.edu MemCmd::Command requestType = MemCmd::FlushReq; 911376Sbinkertn@umich.edu Packet *pkt = new Packet(req, requestType); 921376Sbinkertn@umich.edu 931376Sbinkertn@umich.edu Sequencer* m_sequencer_ptr = m_seq_map[rec->m_cntrl_id]; 941376Sbinkertn@umich.edu assert(m_sequencer_ptr != NULL); 951376Sbinkertn@umich.edu m_sequencer_ptr->makeRequest(pkt); 961376Sbinkertn@umich.edu 971376Sbinkertn@umich.edu DPRINTF(RubyCacheTrace, "Flushing %s\n", *rec); 981376Sbinkertn@umich.edu } else { 991948Sbinkertn@umich.edu DPRINTF(RubyCacheTrace, "Flushed all %d records\n", m_records_flushed); 1001376Sbinkertn@umich.edu } 1011376Sbinkertn@umich.edu} 1021376Sbinkertn@umich.edu 1031916Sbinkertn@umich.eduvoid 1041376Sbinkertn@umich.eduCacheRecorder::enqueueNextFetchRequest() 1051376Sbinkertn@umich.edu{ 1061376Sbinkertn@umich.edu if (m_bytes_read < m_uncompressed_trace_size) { 1071376Sbinkertn@umich.edu TraceRecord* traceRecord = (TraceRecord*) (m_uncompressed_trace + 1081376Sbinkertn@umich.edu m_bytes_read); 1091376Sbinkertn@umich.edu 1101376Sbinkertn@umich.edu DPRINTF(RubyCacheTrace, "Issuing %s\n", *traceRecord); 1111916Sbinkertn@umich.edu 1121881Sbinkertn@umich.edu for (int rec_bytes_read = 0; rec_bytes_read < m_block_size_bytes; 1131881Sbinkertn@umich.edu rec_bytes_read += RubySystem::getBlockSizeBytes()) { 1141881Sbinkertn@umich.edu Request* req = nullptr; 1151916Sbinkertn@umich.edu MemCmd::Command requestType; 1161948Sbinkertn@umich.edu 1171881Sbinkertn@umich.edu if (traceRecord->m_type == RubyRequestType_LD) { 1181381Sbinkertn@umich.edu requestType = MemCmd::ReadReq; 1191881Sbinkertn@umich.edu req = new Request(traceRecord->m_data_address + rec_bytes_read, 1201881Sbinkertn@umich.edu RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId); 1211381Sbinkertn@umich.edu } else if (traceRecord->m_type == RubyRequestType_IFETCH) { 1221376Sbinkertn@umich.edu requestType = MemCmd::ReadReq; 1231916Sbinkertn@umich.edu req = new Request(traceRecord->m_data_address 
+ rec_bytes_read, 1241916Sbinkertn@umich.edu RubySystem::getBlockSizeBytes(), 1251381Sbinkertn@umich.edu Request::INST_FETCH, Request::funcMasterId); 1261376Sbinkertn@umich.edu } else { 1271381Sbinkertn@umich.edu requestType = MemCmd::WriteReq; 1281376Sbinkertn@umich.edu req = new Request(traceRecord->m_data_address + rec_bytes_read, 1291381Sbinkertn@umich.edu RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId); 1301376Sbinkertn@umich.edu } 1311376Sbinkertn@umich.edu 1321602Sbinkertn@umich.edu Packet *pkt = new Packet(req, requestType); 1331602Sbinkertn@umich.edu pkt->dataStatic(traceRecord->m_data + rec_bytes_read); 1341381Sbinkertn@umich.edu 1351376Sbinkertn@umich.edu Sequencer* m_sequencer_ptr = m_seq_map[traceRecord->m_cntrl_id]; 1361948Sbinkertn@umich.edu assert(m_sequencer_ptr != NULL); 1371948Sbinkertn@umich.edu m_sequencer_ptr->makeRequest(pkt); 1381381Sbinkertn@umich.edu } 1391381Sbinkertn@umich.edu 1401916Sbinkertn@umich.edu m_bytes_read += (sizeof(TraceRecord) + m_block_size_bytes); 1411916Sbinkertn@umich.edu m_records_read++; 1421916Sbinkertn@umich.edu } else { 1431916Sbinkertn@umich.edu DPRINTF(RubyCacheTrace, "Fetched all %d records\n", m_records_read); 1441381Sbinkertn@umich.edu } 1451376Sbinkertn@umich.edu} 1461376Sbinkertn@umich.edu 1471881Sbinkertn@umich.eduvoid 1481881Sbinkertn@umich.eduCacheRecorder::addRecord(int cntrl, const physical_address_t data_addr, 1491381Sbinkertn@umich.edu const physical_address_t pc_addr, 1501376Sbinkertn@umich.edu RubyRequestType type, Tick time, DataBlock& data) 1511376Sbinkertn@umich.edu{ 1521376Sbinkertn@umich.edu TraceRecord* rec = (TraceRecord*)malloc(sizeof(TraceRecord) + 1531881Sbinkertn@umich.edu m_block_size_bytes); 1541881Sbinkertn@umich.edu rec->m_cntrl_id = cntrl; 1551881Sbinkertn@umich.edu rec->m_time = time; 1561881Sbinkertn@umich.edu rec->m_data_address = data_addr; 1571881Sbinkertn@umich.edu rec->m_pc_address = pc_addr; 1581948Sbinkertn@umich.edu rec->m_type = type; 1591385Sbinkertn@umich.edu 
memcpy(rec->m_data, data.getData(0, m_block_size_bytes), 1601385Sbinkertn@umich.edu m_block_size_bytes); 1611916Sbinkertn@umich.edu 1621916Sbinkertn@umich.edu m_records.push_back(rec); 1631881Sbinkertn@umich.edu} 1641376Sbinkertn@umich.edu 1651881Sbinkertn@umich.eduuint64 1661881Sbinkertn@umich.eduCacheRecorder::aggregateRecords(uint8_t** buf, uint64 total_size) 1671376Sbinkertn@umich.edu{ 1681881Sbinkertn@umich.edu std::sort(m_records.begin(), m_records.end(), compareTraceRecords); 1691881Sbinkertn@umich.edu 1701881Sbinkertn@umich.edu int size = m_records.size(); 1711881Sbinkertn@umich.edu uint64 current_size = 0; 1721881Sbinkertn@umich.edu int record_size = sizeof(TraceRecord) + m_block_size_bytes; 1731881Sbinkertn@umich.edu 1741376Sbinkertn@umich.edu for (int i = 0; i < size; ++i) { 1751881Sbinkertn@umich.edu // Determine if we need to expand the buffer size 1761881Sbinkertn@umich.edu if (current_size + record_size > total_size) { 1771376Sbinkertn@umich.edu uint8_t* new_buf = new (nothrow) uint8_t[total_size * 2]; 1781376Sbinkertn@umich.edu if (new_buf == NULL) { 1791881Sbinkertn@umich.edu fatal("Unable to allocate buffer of size %s\n", 1801881Sbinkertn@umich.edu total_size * 2); 1811881Sbinkertn@umich.edu } 1821881Sbinkertn@umich.edu total_size = total_size * 2; 1831881Sbinkertn@umich.edu uint8_t* old_buf = *buf; 1841881Sbinkertn@umich.edu memcpy(new_buf, old_buf, current_size); 1851881Sbinkertn@umich.edu *buf = new_buf; 1861376Sbinkertn@umich.edu delete [] old_buf; 1871376Sbinkertn@umich.edu } 1881376Sbinkertn@umich.edu 1891881Sbinkertn@umich.edu // Copy the current record into the buffer 1901881Sbinkertn@umich.edu memcpy(&((*buf)[current_size]), m_records[i], record_size); 1911376Sbinkertn@umich.edu current_size += record_size; 1921881Sbinkertn@umich.edu 1931881Sbinkertn@umich.edu free(m_records[i]); 1941376Sbinkertn@umich.edu m_records[i] = NULL; 1951376Sbinkertn@umich.edu } 1961376Sbinkertn@umich.edu 1971881Sbinkertn@umich.edu m_records.clear(); 
1981881Sbinkertn@umich.edu return current_size; 1991881Sbinkertn@umich.edu} 2001881Sbinkertn@umich.edu