// CacheRecorder.cc (gem5 Ruby), annotated revision 11049
/*
 * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2010 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
286145SN/A */ 296145SN/A 308683SN/A#include "debug/RubyCacheTrace.hh" 3110301Snilay@cs.wisc.edu#include "mem/ruby/system/CacheRecorder.hh" 328683SN/A#include "mem/ruby/system/Sequencer.hh" 338683SN/A#include "mem/ruby/system/System.hh" 346145SN/A 357055SN/Ausing namespace std; 367055SN/A 377054SN/Avoid 388683SN/ATraceRecord::print(ostream& out) const 396145SN/A{ 408683SN/A out << "[TraceRecord: Node, " << m_cntrl_id << ", " 418683SN/A << m_data_address << ", " << m_pc_address << ", " 428683SN/A << m_type << ", Time: " << m_time << "]"; 438683SN/A} 448683SN/A 458683SN/ACacheRecorder::CacheRecorder() 468683SN/A : m_uncompressed_trace(NULL), 4710163SN/A m_uncompressed_trace_size(0), 4810163SN/A m_block_size_bytes(RubySystem::getBlockSizeBytes()) 498683SN/A{ 508683SN/A} 518683SN/A 528683SN/ACacheRecorder::CacheRecorder(uint8_t* uncompressed_trace, 538683SN/A uint64_t uncompressed_trace_size, 5410163SN/A std::vector<Sequencer*>& seq_map, 5510163SN/A uint64_t block_size_bytes) 568683SN/A : m_uncompressed_trace(uncompressed_trace), 578683SN/A m_uncompressed_trace_size(uncompressed_trace_size), 588683SN/A m_seq_map(seq_map), m_bytes_read(0), m_records_read(0), 5910163SN/A m_records_flushed(0), m_block_size_bytes(block_size_bytes) 608683SN/A{ 6111049Snilay@cs.wisc.edu if (m_uncompressed_trace != NULL) { 6211049Snilay@cs.wisc.edu if (m_block_size_bytes < RubySystem::getBlockSizeBytes()) { 6311049Snilay@cs.wisc.edu // Block sizes larger than when the trace was recorded are not 6411049Snilay@cs.wisc.edu // supported, as we cannot reliably turn accesses to smaller blocks 6511049Snilay@cs.wisc.edu // into larger ones. 
6611049Snilay@cs.wisc.edu panic("Recorded cache block size (%d) < current block size (%d) !!", 6711049Snilay@cs.wisc.edu m_block_size_bytes, RubySystem::getBlockSizeBytes()); 6811049Snilay@cs.wisc.edu } 6911049Snilay@cs.wisc.edu } 708683SN/A} 718683SN/A 728683SN/ACacheRecorder::~CacheRecorder() 738683SN/A{ 748683SN/A if (m_uncompressed_trace != NULL) { 759627SN/A delete [] m_uncompressed_trace; 768683SN/A m_uncompressed_trace = NULL; 778683SN/A } 788683SN/A m_seq_map.clear(); 798683SN/A} 808683SN/A 818683SN/Avoid 828683SN/ACacheRecorder::enqueueNextFlushRequest() 838683SN/A{ 848683SN/A if (m_records_flushed < m_records.size()) { 858683SN/A TraceRecord* rec = m_records[m_records_flushed]; 868683SN/A m_records_flushed++; 878683SN/A Request* req = new Request(rec->m_data_address, 8810163SN/A m_block_size_bytes, 0, 898832SN/A Request::funcMasterId); 908683SN/A MemCmd::Command requestType = MemCmd::FlushReq; 918949SN/A Packet *pkt = new Packet(req, requestType); 928683SN/A 938683SN/A Sequencer* m_sequencer_ptr = m_seq_map[rec->m_cntrl_id]; 948683SN/A assert(m_sequencer_ptr != NULL); 958683SN/A m_sequencer_ptr->makeRequest(pkt); 968683SN/A 978683SN/A DPRINTF(RubyCacheTrace, "Flushing %s\n", *rec); 9810991Stimothy.jones@cl.cam.ac.uk } else { 9910991Stimothy.jones@cl.cam.ac.uk DPRINTF(RubyCacheTrace, "Flushed all %d records\n", m_records_flushed); 1008683SN/A } 1018683SN/A} 1028683SN/A 1038683SN/Avoid 1048683SN/ACacheRecorder::enqueueNextFetchRequest() 1058683SN/A{ 1068683SN/A if (m_bytes_read < m_uncompressed_trace_size) { 1078683SN/A TraceRecord* traceRecord = (TraceRecord*) (m_uncompressed_trace + 1088683SN/A m_bytes_read); 1098683SN/A 1108683SN/A DPRINTF(RubyCacheTrace, "Issuing %s\n", *traceRecord); 1118683SN/A 11210163SN/A for (int rec_bytes_read = 0; rec_bytes_read < m_block_size_bytes; 11310163SN/A rec_bytes_read += RubySystem::getBlockSizeBytes()) { 11410653Sandreas.hansson@arm.com Request* req = nullptr; 11510163SN/A MemCmd::Command requestType; 11610163SN/A 
11710163SN/A if (traceRecord->m_type == RubyRequestType_LD) { 11810163SN/A requestType = MemCmd::ReadReq; 11910653Sandreas.hansson@arm.com req = new Request(traceRecord->m_data_address + rec_bytes_read, 12010163SN/A RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId); 12110163SN/A } else if (traceRecord->m_type == RubyRequestType_IFETCH) { 12210163SN/A requestType = MemCmd::ReadReq; 12310653Sandreas.hansson@arm.com req = new Request(traceRecord->m_data_address + rec_bytes_read, 12410163SN/A RubySystem::getBlockSizeBytes(), 12510163SN/A Request::INST_FETCH, Request::funcMasterId); 12610163SN/A } else { 12710163SN/A requestType = MemCmd::WriteReq; 12810653Sandreas.hansson@arm.com req = new Request(traceRecord->m_data_address + rec_bytes_read, 12910163SN/A RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId); 13010163SN/A } 13110163SN/A 13210163SN/A Packet *pkt = new Packet(req, requestType); 13310163SN/A pkt->dataStatic(traceRecord->m_data + rec_bytes_read); 13410163SN/A 13510163SN/A Sequencer* m_sequencer_ptr = m_seq_map[traceRecord->m_cntrl_id]; 13610163SN/A assert(m_sequencer_ptr != NULL); 13710163SN/A m_sequencer_ptr->makeRequest(pkt); 1388683SN/A } 1398683SN/A 14010163SN/A m_bytes_read += (sizeof(TraceRecord) + m_block_size_bytes); 1418683SN/A m_records_read++; 14210991Stimothy.jones@cl.cam.ac.uk } else { 14310991Stimothy.jones@cl.cam.ac.uk DPRINTF(RubyCacheTrace, "Fetched all %d records\n", m_records_read); 1448683SN/A } 1458683SN/A} 1468683SN/A 1478683SN/Avoid 14811025Snilay@cs.wisc.eduCacheRecorder::addRecord(int cntrl, Addr data_addr, Addr pc_addr, 14910302Snilay@cs.wisc.edu RubyRequestType type, Tick time, DataBlock& data) 1508683SN/A{ 1518683SN/A TraceRecord* rec = (TraceRecord*)malloc(sizeof(TraceRecord) + 15210163SN/A m_block_size_bytes); 1538683SN/A rec->m_cntrl_id = cntrl; 1548683SN/A rec->m_time = time; 1558683SN/A rec->m_data_address = data_addr; 1568683SN/A rec->m_pc_address = pc_addr; 1578683SN/A rec->m_type = type; 15810163SN/A 
memcpy(rec->m_data, data.getData(0, m_block_size_bytes), 15910163SN/A m_block_size_bytes); 1608683SN/A 1617456SN/A m_records.push_back(rec); 1626145SN/A} 1636145SN/A 16411049Snilay@cs.wisc.eduuint64 16511049Snilay@cs.wisc.eduCacheRecorder::aggregateRecords(uint8_t** buf, uint64 total_size) 1666145SN/A{ 1678683SN/A std::sort(m_records.begin(), m_records.end(), compareTraceRecords); 1688683SN/A 1698683SN/A int size = m_records.size(); 17011049Snilay@cs.wisc.edu uint64 current_size = 0; 17110163SN/A int record_size = sizeof(TraceRecord) + m_block_size_bytes; 1728683SN/A 1738683SN/A for (int i = 0; i < size; ++i) { 1748683SN/A // Determine if we need to expand the buffer size 1758683SN/A if (current_size + record_size > total_size) { 1768683SN/A uint8_t* new_buf = new (nothrow) uint8_t[total_size * 2]; 1778683SN/A if (new_buf == NULL) { 1788683SN/A fatal("Unable to allocate buffer of size %s\n", 1798683SN/A total_size * 2); 1808683SN/A } 1818683SN/A total_size = total_size * 2; 1828683SN/A uint8_t* old_buf = *buf; 1838683SN/A memcpy(new_buf, old_buf, current_size); 1848683SN/A *buf = new_buf; 1858683SN/A delete [] old_buf; 1868683SN/A } 1878683SN/A 1888683SN/A // Copy the current record into the buffer 1898683SN/A memcpy(&((*buf)[current_size]), m_records[i], record_size); 1908683SN/A current_size += record_size; 1918683SN/A 1928683SN/A free(m_records[i]); 1938683SN/A m_records[i] = NULL; 1947054SN/A } 1956145SN/A 1967456SN/A m_records.clear(); 1978683SN/A return current_size; 1986145SN/A} 199