/*
 * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2010 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "debug/RubyCacheTrace.hh"
#include "mem/ruby/recorder/CacheRecorder.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"

using namespace std;

void
TraceRecord::print(ostream& out) const
{
    out << "[TraceRecord: Node, " << m_cntrl_id << ", "
        << m_data_address << ", " << m_pc_address << ", "
        << m_type << ", Time: " << m_time << "]";
}

CacheRecorder::CacheRecorder()
    : m_uncompressed_trace(NULL),
      m_uncompressed_trace_size(0),
      m_block_size_bytes(RubySystem::getBlockSizeBytes())
{
}

// Construct a recorder that plays back a previously recorded (and already
// uncompressed) trace. The recorder takes ownership of the trace buffer.
CacheRecorder::CacheRecorder(uint8_t* uncompressed_trace,
                             uint64_t uncompressed_trace_size,
                             std::vector<Sequencer*>& seq_map,
                             uint64_t block_size_bytes)
    : m_uncompressed_trace(uncompressed_trace),
      m_uncompressed_trace_size(uncompressed_trace_size),
      m_seq_map(seq_map), m_bytes_read(0), m_records_read(0),
      m_records_flushed(0), m_block_size_bytes(block_size_bytes)
{
    if (m_uncompressed_trace != NULL) {
        if (m_block_size_bytes < RubySystem::getBlockSizeBytes()) {
            // Block sizes larger than when the trace was recorded are not
            // supported, as we cannot reliably turn accesses to smaller
            // blocks into larger ones.
            panic("Recorded cache block size (%d) < current block size (%d) !!",
                  m_block_size_bytes, RubySystem::getBlockSizeBytes());
        }
    }
}

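// The recorder owns the uncompressed trace buffer handed to the playback
// constructor above; it is freed here. The sequencers themselves are not
// owned by the recorder, so only the pointer map is cleared.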
CacheRecorder::~CacheRecorder()
{
    if (m_uncompressed_trace != NULL) {
        delete [] m_uncompressed_trace;
        m_uncompressed_trace = NULL;
    }
    m_seq_map.clear();
}

// Issue a flush request for the next recorded block, if any remain.
void
CacheRecorder::enqueueNextFlushRequest()
{
    if (m_records_flushed < m_records.size()) {
        TraceRecord* rec = m_records[m_records_flushed];
        m_records_flushed++;
        Request* req = new Request(rec->m_data_address,
                                   m_block_size_bytes, 0,
                                   Request::funcMasterId);
        MemCmd::Command requestType = MemCmd::FlushReq;
        Packet *pkt = new Packet(req, requestType);

        Sequencer* m_sequencer_ptr = m_seq_map[rec->m_cntrl_id];
        assert(m_sequencer_ptr != NULL);
        m_sequencer_ptr->makeRequest(pkt);

        DPRINTF(RubyCacheTrace, "Flushing %s\n", *rec);
    }
}

// Replay the next record from the uncompressed trace. A recorded block may be
// larger than the current block size, so it is re-issued as one request per
// current-sized block.
void
CacheRecorder::enqueueNextFetchRequest()
{
    if (m_bytes_read < m_uncompressed_trace_size) {
        TraceRecord* traceRecord = (TraceRecord*) (m_uncompressed_trace +
                                                   m_bytes_read);

        DPRINTF(RubyCacheTrace, "Issuing %s\n", *traceRecord);

        for (int rec_bytes_read = 0; rec_bytes_read < m_block_size_bytes;
                rec_bytes_read += RubySystem::getBlockSizeBytes()) {
            Request* req = new Request();
            MemCmd::Command requestType;

            if (traceRecord->m_type == RubyRequestType_LD) {
                requestType = MemCmd::ReadReq;
                req->setPhys(traceRecord->m_data_address + rec_bytes_read,
                    RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId);
            } else if (traceRecord->m_type == RubyRequestType_IFETCH) {
                requestType = MemCmd::ReadReq;
                req->setPhys(traceRecord->m_data_address + rec_bytes_read,
                    RubySystem::getBlockSizeBytes(),
                    Request::INST_FETCH, Request::funcMasterId);
            } else {
                requestType = MemCmd::WriteReq;
                req->setPhys(traceRecord->m_data_address + rec_bytes_read,
                    RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId);
            }

            Packet *pkt = new Packet(req, requestType);
            pkt->dataStatic(traceRecord->m_data + rec_bytes_read);

            Sequencer* m_sequencer_ptr = m_seq_map[traceRecord->m_cntrl_id];
            assert(m_sequencer_ptr != NULL);
            m_sequencer_ptr->makeRequest(pkt);
        }

        m_bytes_read += (sizeof(TraceRecord) + m_block_size_bytes);
        m_records_read++;
    }
}

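// addRecord() captures one cache block during trace recording. Each
// TraceRecord is allocated with m_block_size_bytes of extra space so the
// block's data is stored inline, right after the fixed fields; this is also
// why the trace is walked with a stride of
// sizeof(TraceRecord) + m_block_size_bytes in enqueueNextFetchRequest().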
void
CacheRecorder::addRecord(int cntrl, const physical_address_t data_addr,
                         const physical_address_t pc_addr,
                         RubyRequestType type, Time time, DataBlock& data)
{
    TraceRecord* rec = (TraceRecord*)malloc(sizeof(TraceRecord) +
                                            m_block_size_bytes);
    rec->m_cntrl_id = cntrl;
    rec->m_time = time;
    rec->m_data_address = data_addr;
    rec->m_pc_address = pc_addr;
    rec->m_type = type;
    memcpy(rec->m_data, data.getData(0, m_block_size_bytes),
           m_block_size_bytes);

    m_records.push_back(rec);
}

// Pack all recorded blocks into a single contiguous buffer (doubling *buf as
// needed) and return the number of bytes used; the records are consumed.
uint64
CacheRecorder::aggregateRecords(uint8_t** buf, uint64 total_size)
{
    std::sort(m_records.begin(), m_records.end(), compareTraceRecords);

    int size = m_records.size();
    uint64 current_size = 0;
    int record_size = sizeof(TraceRecord) + m_block_size_bytes;

    for (int i = 0; i < size; ++i) {
        // Determine if we need to expand the buffer size
        if (current_size + record_size > total_size) {
            uint8_t* new_buf = new (nothrow) uint8_t[total_size * 2];
            if (new_buf == NULL) {
                fatal("Unable to allocate buffer of size %s\n",
                      total_size * 2);
            }
            total_size = total_size * 2;
            uint8_t* old_buf = *buf;
            memcpy(new_buf, old_buf, current_size);
            *buf = new_buf;
            delete [] old_buf;
        }

        // Copy the current record into the buffer
        memcpy(&((*buf)[current_size]), m_records[i], record_size);
        current_size += record_size;

        free(m_records[i]);
        m_records[i] = NULL;
    }

    m_records.clear();
    return current_size;
}
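
// Usage sketch (illustrative only; the buffer sizes, variable names, and the
// driving event loop below are assumptions, not part of this file). In gem5,
// the Ruby checkpoint/warmup path is the usual client of this class:
//
//     std::vector<Sequencer*> seqs = ...;   // one sequencer per controller
//     CacheRecorder recorder(trace_buf, trace_bytes, seqs, block_size);
//
//     // Warmup after restoring a checkpoint: replay one record per call
//     // (typically from a scheduled event) until the trace is consumed.
//     recorder.enqueueNextFetchRequest();
//
//     // Checkpointing: controllers populate a recorder via addRecord() for
//     // every valid cache block, then the records are packed into a single
//     // buffer for compression and serialization.
//     uint8_t* buf = new uint8_t[initial_size];
//     uint64 bytes_used = recorder.aggregateRecords(&buf, initial_size);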