RubySystem.cc revision 8688
/*
 * Copyright (c) 1999-2011 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <fcntl.h>
#include <zlib.h>

#include <cstdio>

#include "base/intmath.hh"
#include "base/output.hh"
#include "debug/RubySystem.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/System.hh"
#include "sim/simulate.hh"

using namespace std;

int RubySystem::m_random_seed;
bool RubySystem::m_randomization;
Tick RubySystem::m_clock;
int RubySystem::m_block_size_bytes;
int RubySystem::m_block_size_bits;
uint64 RubySystem::m_memory_size_bytes;
int RubySystem::m_memory_size_bits;

Network* RubySystem::m_network_ptr;
Profiler* RubySystem::m_profiler_ptr;
MemoryVector* RubySystem::m_mem_vec_ptr;

RubySystem::RubySystem(const Params *p)
    : SimObject(p)
{
    if (g_system_ptr != NULL)
        fatal("Only one RubySystem object currently allowed.\n");

    m_random_seed = p->random_seed;
    srandom(m_random_seed);
    m_randomization = p->randomization;
    m_clock = p->clock;

    m_block_size_bytes = p->block_size_bytes;
    assert(isPowerOf2(m_block_size_bytes));
    m_block_size_bits = floorLog2(m_block_size_bytes);

    m_memory_size_bytes = p->mem_size;
    if (m_memory_size_bytes == 0) {
        m_memory_size_bits = 0;
    } else {
        m_memory_size_bits = floorLog2(m_memory_size_bytes);
    }

    g_eventQueue_ptr = new RubyEventQueue(p->eventq, m_clock);
    g_system_ptr = this;
    if (p->no_mem_vec) {
        m_mem_vec_ptr = NULL;
    } else {
        m_mem_vec_ptr = new MemoryVector;
        m_mem_vec_ptr->resize(m_memory_size_bytes);
    }

    //
    // Print ruby configuration and stats at exit
    //
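    // RubyExitCallback::process(), defined at the bottom of this file, opens
    // p->stats_filename through simout and dumps both the configuration and
    // the statistics into it when the simulator exits.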
    RubyExitCallback* rubyExitCB = new RubyExitCallback(p->stats_filename);
    registerExitCallback(rubyExitCB);
    m_warmup_enabled = false;
    m_cooldown_enabled = false;
}

void
RubySystem::init()
{
    m_profiler_ptr->clearStats();
}

void
RubySystem::registerNetwork(Network* network_ptr)
{
    m_network_ptr = network_ptr;
}

void
RubySystem::registerProfiler(Profiler* profiler_ptr)
{
    m_profiler_ptr = profiler_ptr;
}

void
RubySystem::registerAbstractController(AbstractController* cntrl)
{
    m_abs_cntrl_vec.push_back(cntrl);
}

void
RubySystem::registerSparseMemory(SparseMemory* s)
{
    m_sparse_memory_vector.push_back(s);
}

RubySystem::~RubySystem()
{
    delete m_network_ptr;
    delete m_profiler_ptr;
    if (m_mem_vec_ptr)
        delete m_mem_vec_ptr;
}

void
RubySystem::printSystemConfig(ostream & out)
{
    out << "RubySystem config:" << endl
        << "  random_seed: " << m_random_seed << endl
        << "  randomization: " << m_randomization << endl
        << "  cycle_period: " << m_clock << endl
        << "  block_size_bytes: " << m_block_size_bytes << endl
        << "  block_size_bits: " << m_block_size_bits << endl
        << "  memory_size_bytes: " << m_memory_size_bytes << endl
        << "  memory_size_bits: " << m_memory_size_bits << endl;
}

void
RubySystem::printConfig(ostream& out)
{
    out << "\n================ Begin RubySystem Configuration Print ================\n\n";
    printSystemConfig(out);
    m_network_ptr->printConfig(out);
    m_profiler_ptr->printConfig(out);
    out << "\n================ End RubySystem Configuration Print ================\n\n";
}

void
RubySystem::printStats(ostream& out)
{
    const time_t T = time(NULL);
    tm *localTime = localtime(&T);
    char buf[100];
    strftime(buf, 100, "%b/%d/%Y %H:%M:%S", localTime);

    out << "Real time: " << buf << endl;

    m_profiler_ptr->printStats(out);
    m_network_ptr->printStats(out);
}

void
RubySystem::writeCompressedTrace(uint8* raw_data, string filename,
                                 uint64 uncompressed_trace_size)
{
    // Create the checkpoint file for the memory
    string thefile = Checkpoint::dir() + "/" + filename.c_str();

    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open memory trace file '%s'\n", filename);
    }

    gzFile compressedMemory = gzdopen(fd, "wb");
    if (compressedMemory == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    if (gzwrite(compressedMemory, raw_data, uncompressed_trace_size) !=
        uncompressed_trace_size) {
        fatal("Write failed on memory trace file '%s'\n", filename);
    }

    if (gzclose(compressedMemory)) {
        fatal("Close failed on memory trace file '%s'\n", filename);
    }
    delete [] raw_data;
}

void
RubySystem::serialize(std::ostream &os)
{
    m_cooldown_enabled = true;

    vector<Sequencer*> sequencer_map;
    Sequencer* sequencer_ptr = NULL;
    int cntrl_id = -1;

    for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
        sequencer_map.push_back(m_abs_cntrl_vec[cntrl]->getSequencer());
        if (sequencer_ptr == NULL) {
            sequencer_ptr = sequencer_map[cntrl];
            cntrl_id = cntrl;
        }
    }

    assert(sequencer_ptr != NULL);

    for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
        if (sequencer_map[cntrl] == NULL) {
            sequencer_map[cntrl] = sequencer_ptr;
        }
    }

    // Create the CacheRecorder and record the cache trace
    m_cache_recorder = new CacheRecorder(NULL, 0, sequencer_map);

    for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
        m_abs_cntrl_vec[cntrl]->recordCacheTrace(cntrl, m_cache_recorder);
    }

    // save the current tick value
    Tick curtick_original = curTick();
    // save the event queue head
    Event* eventq_head = eventq->replaceHead(NULL);

    // Schedule an event to start cache cooldown
    RubyEvent* e = new RubyEvent(this);
    schedule(e, curTick());
    simulate();

    // Restore eventq head
    eventq_head = eventq->replaceHead(eventq_head);
    // Restore curTick
    curTick(curtick_original);

    uint8* raw_data = NULL;

    if (m_mem_vec_ptr != NULL) {
        uint64 memory_trace_size = m_mem_vec_ptr->collatePages(raw_data);

        string memory_trace_file = name() + ".memory.gz";
        writeCompressedTrace(raw_data, memory_trace_file,
                             memory_trace_size);

        SERIALIZE_SCALAR(memory_trace_file);
        SERIALIZE_SCALAR(memory_trace_size);

    } else {
        for (int i = 0; i < m_sparse_memory_vector.size(); ++i) {
            m_sparse_memory_vector[i]->recordBlocks(cntrl_id,
                                                    m_cache_recorder);
        }
    }

    // Aggregate the trace entries together into a single array
    raw_data = new uint8_t[4096];
    uint64 cache_trace_size = m_cache_recorder->aggregateRecords(&raw_data,
                                                                 4096);
    string cache_trace_file = name() + ".cache.gz";
    writeCompressedTrace(raw_data, cache_trace_file, cache_trace_size);

    SERIALIZE_SCALAR(cache_trace_file);
    SERIALIZE_SCALAR(cache_trace_size);

    m_cooldown_enabled = false;
}

void
RubySystem::readCompressedTrace(string filename, uint8*& raw_data,
                                uint64& uncompressed_trace_size)
{
    // Read the trace file
    gzFile compressedTrace;

    // trace file
    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Unable to open trace file %s", filename);
    }

    compressedTrace = gzdopen(fd, "rb");
    if (compressedTrace == NULL) {
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);
    }

    raw_data = new uint8_t[uncompressed_trace_size];
    if (gzread(compressedTrace, raw_data, uncompressed_trace_size) <
            uncompressed_trace_size) {
        fatal("Unable to read complete trace from file %s\n", filename);
    }

    if (gzclose(compressedTrace)) {
        fatal("Failed to close cache trace file '%s'\n", filename);
    }
}

void
RubySystem::unserialize(Checkpoint *cp, const string &section)
{
    //
    // The main purpose for clearing stats in the unserialize process is so
    // that the profiler can correctly set its start time to the unserialized
    // value of curTick()
    //
    clearStats();
    uint8* uncompressed_trace = NULL;

    if (m_mem_vec_ptr != NULL) {
        string memory_trace_file;
        uint64 memory_trace_size = 0;

        UNSERIALIZE_SCALAR(memory_trace_file);
        UNSERIALIZE_SCALAR(memory_trace_size);
        memory_trace_file = cp->cptDir + "/" + memory_trace_file;

        readCompressedTrace(memory_trace_file, uncompressed_trace,
                            memory_trace_size);
        m_mem_vec_ptr->populatePages(uncompressed_trace);

        delete [] uncompressed_trace;
        uncompressed_trace = NULL;
    }

    string cache_trace_file;
    uint64 cache_trace_size = 0;

    UNSERIALIZE_SCALAR(cache_trace_file);
    UNSERIALIZE_SCALAR(cache_trace_size);
    cache_trace_file = cp->cptDir + "/" + cache_trace_file;

    readCompressedTrace(cache_trace_file, uncompressed_trace,
                        cache_trace_size);
    m_warmup_enabled = true;

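    // Not every controller necessarily has its own Sequencer; map entries
    // that come back NULL to the first valid Sequencer found, so the
    // CacheRecorder built below can issue warmup requests for every
    // recorded block.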
    vector<Sequencer*> sequencer_map;
    Sequencer* t = NULL;
    for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
        sequencer_map.push_back(m_abs_cntrl_vec[cntrl]->getSequencer());
        if (t == NULL) t = sequencer_map[cntrl];
    }

    assert(t != NULL);

    for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
        if (sequencer_map[cntrl] == NULL) {
            sequencer_map[cntrl] = t;
        }
    }

    m_cache_recorder = new CacheRecorder(uncompressed_trace, cache_trace_size,
                                         sequencer_map);
}

void
RubySystem::startup()
{
    if (m_warmup_enabled) {
        // save the current tick value
        Tick curtick_original = curTick();
        // save the event queue head
        Event* eventq_head = eventq->replaceHead(NULL);
        // set curTick to 0
        curTick(0);

        // Schedule an event to start cache warmup
        RubyEvent* e = new RubyEvent(this);
        schedule(e, curTick());
        simulate();

        delete m_cache_recorder;
        m_cache_recorder = NULL;
        m_warmup_enabled = false;
        // Restore eventq head
        eventq_head = eventq->replaceHead(eventq_head);
        // Restore curTick
        curTick(curtick_original);
    }
}

void
RubySystem::RubyEvent::process()
{
    if (ruby_system->m_warmup_enabled) {
        ruby_system->m_cache_recorder->enqueueNextFetchRequest();
    } else if (ruby_system->m_cooldown_enabled) {
        ruby_system->m_cache_recorder->enqueueNextFlushRequest();
    }
}

void
RubySystem::clearStats() const
{
    m_profiler_ptr->clearStats();
    m_network_ptr->clearStats();
}

#ifdef CHECK_COHERENCE
// This code checks for cases where a given cache block is exclusive in
// one node and shared in another -- a coherence violation.
//
// To use, the SLICC specification must call sequencer.checkCoherence(address)
// when the controller changes to a state with new permissions. Do this
// in setState. The SLICC spec must also define methods "isBlockShared"
// and "isBlockExclusive" that are specific to that protocol.
//
void
RubySystem::checkGlobalCoherenceInvariant(const Address& addr)
{
#if 0
    NodeID exclusive = -1;
    bool sharedDetected = false;
    NodeID lastShared = -1;

    for (int i = 0; i < m_chip_vector.size(); i++) {
        if (m_chip_vector[i]->isBlockExclusive(addr)) {
            if (exclusive != -1) {
                // coherence violation
                WARN_EXPR(exclusive);
                WARN_EXPR(m_chip_vector[i]->getID());
                WARN_EXPR(addr);
                WARN_EXPR(g_eventQueue_ptr->getTime());
                ERROR_MSG("Coherence Violation Detected -- 2 exclusive chips");
            } else if (sharedDetected) {
                WARN_EXPR(lastShared);
                WARN_EXPR(m_chip_vector[i]->getID());
                WARN_EXPR(addr);
                WARN_EXPR(g_eventQueue_ptr->getTime());
                ERROR_MSG("Coherence Violation Detected -- exclusive chip with >=1 shared");
            } else {
                exclusive = m_chip_vector[i]->getID();
            }
        } else if (m_chip_vector[i]->isBlockShared(addr)) {
            sharedDetected = true;
            lastShared = m_chip_vector[i]->getID();

            if (exclusive != -1) {
                WARN_EXPR(lastShared);
                WARN_EXPR(exclusive);
                WARN_EXPR(addr);
                WARN_EXPR(g_eventQueue_ptr->getTime());
                ERROR_MSG("Coherence Violation Detected -- exclusive chip with >=1 shared");
            }
        }
    }
#endif
}
#endif

RubySystem *
RubySystemParams::create()
{
    return new RubySystem(this);
}

/**
 * virtual process function that is invoked when the callback
 * queue is executed.
 */
void
RubyExitCallback::process()
{
    std::ostream *os = simout.create(stats_filename);
    RubySystem::printConfig(*os);
    *os << endl;
    RubySystem::printStats(*os);
}