Sequencer.cc revision 11793
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/system/Sequencer.hh"

#include "arch/x86/ldstflags.hh"
#include "base/misc.hh"
#include "base/str.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/packet.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "sim/system.hh"

using namespace std;

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), m_IncompleteTimes(MachineType_NUM), deadlockCheckEvent(this)
{
    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_data_cache_hit_latency = p->dcache_hit_latency;
    m_inst_cache_hit_latency = p->icache_hit_latency;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    m_coreId = p->coreid; // for tracking the two CorePair sequencers
    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);
    assert(m_data_cache_hit_latency > 0);
    assert(m_inst_cache_hit_latency > 0);

    m_runningGarnetStandalone = p->garnet_standalone;
}

Sequencer::~Sequencer()
{
}

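// Periodic deadlock check: scan every outstanding request in both request
// tables and panic if any of them has been outstanding for more than
// m_deadlock_threshold cycles.  The event reschedules itself for as long as
// requests remain outstanding.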
void
Sequencer::wakeup()
{
    assert(drainState() != DrainState::Draining);

    // Check for deadlock of any of the requests
    Cycles current_time = curCycle();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->pkt->getAddr(), m_readRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->pkt->getAddr(), m_writeRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }
}

void Sequencer::resetStats()
{
    m_latencyHist.reset();
    m_hitLatencyHist.reset();
    m_missLatencyHist.reset();
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist[i]->reset();
        m_hitTypeLatencyHist[i]->reset();
        m_missTypeLatencyHist[i]->reset();
        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i][j]->reset();
            m_missTypeMachLatencyHist[i][j]->reset();
        }
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_missMachLatencyHist[i]->reset();
        m_hitMachLatencyHist[i]->reset();
        m_IssueToInitialDelayHist[i]->reset();
        m_InitialToForwardDelayHist[i]->reset();
        m_ForwardToFirstResponseDelayHist[i]->reset();
        m_FirstResponseToCompletionDelayHist[i]->reset();

        m_IncompleteTimes[i] = 0;
    }
}

// Insert the request in the appropriate request table.  Returns
// RequestStatus_Aliased if the line is blocked or a request for the same
// cache line is already outstanding, otherwise RequestStatus_Ready.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (!deadlockCheckEvent.scheduled() &&
        drainState() != DrainState::Draining) {
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }

    Addr line_addr = makeLineAddress(pkt->getAddr());

    // Check if the line is blocked for a Locked_RMW
    if (m_controller->isBlocked(line_addr) &&
        (request_type != RubyRequestType_Locked_RMW_Write)) {
        // Return that this request's cache line address aliases with
        // a prior request that locked the cache line. The request cannot
        // proceed until the cache line is unlocked by a Locked_RMW_Write.
        return RequestStatus_Aliased;
    }

    // Create a default entry, mapping the address to NULL, the cast is
    // there to make gcc 4.4 happy
    RequestTable::value_type default_entry(line_addr,
                                           (SequencerRequest*) NULL);

    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(default_entry);
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(default_entry);

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load++;
            return RequestStatus_Aliased;
        }
    }

    m_outstandReqHist.sample(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}

void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

void
Sequencer::invalidateSC(Addr address)
{
    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
    // The controller has lost the coherence permissions, hence the lock
    // on the cache line maintained by the cache should be cleared.
    if (e && e->isLocked(m_version)) {
        e->clearLocked();
    }
}

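// Implements LL/SC semantics on top of the per-line lock kept in the data
// cache entry: an LL locks the line, a normal write clears the lock, and an
// SC succeeds only if the lock is still held.  Returns false only for a
// failed Store_Conditional.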
bool
Sequencer::handleLlsc(Addr address, SequencerRequest* request)
{
    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
    if (!e)
        return true;

    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!e->isLocked(m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        e->clearLocked();
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        e->setLocked(m_version);
    } else if (e->isLocked(m_version)) {
        //
        // Normal writes should clear the locked address
        //
        e->clearLocked();
    }
    return success;
}

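// Bucket the measured latency into the appropriate histograms.  "Hit" here
// means the request was serviced locally (isExternalHit == false); external
// hits are further broken down by the responding machine type and, when the
// four timestamps are monotonically ordered, into issue-to-initial,
// initial-to-forward, forward-to-first-response and
// first-response-to-completion delay components.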
void
Sequencer::recordMissLatency(const Cycles cycles, const RubyRequestType type,
                             const MachineType respondingMach,
                             bool isExternalHit, Cycles issuedTime,
                             Cycles initialRequestTime,
                             Cycles forwardRequestTime,
                             Cycles firstResponseTime, Cycles completionTime)
{
    m_latencyHist.sample(cycles);
    m_typeLatencyHist[type]->sample(cycles);

    if (isExternalHit) {
        m_missLatencyHist.sample(cycles);
        m_missTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_missMachLatencyHist[respondingMach]->sample(cycles);
            m_missTypeMachLatencyHist[type][respondingMach]->sample(cycles);

            if ((issuedTime <= initialRequestTime) &&
                (initialRequestTime <= forwardRequestTime) &&
                (forwardRequestTime <= firstResponseTime) &&
                (firstResponseTime <= completionTime)) {

                m_IssueToInitialDelayHist[respondingMach]->sample(
                    initialRequestTime - issuedTime);
                m_InitialToForwardDelayHist[respondingMach]->sample(
                    forwardRequestTime - initialRequestTime);
                m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
                    firstResponseTime - forwardRequestTime);
                m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
                    completionTime - firstResponseTime);
            } else {
                m_IncompleteTimes[respondingMach]++;
            }
        }
    } else {
        m_hitLatencyHist.sample(cycles);
        m_hitTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_hitMachLatencyHist[respondingMach]->sample(cycles);
            m_hitTypeMachLatencyHist[type][respondingMach]->sample(cycles);
        }
    }
}

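// Completion callback for store-class requests, typically invoked from the
// SLICC-generated cache controller once the protocol has finished servicing
// the line.  Removes the entry from the write request table, applies LL/SC
// and Locked_RMW bookkeeping, and hands the data to hitCallback().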
void
Sequencer::writeCallback(Addr address, DataBlock& data,
                         const bool externalHit, const MachineType mach,
                         const Cycles initialRequestTime,
                         const Cycles forwardRequestTime,
                         const Cycles firstResponseTime)
{
    assert(address == makeLineAddress(address));
    assert(m_writeRequestTable.count(makeLineAddress(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Garnet_standalone protocol.
    //
    bool success = true;
    if (!m_runningGarnetStandalone)
        success = handleLlsc(address, request);

    // Handle SLICC block_on behavior for Locked_RMW accesses. NOTE: the
    // address variable here is assumed to be a line address, so when
    // blocking buffers, must check line addresses.
    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        // blockOnQueue blocks all first-level cache controller queues
        // waiting on memory accesses for the specified address that go to
        // the specified queue. In this case, a Locked_RMW_Write must go to
        // the mandatory_q before unblocking the first-level controller.
        // This will block standard loads, stores, ifetches, etc.
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, data, success, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::readCallback(Addr address, DataBlock& data,
                        bool externalHit, const MachineType mach,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    assert(address == makeLineAddress(address));
    assert(m_readRequestTable.count(makeLineAddress(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, data, true, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

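// Common completion path for both read and write callbacks: records the
// end-to-end latency, copies data between the Ruby DataBlock and the gem5
// packet (direction depends on the request type), and then either replays
// the next warmup/cooldown trace request or completes the packet through
// ruby_hit_callback().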
void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                       bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime)
{
    warn_once("Replacement policy updates recently became the responsibility "
              "of SLICC state machines. Make sure to setMRU() near callbacks "
              "in .sm files!");

    PacketPtr pkt = srequest->pkt;
    Addr request_address(pkt->getAddr());
    RubyRequestType type = srequest->m_type;
    Cycles issued_time = srequest->issue_time;

    assert(curCycle() >= issued_time);
    Cycles total_latency = curCycle() - issued_time;

    // Profile the latency for all demand accesses.
    recordMissLatency(total_latency, type, mach, externalHit, issued_time,
                      initialRequestTime, forwardRequestTime,
                      firstResponseTime, curCycle());

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %d cycles\n",
             curTick(), m_version, "Seq",
             llscSuccess ? "Done" : "SC_Failed", "", "",
             printAddress(request_address), total_latency);

"Done" : "SC_Failed", "", "", 46611118Snilay@cs.wisc.edu printAddress(request_address), total_latency); 4676285Snate@binkert.org 46810562Sandreas.hansson@arm.com // update the data unless it is a non-data-carrying flush 46910837Sjthestness@gmail.com if (RubySystem::getWarmupEnabled()) { 47010563Sandreas.hansson@arm.com data.setData(pkt->getConstPtr<uint8_t>(), 47111025Snilay@cs.wisc.edu getOffset(request_address), pkt->getSize()); 47210562Sandreas.hansson@arm.com } else if (!pkt->isFlush()) { 4737039Snate@binkert.org if ((type == RubyRequestType_LD) || 4747039Snate@binkert.org (type == RubyRequestType_IFETCH) || 4757039Snate@binkert.org (type == RubyRequestType_RMW_Read) || 4767908Shestness@cs.utexas.edu (type == RubyRequestType_Locked_RMW_Read) || 4777907Shestness@cs.utexas.edu (type == RubyRequestType_Load_Linked)) { 47810562Sandreas.hansson@arm.com memcpy(pkt->getPtr<uint8_t>(), 47911025Snilay@cs.wisc.edu data.getData(getOffset(request_address), pkt->getSize()), 4808615Snilay@cs.wisc.edu pkt->getSize()); 48110954SBrad.Beckmann@amd.com DPRINTF(RubySequencer, "read data %s\n", data); 48211519Smarco.elver@ed.ac.uk } else if (pkt->req->isSwap()) { 48311519Smarco.elver@ed.ac.uk std::vector<uint8_t> overwrite_val(pkt->getSize()); 48411519Smarco.elver@ed.ac.uk memcpy(&overwrite_val[0], pkt->getConstPtr<uint8_t>(), 48511519Smarco.elver@ed.ac.uk pkt->getSize()); 48611519Smarco.elver@ed.ac.uk memcpy(pkt->getPtr<uint8_t>(), 48711519Smarco.elver@ed.ac.uk data.getData(getOffset(request_address), pkt->getSize()), 48811519Smarco.elver@ed.ac.uk pkt->getSize()); 48911519Smarco.elver@ed.ac.uk data.setData(&overwrite_val[0], 49011519Smarco.elver@ed.ac.uk getOffset(request_address), pkt->getSize()); 49111519Smarco.elver@ed.ac.uk DPRINTF(RubySequencer, "swap data %s\n", data); 4927039Snate@binkert.org } else { 49310563Sandreas.hansson@arm.com data.setData(pkt->getConstPtr<uint8_t>(), 49411025Snilay@cs.wisc.edu getOffset(request_address), pkt->getSize()); 49510954SBrad.Beckmann@amd.com DPRINTF(RubySequencer, "set data %s\n", data); 4967039Snate@binkert.org } 4977039Snate@binkert.org } 4987023SBrad.Beckmann@amd.com 4997039Snate@binkert.org // If using the RubyTester, update the RubyTester sender state's 5007039Snate@binkert.org // subBlock with the recieved data. The tester will later access 5017039Snate@binkert.org // this state. 
    if (m_usingRubyTester) {
        DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
                pkt->cmdString(), pkt->getAddr());
        RubyTester::SenderState* testerSenderState =
            pkt->findNextSenderState<RubyTester::SenderState>();
        assert(testerSenderState);
        testerSenderState->subBlock.mergeFrom(data);
    }

    delete srequest;

    RubySystem *rs = m_ruby_system;
    if (RubySystem::getWarmupEnabled()) {
        assert(pkt->req);
        delete pkt->req;
        delete pkt;
        rs->m_cache_recorder->enqueueNextFetchRequest();
    } else if (RubySystem::getCooldownEnabled()) {
        delete pkt;
        rs->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
        testDrainComplete();
    }
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

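// Entry point for new memory requests arriving through the RubyPort.  Maps
// the gem5 packet onto a primary RubyRequestType (used to select the request
// table and for LL/SC and Locked_RMW handling) and a secondary type (the
// type actually issued to the cache controller), then inserts and issues the
// request.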
RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLockedRMW()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        //
        // To support SwapReq, we need to check isWrite() first: a SwapReq
        // should always be treated like a write, but since a SwapReq implies
        // both isWrite() and isRead() are true, check isWrite() first here.
        //
        if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
                bool storeCheck = false;
                // only x86 needs the store check
                if (system->getArch() == Arch::X86ISA) {
                    uint32_t flags = pkt->req->getFlags();
                    storeCheck = flags &
                        (X86ISA::StoreCheck << X86ISA::FlagShift);
                }
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

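// Build the RubyRequest for an accepted packet and enqueue it on the
// mandatory queue of the attached cache controller, charging the configured
// L1 hit latency (instruction or data) up front.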
void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    ContextID proc_id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;

    ContextID core_id = coreId();

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    // check if the packet has data as for example prefetch and flush
    // requests do not
    std::shared_ptr<RubyRequest> msg =
        std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
                                      pkt->isFlush() ?
                                      nullptr : pkt->getPtr<uint8_t>(),
                                      pkt->getSize(), pc, secondary_type,
                                      RubyAccessMode_Supervisor, pkt,
                                      PrefetchBit_No, proc_id, core_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
            curTick(), m_version, "Seq", "Begin", "", "",
            printAddress(msg->getPhysicalAddress()),
            RubyRequestType_to_string(secondary_type));

    // The Sequencer currently assesses instruction and data cache hit latency
    // for the top-level caches at the beginning of a memory access.
    // TODO: Eventually, this latency should be moved to represent the actual
    // cache access latency portion of the memory access. This will require
    // changing cache controller protocol files to assess the latency on the
    // access response path.
    Cycles latency(0);  // Initialize to zero to catch misconfigured latency
    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_inst_cache_hit_latency;
    else
        latency = m_data_cache_hit_latency;

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(latency));
}

template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const std::unordered_map<KEY, VALUE> &map)
{
    auto i = map.begin();
    auto end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(Addr addr)
{
#ifdef CHECK_COHERENCE
    m_ruby_system->checkGlobalCoherenceInvariant(addr);
#endif
}

void
Sequencer::recordRequestType(SequencerRequestType requestType) {
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}

void
Sequencer::evictionCallback(Addr address)
{
    ruby_eviction_callback(address);
}

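// Register the aliasing counters and latency histograms.  The per-type and
// per-machine histogram vectors are sized here to RubyRequestType_NUM and
// MachineType_NUM entries respectively, each with ten buckets.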
void
Sequencer::regStats()
{
    RubyPort::regStats();

    m_store_waiting_on_load
        .name(name() + ".store_waiting_on_load")
        .desc("Number of times a store aliased with a pending load")
        .flags(Stats::nozero);
    m_store_waiting_on_store
        .name(name() + ".store_waiting_on_store")
        .desc("Number of times a store aliased with a pending store")
        .flags(Stats::nozero);
    m_load_waiting_on_load
        .name(name() + ".load_waiting_on_load")
        .desc("Number of times a load aliased with a pending load")
        .flags(Stats::nozero);
    m_load_waiting_on_store
        .name(name() + ".load_waiting_on_store")
        .desc("Number of times a load aliased with a pending store")
        .flags(Stats::nozero);

    // These statistical variables are not for display.
    // The profiler will collate these across different
    // sequencers and display those collated statistics.
    m_outstandReqHist.init(10);
    m_latencyHist.init(10);
    m_hitLatencyHist.init(10);
    m_missLatencyHist.init(10);

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist.push_back(new Stats::Histogram());
        m_typeLatencyHist[i]->init(10);

        m_hitTypeLatencyHist.push_back(new Stats::Histogram());
        m_hitTypeLatencyHist[i]->init(10);

        m_missTypeLatencyHist.push_back(new Stats::Histogram());
        m_missTypeLatencyHist[i]->init(10);
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_hitMachLatencyHist.push_back(new Stats::Histogram());
        m_hitMachLatencyHist[i]->init(10);

        m_missMachLatencyHist.push_back(new Stats::Histogram());
        m_missMachLatencyHist[i]->init(10);

        m_IssueToInitialDelayHist.push_back(new Stats::Histogram());
        m_IssueToInitialDelayHist[i]->init(10);

        m_InitialToForwardDelayHist.push_back(new Stats::Histogram());
        m_InitialToForwardDelayHist[i]->init(10);

        m_ForwardToFirstResponseDelayHist.push_back(new Stats::Histogram());
        m_ForwardToFirstResponseDelayHist[i]->init(10);

        m_FirstResponseToCompletionDelayHist.push_back(new Stats::Histogram());
        m_FirstResponseToCompletionDelayHist[i]->init(10);
    }

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_hitTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());
        m_missTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());

        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_hitTypeMachLatencyHist[i][j]->init(10);

            m_missTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_missTypeMachLatencyHist[i][j]->init(10);
        }
    }
}