Sequencer.cc revision 9542
16145Snate@binkert.org/* 26145Snate@binkert.org * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood 36145Snate@binkert.org * All rights reserved. 46145Snate@binkert.org * 56145Snate@binkert.org * Redistribution and use in source and binary forms, with or without 66145Snate@binkert.org * modification, are permitted provided that the following conditions are 76145Snate@binkert.org * met: redistributions of source code must retain the above copyright 86145Snate@binkert.org * notice, this list of conditions and the following disclaimer; 96145Snate@binkert.org * redistributions in binary form must reproduce the above copyright 106145Snate@binkert.org * notice, this list of conditions and the following disclaimer in the 116145Snate@binkert.org * documentation and/or other materials provided with the distribution; 126145Snate@binkert.org * neither the name of the copyright holders nor the names of its 136145Snate@binkert.org * contributors may be used to endorse or promote products derived from 146145Snate@binkert.org * this software without specific prior written permission. 156145Snate@binkert.org * 166145Snate@binkert.org * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 176145Snate@binkert.org * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 186145Snate@binkert.org * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 196145Snate@binkert.org * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 206145Snate@binkert.org * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 216145Snate@binkert.org * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 226145Snate@binkert.org * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 236145Snate@binkert.org * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 246145Snate@binkert.org * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 256145Snate@binkert.org * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 266145Snate@binkert.org * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 276145Snate@binkert.org */ 286145Snate@binkert.org 298229Snate@binkert.org#include "base/misc.hh" 307056Snate@binkert.org#include "base/str.hh" 318615Snilay@cs.wisc.edu#include "config/the_isa.hh" 328615Snilay@cs.wisc.edu#if THE_ISA == X86_ISA 338615Snilay@cs.wisc.edu#include "arch/x86/insts/microldstop.hh" 348615Snilay@cs.wisc.edu#endif // X86_ISA 357632SBrad.Beckmann@amd.com#include "cpu/testers/rubytest/RubyTester.hh" 368232Snate@binkert.org#include "debug/MemoryAccess.hh" 378232Snate@binkert.org#include "debug/ProtocolTrace.hh" 388615Snilay@cs.wisc.edu#include "debug/RubySequencer.hh" 399104Shestness@cs.utexas.edu#include "debug/RubyStats.hh" 408615Snilay@cs.wisc.edu#include "mem/protocol/PrefetchBit.hh" 418615Snilay@cs.wisc.edu#include "mem/protocol/RubyAccessMode.hh" 427039Snate@binkert.org#include "mem/ruby/common/Global.hh" 437039Snate@binkert.org#include "mem/ruby/profiler/Profiler.hh" 448229Snate@binkert.org#include "mem/ruby/slicc_interface/RubyRequest.hh" 456154Snate@binkert.org#include "mem/ruby/system/Sequencer.hh" 466154Snate@binkert.org#include "mem/ruby/system/System.hh" 477550SBrad.Beckmann@amd.com#include "mem/packet.hh" 486876Ssteve.reinhardt@amd.com 497055Snate@binkert.orgusing namespace std; 507055Snate@binkert.org 516876Ssteve.reinhardt@amd.comSequencer * 526876Ssteve.reinhardt@amd.comRubySequencerParams::create() 536285Snate@binkert.org{ 546876Ssteve.reinhardt@amd.com return new Sequencer(this); 556285Snate@binkert.org} 567039Snate@binkert.org 576876Ssteve.reinhardt@amd.comSequencer::Sequencer(const Params *p) 586886SBrad.Beckmann@amd.com : RubyPort(p), deadlockCheckEvent(this) 596876Ssteve.reinhardt@amd.com{ 606876Ssteve.reinhardt@amd.com m_store_waiting_on_load_cycles = 0; 616876Ssteve.reinhardt@amd.com m_store_waiting_on_store_cycles = 0; 626876Ssteve.reinhardt@amd.com m_load_waiting_on_store_cycles = 0; 636876Ssteve.reinhardt@amd.com m_load_waiting_on_load_cycles = 0; 647039Snate@binkert.org 656876Ssteve.reinhardt@amd.com m_outstanding_count = 0; 666285Snate@binkert.org 676876Ssteve.reinhardt@amd.com m_instCache_ptr = p->icache; 686876Ssteve.reinhardt@amd.com m_dataCache_ptr = p->dcache; 696876Ssteve.reinhardt@amd.com m_max_outstanding_requests = p->max_outstanding_requests; 706876Ssteve.reinhardt@amd.com m_deadlock_threshold = p->deadlock_threshold; 716899SBrad.Beckmann@amd.com 726876Ssteve.reinhardt@amd.com assert(m_max_outstanding_requests > 0); 736876Ssteve.reinhardt@amd.com assert(m_deadlock_threshold > 0); 746876Ssteve.reinhardt@amd.com assert(m_instCache_ptr != NULL); 756876Ssteve.reinhardt@amd.com assert(m_dataCache_ptr != NULL); 768171Stushar@csail.mit.edu 778171Stushar@csail.mit.edu m_usingNetworkTester = p->using_network_tester; 786145Snate@binkert.org} 796145Snate@binkert.org 807039Snate@binkert.orgSequencer::~Sequencer() 817039Snate@binkert.org{ 826145Snate@binkert.org} 836145Snate@binkert.org 847039Snate@binkert.orgvoid 857039Snate@binkert.orgSequencer::wakeup() 867039Snate@binkert.org{ 879342SAndreas.Sandberg@arm.com assert(getDrainState() != Drainable::Draining); 889245Shestness@cs.wisc.edu 897039Snate@binkert.org // Check for deadlock of any of the requests 909501Snilay@cs.wisc.edu Cycles current_time = curCycle(); 916145Snate@binkert.org 927039Snate@binkert.org // Check across all outstanding requests 937039Snate@binkert.org int total_outstanding = 0; 946285Snate@binkert.org 957455Snate@binkert.org RequestTable::iterator read = m_readRequestTable.begin(); 967455Snate@binkert.org RequestTable::iterator read_end = m_readRequestTable.end(); 977455Snate@binkert.org for (; read != read_end; ++read) { 987455Snate@binkert.org SequencerRequest* request = read->second; 997455Snate@binkert.org if (current_time - request->issue_time < m_deadlock_threshold) 1007455Snate@binkert.org continue; 1017455Snate@binkert.org 1027805Snilay@cs.wisc.edu panic("Possible Deadlock detected. Aborting!\n" 1037921SBrad.Beckmann@amd.com "version: %d request.paddr: 0x%x m_readRequestTable: %d " 1047805Snilay@cs.wisc.edu "current time: %u issue_time: %d difference: %d\n", m_version, 1058615Snilay@cs.wisc.edu Address(request->pkt->getAddr()), m_readRequestTable.size(), 1069467Smalek.musleh@gmail.com current_time * clockPeriod(), request->issue_time * clockPeriod(), 1079467Smalek.musleh@gmail.com (current_time * clockPeriod()) - (request->issue_time * clockPeriod())); 1086145Snate@binkert.org } 1096145Snate@binkert.org 1107455Snate@binkert.org RequestTable::iterator write = m_writeRequestTable.begin(); 1117455Snate@binkert.org RequestTable::iterator write_end = m_writeRequestTable.end(); 1127455Snate@binkert.org for (; write != write_end; ++write) { 1137455Snate@binkert.org SequencerRequest* request = write->second; 1147455Snate@binkert.org if (current_time - request->issue_time < m_deadlock_threshold) 1157455Snate@binkert.org continue; 1167455Snate@binkert.org 1177805Snilay@cs.wisc.edu panic("Possible Deadlock detected. Aborting!\n" 1187921SBrad.Beckmann@amd.com "version: %d request.paddr: 0x%x m_writeRequestTable: %d " 1197805Snilay@cs.wisc.edu "current time: %u issue_time: %d difference: %d\n", m_version, 1208615Snilay@cs.wisc.edu Address(request->pkt->getAddr()), m_writeRequestTable.size(), 1219467Smalek.musleh@gmail.com current_time * clockPeriod(), request->issue_time * clockPeriod(), 1229467Smalek.musleh@gmail.com (current_time * clockPeriod()) - (request->issue_time * clockPeriod())); 1236145Snate@binkert.org } 1246285Snate@binkert.org 1257039Snate@binkert.org total_outstanding += m_writeRequestTable.size(); 1267039Snate@binkert.org total_outstanding += m_readRequestTable.size(); 1276145Snate@binkert.org 1287039Snate@binkert.org assert(m_outstanding_count == total_outstanding); 1297039Snate@binkert.org 1307039Snate@binkert.org if (m_outstanding_count > 0) { 1317039Snate@binkert.org // If there are still outstanding requests, keep checking 1329465Snilay@cs.wisc.edu schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold)); 1337039Snate@binkert.org } 1346145Snate@binkert.org} 1356145Snate@binkert.org 1367039Snate@binkert.orgvoid 1377039Snate@binkert.orgSequencer::printStats(ostream & out) const 1387039Snate@binkert.org{ 1397039Snate@binkert.org out << "Sequencer: " << m_name << endl 1407039Snate@binkert.org << " store_waiting_on_load_cycles: " 1417039Snate@binkert.org << m_store_waiting_on_load_cycles << endl 1427039Snate@binkert.org << " store_waiting_on_store_cycles: " 1437039Snate@binkert.org << m_store_waiting_on_store_cycles << endl 1447039Snate@binkert.org << " load_waiting_on_load_cycles: " 1457039Snate@binkert.org << m_load_waiting_on_load_cycles << endl 1467039Snate@binkert.org << " load_waiting_on_store_cycles: " 1477039Snate@binkert.org << m_load_waiting_on_store_cycles << endl; 1486859Sdrh5@cs.wisc.edu} 1496859Sdrh5@cs.wisc.edu 1507039Snate@binkert.orgvoid 1517039Snate@binkert.orgSequencer::printProgress(ostream& out) const 1527039Snate@binkert.org{ 1537039Snate@binkert.org#if 0 1547039Snate@binkert.org int total_demand = 0; 1557039Snate@binkert.org out << "Sequencer Stats Version " << m_version << endl; 1569171Snilay@cs.wisc.edu out << "Current time = " << g_system_ptr->getTime() << endl; 1577039Snate@binkert.org out << "---------------" << endl; 1587039Snate@binkert.org out << "outstanding requests" << endl; 1596145Snate@binkert.org 1607455Snate@binkert.org out << "proc " << m_Read 1617455Snate@binkert.org << " version Requests = " << m_readRequestTable.size() << endl; 1626145Snate@binkert.org 1637039Snate@binkert.org // print the request table 1647455Snate@binkert.org RequestTable::iterator read = m_readRequestTable.begin(); 1657455Snate@binkert.org RequestTable::iterator read_end = m_readRequestTable.end(); 1667455Snate@binkert.org for (; read != read_end; ++read) { 1677455Snate@binkert.org SequencerRequest* request = read->second; 1687039Snate@binkert.org out << "\tRequest[ " << i << " ] = " << request->type 1697039Snate@binkert.org << " Address " << rkeys[i] 1707039Snate@binkert.org << " Posted " << request->issue_time 1717039Snate@binkert.org << " PF " << PrefetchBit_No << endl; 1726145Snate@binkert.org total_demand++; 1737039Snate@binkert.org } 1746145Snate@binkert.org 1757455Snate@binkert.org out << "proc " << m_version 1767455Snate@binkert.org << " Write Requests = " << m_writeRequestTable.size << endl; 1776285Snate@binkert.org 1787039Snate@binkert.org // print the request table 1797455Snate@binkert.org RequestTable::iterator write = m_writeRequestTable.begin(); 1807455Snate@binkert.org RequestTable::iterator write_end = m_writeRequestTable.end(); 1817455Snate@binkert.org for (; write != write_end; ++write) { 1827455Snate@binkert.org SequencerRequest* request = write->second; 1837039Snate@binkert.org out << "\tRequest[ " << i << " ] = " << request.getType() 1847039Snate@binkert.org << " Address " << wkeys[i] 1857039Snate@binkert.org << " Posted " << request.getTime() 1867039Snate@binkert.org << " PF " << request.getPrefetch() << endl; 1877039Snate@binkert.org if (request.getPrefetch() == PrefetchBit_No) { 1887039Snate@binkert.org total_demand++; 1897039Snate@binkert.org } 1907039Snate@binkert.org } 1917039Snate@binkert.org 1927039Snate@binkert.org out << endl; 1937039Snate@binkert.org 1947039Snate@binkert.org out << "Total Number Outstanding: " << m_outstanding_count << endl 1957039Snate@binkert.org << "Total Number Demand : " << total_demand << endl 1967039Snate@binkert.org << "Total Number Prefetches : " << m_outstanding_count - total_demand 1977039Snate@binkert.org << endl << endl << endl; 1987039Snate@binkert.org#endif 1996145Snate@binkert.org} 2006145Snate@binkert.org 2016145Snate@binkert.org// Insert the request on the correct request table. Return true if 2026145Snate@binkert.org// the entry was already present. 2038615Snilay@cs.wisc.eduRequestStatus 2048615Snilay@cs.wisc.eduSequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type) 2057039Snate@binkert.org{ 2068641Snate@binkert.org assert(m_outstanding_count == 2078641Snate@binkert.org (m_writeRequestTable.size() + m_readRequestTable.size())); 2086145Snate@binkert.org 2097039Snate@binkert.org // See if we should schedule a deadlock check 2109342SAndreas.Sandberg@arm.com if (!deadlockCheckEvent.scheduled() && 2119342SAndreas.Sandberg@arm.com getDrainState() != Drainable::Draining) { 2129465Snilay@cs.wisc.edu schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold)); 2137039Snate@binkert.org } 2146145Snate@binkert.org 2158615Snilay@cs.wisc.edu Address line_addr(pkt->getAddr()); 2167039Snate@binkert.org line_addr.makeLineAddress(); 2179224Sandreas.hansson@arm.com // Create a default entry, mapping the address to NULL, the cast is 2189224Sandreas.hansson@arm.com // there to make gcc 4.4 happy 2199224Sandreas.hansson@arm.com RequestTable::value_type default_entry(line_addr, 2209224Sandreas.hansson@arm.com (SequencerRequest*) NULL); 2219224Sandreas.hansson@arm.com 2228615Snilay@cs.wisc.edu if ((request_type == RubyRequestType_ST) || 2238615Snilay@cs.wisc.edu (request_type == RubyRequestType_RMW_Read) || 2248615Snilay@cs.wisc.edu (request_type == RubyRequestType_RMW_Write) || 2258615Snilay@cs.wisc.edu (request_type == RubyRequestType_Load_Linked) || 2268615Snilay@cs.wisc.edu (request_type == RubyRequestType_Store_Conditional) || 2278615Snilay@cs.wisc.edu (request_type == RubyRequestType_Locked_RMW_Read) || 2288615Snilay@cs.wisc.edu (request_type == RubyRequestType_Locked_RMW_Write) || 2298615Snilay@cs.wisc.edu (request_type == RubyRequestType_FLUSH)) { 2308615Snilay@cs.wisc.edu 2318615Snilay@cs.wisc.edu // Check if there is any outstanding read request for the same 2328615Snilay@cs.wisc.edu // cache line. 2338615Snilay@cs.wisc.edu if (m_readRequestTable.count(line_addr) > 0) { 2348615Snilay@cs.wisc.edu m_store_waiting_on_load_cycles++; 2358615Snilay@cs.wisc.edu return RequestStatus_Aliased; 2368615Snilay@cs.wisc.edu } 2378615Snilay@cs.wisc.edu 2387455Snate@binkert.org pair<RequestTable::iterator, bool> r = 2399224Sandreas.hansson@arm.com m_writeRequestTable.insert(default_entry); 2408615Snilay@cs.wisc.edu if (r.second) { 2418615Snilay@cs.wisc.edu RequestTable::iterator i = r.first; 2429465Snilay@cs.wisc.edu i->second = new SequencerRequest(pkt, request_type, curCycle()); 2438615Snilay@cs.wisc.edu m_outstanding_count++; 2448615Snilay@cs.wisc.edu } else { 2458615Snilay@cs.wisc.edu // There is an outstanding write request for the cache line 2468615Snilay@cs.wisc.edu m_store_waiting_on_store_cycles++; 2478615Snilay@cs.wisc.edu return RequestStatus_Aliased; 2488615Snilay@cs.wisc.edu } 2498615Snilay@cs.wisc.edu } else { 2508615Snilay@cs.wisc.edu // Check if there is any outstanding write request for the same 2518615Snilay@cs.wisc.edu // cache line. 2528615Snilay@cs.wisc.edu if (m_writeRequestTable.count(line_addr) > 0) { 2538615Snilay@cs.wisc.edu m_load_waiting_on_store_cycles++; 2548615Snilay@cs.wisc.edu return RequestStatus_Aliased; 2558615Snilay@cs.wisc.edu } 2567039Snate@binkert.org 2577455Snate@binkert.org pair<RequestTable::iterator, bool> r = 2589224Sandreas.hansson@arm.com m_readRequestTable.insert(default_entry); 2597039Snate@binkert.org 2608615Snilay@cs.wisc.edu if (r.second) { 2618615Snilay@cs.wisc.edu RequestTable::iterator i = r.first; 2629465Snilay@cs.wisc.edu i->second = new SequencerRequest(pkt, request_type, curCycle()); 2638615Snilay@cs.wisc.edu m_outstanding_count++; 2648615Snilay@cs.wisc.edu } else { 2658615Snilay@cs.wisc.edu // There is an outstanding read request for the cache line 2668615Snilay@cs.wisc.edu m_load_waiting_on_load_cycles++; 2678615Snilay@cs.wisc.edu return RequestStatus_Aliased; 2687039Snate@binkert.org } 2696145Snate@binkert.org } 2706145Snate@binkert.org 2717039Snate@binkert.org g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count); 2728641Snate@binkert.org assert(m_outstanding_count == 2738641Snate@binkert.org (m_writeRequestTable.size() + m_readRequestTable.size())); 2746145Snate@binkert.org 2758615Snilay@cs.wisc.edu return RequestStatus_Ready; 2766145Snate@binkert.org} 2776145Snate@binkert.org 2787039Snate@binkert.orgvoid 2797455Snate@binkert.orgSequencer::markRemoved() 2807455Snate@binkert.org{ 2817455Snate@binkert.org m_outstanding_count--; 2827455Snate@binkert.org assert(m_outstanding_count == 2837455Snate@binkert.org m_writeRequestTable.size() + m_readRequestTable.size()); 2847455Snate@binkert.org} 2857455Snate@binkert.org 2867455Snate@binkert.orgvoid 2877039Snate@binkert.orgSequencer::removeRequest(SequencerRequest* srequest) 2887039Snate@binkert.org{ 2897039Snate@binkert.org assert(m_outstanding_count == 2907039Snate@binkert.org m_writeRequestTable.size() + m_readRequestTable.size()); 2916145Snate@binkert.org 2928615Snilay@cs.wisc.edu Address line_addr(srequest->pkt->getAddr()); 2937039Snate@binkert.org line_addr.makeLineAddress(); 2948615Snilay@cs.wisc.edu if ((srequest->m_type == RubyRequestType_ST) || 2958615Snilay@cs.wisc.edu (srequest->m_type == RubyRequestType_RMW_Read) || 2968615Snilay@cs.wisc.edu (srequest->m_type == RubyRequestType_RMW_Write) || 2978615Snilay@cs.wisc.edu (srequest->m_type == RubyRequestType_Load_Linked) || 2988615Snilay@cs.wisc.edu (srequest->m_type == RubyRequestType_Store_Conditional) || 2998615Snilay@cs.wisc.edu (srequest->m_type == RubyRequestType_Locked_RMW_Read) || 3008615Snilay@cs.wisc.edu (srequest->m_type == RubyRequestType_Locked_RMW_Write)) { 3017455Snate@binkert.org m_writeRequestTable.erase(line_addr); 3027039Snate@binkert.org } else { 3037455Snate@binkert.org m_readRequestTable.erase(line_addr); 3047039Snate@binkert.org } 3056285Snate@binkert.org 3067455Snate@binkert.org markRemoved(); 3076145Snate@binkert.org} 3086145Snate@binkert.org 3097560SBrad.Beckmann@amd.combool 3107560SBrad.Beckmann@amd.comSequencer::handleLlsc(const Address& address, SequencerRequest* request) 3117550SBrad.Beckmann@amd.com{ 3127560SBrad.Beckmann@amd.com // 3137560SBrad.Beckmann@amd.com // The success flag indicates whether the LLSC operation was successful. 3147560SBrad.Beckmann@amd.com // LL ops will always succeed, but SC may fail if the cache line is no 3157560SBrad.Beckmann@amd.com // longer locked. 3167560SBrad.Beckmann@amd.com // 3177560SBrad.Beckmann@amd.com bool success = true; 3188615Snilay@cs.wisc.edu if (request->m_type == RubyRequestType_Store_Conditional) { 3197550SBrad.Beckmann@amd.com if (!m_dataCache_ptr->isLocked(address, m_version)) { 3207550SBrad.Beckmann@amd.com // 3217550SBrad.Beckmann@amd.com // For failed SC requests, indicate the failure to the cpu by 3227550SBrad.Beckmann@amd.com // setting the extra data to zero. 3237550SBrad.Beckmann@amd.com // 3248615Snilay@cs.wisc.edu request->pkt->req->setExtraData(0); 3257560SBrad.Beckmann@amd.com success = false; 3267550SBrad.Beckmann@amd.com } else { 3277550SBrad.Beckmann@amd.com // 3287550SBrad.Beckmann@amd.com // For successful SC requests, indicate the success to the cpu by 3297550SBrad.Beckmann@amd.com // setting the extra data to one. 3307550SBrad.Beckmann@amd.com // 3318615Snilay@cs.wisc.edu request->pkt->req->setExtraData(1); 3327550SBrad.Beckmann@amd.com } 3337560SBrad.Beckmann@amd.com // 3347560SBrad.Beckmann@amd.com // Independent of success, all SC operations must clear the lock 3357560SBrad.Beckmann@amd.com // 3367550SBrad.Beckmann@amd.com m_dataCache_ptr->clearLocked(address); 3378615Snilay@cs.wisc.edu } else if (request->m_type == RubyRequestType_Load_Linked) { 3387550SBrad.Beckmann@amd.com // 3397550SBrad.Beckmann@amd.com // Note: To fully follow Alpha LLSC semantics, should the LL clear any 3407550SBrad.Beckmann@amd.com // previously locked cache lines? 3417550SBrad.Beckmann@amd.com // 3427550SBrad.Beckmann@amd.com m_dataCache_ptr->setLocked(address, m_version); 3438615Snilay@cs.wisc.edu } else if ((m_dataCache_ptr->isTagPresent(address)) && 3448615Snilay@cs.wisc.edu (m_dataCache_ptr->isLocked(address, m_version))) { 3457550SBrad.Beckmann@amd.com // 3467550SBrad.Beckmann@amd.com // Normal writes should clear the locked address 3477550SBrad.Beckmann@amd.com // 3487550SBrad.Beckmann@amd.com m_dataCache_ptr->clearLocked(address); 3497550SBrad.Beckmann@amd.com } 3507560SBrad.Beckmann@amd.com return success; 3517550SBrad.Beckmann@amd.com} 3527550SBrad.Beckmann@amd.com 3537550SBrad.Beckmann@amd.comvoid 3547039Snate@binkert.orgSequencer::writeCallback(const Address& address, DataBlock& data) 3557039Snate@binkert.org{ 3567546SBrad.Beckmann@amd.com writeCallback(address, GenericMachineType_NULL, data); 3577546SBrad.Beckmann@amd.com} 3587546SBrad.Beckmann@amd.com 3597546SBrad.Beckmann@amd.comvoid 3607546SBrad.Beckmann@amd.comSequencer::writeCallback(const Address& address, 3619507Snilay@cs.wisc.edu GenericMachineType mach, 3627546SBrad.Beckmann@amd.com DataBlock& data) 3637546SBrad.Beckmann@amd.com{ 3649507Snilay@cs.wisc.edu writeCallback(address, mach, data, Cycles(0), Cycles(0), Cycles(0)); 3657565SBrad.Beckmann@amd.com} 3667565SBrad.Beckmann@amd.com 3677565SBrad.Beckmann@amd.comvoid 3687565SBrad.Beckmann@amd.comSequencer::writeCallback(const Address& address, 3699507Snilay@cs.wisc.edu GenericMachineType mach, 3707565SBrad.Beckmann@amd.com DataBlock& data, 3719507Snilay@cs.wisc.edu Cycles initialRequestTime, 3729507Snilay@cs.wisc.edu Cycles forwardRequestTime, 3739507Snilay@cs.wisc.edu Cycles firstResponseTime) 3747565SBrad.Beckmann@amd.com{ 3757039Snate@binkert.org assert(address == line_address(address)); 3767455Snate@binkert.org assert(m_writeRequestTable.count(line_address(address))); 3776145Snate@binkert.org 3787455Snate@binkert.org RequestTable::iterator i = m_writeRequestTable.find(address); 3797455Snate@binkert.org assert(i != m_writeRequestTable.end()); 3807455Snate@binkert.org SequencerRequest* request = i->second; 3816145Snate@binkert.org 3827455Snate@binkert.org m_writeRequestTable.erase(i); 3837455Snate@binkert.org markRemoved(); 3846846Spdudnik@cs.wisc.edu 3858615Snilay@cs.wisc.edu assert((request->m_type == RubyRequestType_ST) || 3868615Snilay@cs.wisc.edu (request->m_type == RubyRequestType_ATOMIC) || 3878615Snilay@cs.wisc.edu (request->m_type == RubyRequestType_RMW_Read) || 3888615Snilay@cs.wisc.edu (request->m_type == RubyRequestType_RMW_Write) || 3898615Snilay@cs.wisc.edu (request->m_type == RubyRequestType_Load_Linked) || 3908615Snilay@cs.wisc.edu (request->m_type == RubyRequestType_Store_Conditional) || 3918615Snilay@cs.wisc.edu (request->m_type == RubyRequestType_Locked_RMW_Read) || 3928615Snilay@cs.wisc.edu (request->m_type == RubyRequestType_Locked_RMW_Write) || 3938615Snilay@cs.wisc.edu (request->m_type == RubyRequestType_FLUSH)); 3948184Ssomayeh@cs.wisc.edu 3956145Snate@binkert.org 3967550SBrad.Beckmann@amd.com // 3977550SBrad.Beckmann@amd.com // For Alpha, properly handle LL, SC, and write requests with respect to 3987550SBrad.Beckmann@amd.com // locked cache blocks. 3997550SBrad.Beckmann@amd.com // 4008171Stushar@csail.mit.edu // Not valid for Network_test protocl 4018171Stushar@csail.mit.edu // 4028171Stushar@csail.mit.edu bool success = true; 4038171Stushar@csail.mit.edu if(!m_usingNetworkTester) 4048171Stushar@csail.mit.edu success = handleLlsc(address, request); 4057550SBrad.Beckmann@amd.com 4068615Snilay@cs.wisc.edu if (request->m_type == RubyRequestType_Locked_RMW_Read) { 4077039Snate@binkert.org m_controller->blockOnQueue(address, m_mandatory_q_ptr); 4088615Snilay@cs.wisc.edu } else if (request->m_type == RubyRequestType_Locked_RMW_Write) { 4097039Snate@binkert.org m_controller->unblock(address); 4107039Snate@binkert.org } 4116863Sdrh5@cs.wisc.edu 4129507Snilay@cs.wisc.edu hitCallback(request, mach, data, success, 4137565SBrad.Beckmann@amd.com initialRequestTime, forwardRequestTime, firstResponseTime); 4146145Snate@binkert.org} 4156145Snate@binkert.org 4167039Snate@binkert.orgvoid 4177039Snate@binkert.orgSequencer::readCallback(const Address& address, DataBlock& data) 4187039Snate@binkert.org{ 4197546SBrad.Beckmann@amd.com readCallback(address, GenericMachineType_NULL, data); 4207546SBrad.Beckmann@amd.com} 4217546SBrad.Beckmann@amd.com 4227546SBrad.Beckmann@amd.comvoid 4237546SBrad.Beckmann@amd.comSequencer::readCallback(const Address& address, 4247546SBrad.Beckmann@amd.com GenericMachineType mach, 4257546SBrad.Beckmann@amd.com DataBlock& data) 4267546SBrad.Beckmann@amd.com{ 4279507Snilay@cs.wisc.edu readCallback(address, mach, data, Cycles(0), Cycles(0), Cycles(0)); 4287565SBrad.Beckmann@amd.com} 4297565SBrad.Beckmann@amd.com 4307565SBrad.Beckmann@amd.comvoid 4317565SBrad.Beckmann@amd.comSequencer::readCallback(const Address& address, 4327565SBrad.Beckmann@amd.com GenericMachineType mach, 4337565SBrad.Beckmann@amd.com DataBlock& data, 4349507Snilay@cs.wisc.edu Cycles initialRequestTime, 4359507Snilay@cs.wisc.edu Cycles forwardRequestTime, 4369507Snilay@cs.wisc.edu Cycles firstResponseTime) 4377565SBrad.Beckmann@amd.com{ 4387039Snate@binkert.org assert(address == line_address(address)); 4397455Snate@binkert.org assert(m_readRequestTable.count(line_address(address))); 4406145Snate@binkert.org 4417455Snate@binkert.org RequestTable::iterator i = m_readRequestTable.find(address); 4427455Snate@binkert.org assert(i != m_readRequestTable.end()); 4437455Snate@binkert.org SequencerRequest* request = i->second; 4447455Snate@binkert.org 4457455Snate@binkert.org m_readRequestTable.erase(i); 4467455Snate@binkert.org markRemoved(); 4476145Snate@binkert.org 4488615Snilay@cs.wisc.edu assert((request->m_type == RubyRequestType_LD) || 4498615Snilay@cs.wisc.edu (request->m_type == RubyRequestType_IFETCH)); 4506285Snate@binkert.org 4519507Snilay@cs.wisc.edu hitCallback(request, mach, data, true, 4527565SBrad.Beckmann@amd.com initialRequestTime, forwardRequestTime, firstResponseTime); 4536145Snate@binkert.org} 4546145Snate@binkert.org 4557039Snate@binkert.orgvoid 4567546SBrad.Beckmann@amd.comSequencer::hitCallback(SequencerRequest* srequest, 4577546SBrad.Beckmann@amd.com GenericMachineType mach, 4587560SBrad.Beckmann@amd.com DataBlock& data, 4597565SBrad.Beckmann@amd.com bool success, 4609507Snilay@cs.wisc.edu Cycles initialRequestTime, 4619507Snilay@cs.wisc.edu Cycles forwardRequestTime, 4629507Snilay@cs.wisc.edu Cycles firstResponseTime) 4637039Snate@binkert.org{ 4648615Snilay@cs.wisc.edu PacketPtr pkt = srequest->pkt; 4658615Snilay@cs.wisc.edu Address request_address(pkt->getAddr()); 4668615Snilay@cs.wisc.edu Address request_line_address(pkt->getAddr()); 4677039Snate@binkert.org request_line_address.makeLineAddress(); 4688615Snilay@cs.wisc.edu RubyRequestType type = srequest->m_type; 4699507Snilay@cs.wisc.edu Cycles issued_time = srequest->issue_time; 4706145Snate@binkert.org 4717039Snate@binkert.org // Set this cache entry to the most recently used 4727039Snate@binkert.org if (type == RubyRequestType_IFETCH) { 4738828Snilay@cs.wisc.edu m_instCache_ptr->setMRU(request_line_address); 4747039Snate@binkert.org } else { 4758828Snilay@cs.wisc.edu m_dataCache_ptr->setMRU(request_line_address); 4767039Snate@binkert.org } 4776145Snate@binkert.org 4789465Snilay@cs.wisc.edu assert(curCycle() >= issued_time); 4799507Snilay@cs.wisc.edu Cycles miss_latency = curCycle() - issued_time; 4806145Snate@binkert.org 4817039Snate@binkert.org // Profile the miss latency for all non-zero demand misses 4827039Snate@binkert.org if (miss_latency != 0) { 4837546SBrad.Beckmann@amd.com g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach); 4846285Snate@binkert.org 4857565SBrad.Beckmann@amd.com if (mach == GenericMachineType_L1Cache_wCC) { 4867565SBrad.Beckmann@amd.com g_system_ptr->getProfiler()->missLatencyWcc(issued_time, 4879465Snilay@cs.wisc.edu initialRequestTime, forwardRequestTime, 4889465Snilay@cs.wisc.edu firstResponseTime, curCycle()); 4897565SBrad.Beckmann@amd.com } 4907565SBrad.Beckmann@amd.com 4917565SBrad.Beckmann@amd.com if (mach == GenericMachineType_Directory) { 4927565SBrad.Beckmann@amd.com g_system_ptr->getProfiler()->missLatencyDir(issued_time, 4939465Snilay@cs.wisc.edu initialRequestTime, forwardRequestTime, 4949465Snilay@cs.wisc.edu firstResponseTime, curCycle()); 4957565SBrad.Beckmann@amd.com } 4967565SBrad.Beckmann@amd.com 4978266Sksewell@umich.edu DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n", 4988266Sksewell@umich.edu curTick(), m_version, "Seq", 4998266Sksewell@umich.edu success ? "Done" : "SC_Failed", "", "", 5008615Snilay@cs.wisc.edu request_address, miss_latency); 5016285Snate@binkert.org } 5026285Snate@binkert.org 5037039Snate@binkert.org // update the data 5048688Snilay@cs.wisc.edu if (g_system_ptr->m_warmup_enabled) { 5058688Snilay@cs.wisc.edu assert(pkt->getPtr<uint8_t>(false) != NULL); 5068688Snilay@cs.wisc.edu data.setData(pkt->getPtr<uint8_t>(false), 5078688Snilay@cs.wisc.edu request_address.getOffset(), pkt->getSize()); 5088688Snilay@cs.wisc.edu } else if (pkt->getPtr<uint8_t>(true) != NULL) { 5097039Snate@binkert.org if ((type == RubyRequestType_LD) || 5107039Snate@binkert.org (type == RubyRequestType_IFETCH) || 5117039Snate@binkert.org (type == RubyRequestType_RMW_Read) || 5127908Shestness@cs.utexas.edu (type == RubyRequestType_Locked_RMW_Read) || 5137907Shestness@cs.utexas.edu (type == RubyRequestType_Load_Linked)) { 5148615Snilay@cs.wisc.edu memcpy(pkt->getPtr<uint8_t>(true), 5158615Snilay@cs.wisc.edu data.getData(request_address.getOffset(), pkt->getSize()), 5168615Snilay@cs.wisc.edu pkt->getSize()); 5177039Snate@binkert.org } else { 5188615Snilay@cs.wisc.edu data.setData(pkt->getPtr<uint8_t>(true), 5198615Snilay@cs.wisc.edu request_address.getOffset(), pkt->getSize()); 5207039Snate@binkert.org } 5216285Snate@binkert.org } else { 5227039Snate@binkert.org DPRINTF(MemoryAccess, 5237039Snate@binkert.org "WARNING. Data not transfered from Ruby to M5 for type %s\n", 5247039Snate@binkert.org RubyRequestType_to_string(type)); 5257039Snate@binkert.org } 5267023SBrad.Beckmann@amd.com 5277039Snate@binkert.org // If using the RubyTester, update the RubyTester sender state's 5287039Snate@binkert.org // subBlock with the recieved data. The tester will later access 5297039Snate@binkert.org // this state. 5307039Snate@binkert.org // Note: RubyPort will access it's sender state before the 5317039Snate@binkert.org // RubyTester. 5327039Snate@binkert.org if (m_usingRubyTester) { 5339542Sandreas.hansson@arm.com RubyPort::SenderState *reqSenderState = 5348615Snilay@cs.wisc.edu safe_cast<RubyPort::SenderState*>(pkt->senderState); 5359542Sandreas.hansson@arm.com // @todo This is a dangerous assumption on nothing else 5369542Sandreas.hansson@arm.com // modifying the senderState 5377039Snate@binkert.org RubyTester::SenderState* testerSenderState = 5389542Sandreas.hansson@arm.com safe_cast<RubyTester::SenderState*>(reqSenderState->predecessor); 5399542Sandreas.hansson@arm.com testerSenderState->subBlock.mergeFrom(data); 5407039Snate@binkert.org } 5417023SBrad.Beckmann@amd.com 5427039Snate@binkert.org delete srequest; 5438688Snilay@cs.wisc.edu 5448688Snilay@cs.wisc.edu if (g_system_ptr->m_warmup_enabled) { 5458688Snilay@cs.wisc.edu delete pkt; 5468688Snilay@cs.wisc.edu g_system_ptr->m_cache_recorder->enqueueNextFetchRequest(); 5478688Snilay@cs.wisc.edu } else if (g_system_ptr->m_cooldown_enabled) { 5488688Snilay@cs.wisc.edu delete pkt; 5498688Snilay@cs.wisc.edu g_system_ptr->m_cache_recorder->enqueueNextFlushRequest(); 5508688Snilay@cs.wisc.edu } else { 5518688Snilay@cs.wisc.edu ruby_hit_callback(pkt); 5528688Snilay@cs.wisc.edu } 5536285Snate@binkert.org} 5546285Snate@binkert.org 5557039Snate@binkert.orgbool 5567039Snate@binkert.orgSequencer::empty() const 5577039Snate@binkert.org{ 5587455Snate@binkert.org return m_writeRequestTable.empty() && m_readRequestTable.empty(); 5596145Snate@binkert.org} 5606145Snate@binkert.org 5617039Snate@binkert.orgRequestStatus 5628615Snilay@cs.wisc.eduSequencer::makeRequest(PacketPtr pkt) 5637039Snate@binkert.org{ 5648615Snilay@cs.wisc.edu if (m_outstanding_count >= m_max_outstanding_requests) { 5658615Snilay@cs.wisc.edu return RequestStatus_BufferFull; 5668615Snilay@cs.wisc.edu } 5678615Snilay@cs.wisc.edu 5688615Snilay@cs.wisc.edu RubyRequestType primary_type = RubyRequestType_NULL; 5698615Snilay@cs.wisc.edu RubyRequestType secondary_type = RubyRequestType_NULL; 5708615Snilay@cs.wisc.edu 5718615Snilay@cs.wisc.edu if (pkt->isLLSC()) { 5728615Snilay@cs.wisc.edu // 5738615Snilay@cs.wisc.edu // Alpha LL/SC instructions need to be handled carefully by the cache 5748615Snilay@cs.wisc.edu // coherence protocol to ensure they follow the proper semantics. In 5758615Snilay@cs.wisc.edu // particular, by identifying the operations as atomic, the protocol 5768615Snilay@cs.wisc.edu // should understand that migratory sharing optimizations should not 5778615Snilay@cs.wisc.edu // be performed (i.e. a load between the LL and SC should not steal 5788615Snilay@cs.wisc.edu // away exclusive permission). 5798615Snilay@cs.wisc.edu // 5808615Snilay@cs.wisc.edu if (pkt->isWrite()) { 5818615Snilay@cs.wisc.edu DPRINTF(RubySequencer, "Issuing SC\n"); 5828615Snilay@cs.wisc.edu primary_type = RubyRequestType_Store_Conditional; 5838615Snilay@cs.wisc.edu } else { 5848615Snilay@cs.wisc.edu DPRINTF(RubySequencer, "Issuing LL\n"); 5858615Snilay@cs.wisc.edu assert(pkt->isRead()); 5868615Snilay@cs.wisc.edu primary_type = RubyRequestType_Load_Linked; 5878615Snilay@cs.wisc.edu } 5888615Snilay@cs.wisc.edu secondary_type = RubyRequestType_ATOMIC; 5898615Snilay@cs.wisc.edu } else if (pkt->req->isLocked()) { 5908615Snilay@cs.wisc.edu // 5918615Snilay@cs.wisc.edu // x86 locked instructions are translated to store cache coherence 5928615Snilay@cs.wisc.edu // requests because these requests should always be treated as read 5938615Snilay@cs.wisc.edu // exclusive operations and should leverage any migratory sharing 5948615Snilay@cs.wisc.edu // optimization built into the protocol. 5958615Snilay@cs.wisc.edu // 5968615Snilay@cs.wisc.edu if (pkt->isWrite()) { 5978615Snilay@cs.wisc.edu DPRINTF(RubySequencer, "Issuing Locked RMW Write\n"); 5988615Snilay@cs.wisc.edu primary_type = RubyRequestType_Locked_RMW_Write; 5998615Snilay@cs.wisc.edu } else { 6008615Snilay@cs.wisc.edu DPRINTF(RubySequencer, "Issuing Locked RMW Read\n"); 6018615Snilay@cs.wisc.edu assert(pkt->isRead()); 6028615Snilay@cs.wisc.edu primary_type = RubyRequestType_Locked_RMW_Read; 6038615Snilay@cs.wisc.edu } 6048615Snilay@cs.wisc.edu secondary_type = RubyRequestType_ST; 6058615Snilay@cs.wisc.edu } else { 6068615Snilay@cs.wisc.edu if (pkt->isRead()) { 6078615Snilay@cs.wisc.edu if (pkt->req->isInstFetch()) { 6088615Snilay@cs.wisc.edu primary_type = secondary_type = RubyRequestType_IFETCH; 6098615Snilay@cs.wisc.edu } else { 6108615Snilay@cs.wisc.edu#if THE_ISA == X86_ISA 6118615Snilay@cs.wisc.edu uint32_t flags = pkt->req->getFlags(); 6128615Snilay@cs.wisc.edu bool storeCheck = flags & 6138615Snilay@cs.wisc.edu (TheISA::StoreCheck << TheISA::FlagShift); 6148615Snilay@cs.wisc.edu#else 6158615Snilay@cs.wisc.edu bool storeCheck = false; 6168615Snilay@cs.wisc.edu#endif // X86_ISA 6178615Snilay@cs.wisc.edu if (storeCheck) { 6188615Snilay@cs.wisc.edu primary_type = RubyRequestType_RMW_Read; 6198615Snilay@cs.wisc.edu secondary_type = RubyRequestType_ST; 6208615Snilay@cs.wisc.edu } else { 6218615Snilay@cs.wisc.edu primary_type = secondary_type = RubyRequestType_LD; 6228615Snilay@cs.wisc.edu } 6238615Snilay@cs.wisc.edu } 6248615Snilay@cs.wisc.edu } else if (pkt->isWrite()) { 6258615Snilay@cs.wisc.edu // 6268615Snilay@cs.wisc.edu // Note: M5 packets do not differentiate ST from RMW_Write 6278615Snilay@cs.wisc.edu // 6288615Snilay@cs.wisc.edu primary_type = secondary_type = RubyRequestType_ST; 6298615Snilay@cs.wisc.edu } else if (pkt->isFlush()) { 6308615Snilay@cs.wisc.edu primary_type = secondary_type = RubyRequestType_FLUSH; 6318615Snilay@cs.wisc.edu } else { 6328615Snilay@cs.wisc.edu panic("Unsupported ruby packet type\n"); 6338615Snilay@cs.wisc.edu } 6348615Snilay@cs.wisc.edu } 6358615Snilay@cs.wisc.edu 6368615Snilay@cs.wisc.edu RequestStatus status = insertRequest(pkt, primary_type); 6377039Snate@binkert.org if (status != RequestStatus_Ready) 6387039Snate@binkert.org return status; 6396349Spdudnik@gmail.com 6408615Snilay@cs.wisc.edu issueRequest(pkt, secondary_type); 6416145Snate@binkert.org 6427039Snate@binkert.org // TODO: issue hardware prefetches here 6437039Snate@binkert.org return RequestStatus_Issued; 6446145Snate@binkert.org} 6456145Snate@binkert.org 6467039Snate@binkert.orgvoid 6478615Snilay@cs.wisc.eduSequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type) 6487039Snate@binkert.org{ 6499216Sandreas.hansson@arm.com assert(pkt != NULL); 6508615Snilay@cs.wisc.edu int proc_id = -1; 6519216Sandreas.hansson@arm.com if (pkt->req->hasContextId()) { 6528615Snilay@cs.wisc.edu proc_id = pkt->req->contextId(); 6537039Snate@binkert.org } 6546285Snate@binkert.org 6558615Snilay@cs.wisc.edu // If valid, copy the pc to the ruby request 6568615Snilay@cs.wisc.edu Addr pc = 0; 6578615Snilay@cs.wisc.edu if (pkt->req->hasPC()) { 6588615Snilay@cs.wisc.edu pc = pkt->req->getPC(); 6597039Snate@binkert.org } 6606285Snate@binkert.org 6619508Snilay@cs.wisc.edu RubyRequest *msg = new RubyRequest(clockEdge(), pkt->getAddr(), 6628615Snilay@cs.wisc.edu pkt->getPtr<uint8_t>(true), 6638615Snilay@cs.wisc.edu pkt->getSize(), pc, secondary_type, 6648615Snilay@cs.wisc.edu RubyAccessMode_Supervisor, pkt, 6658188SLisa.Hsu@amd.com PrefetchBit_No, proc_id); 6666285Snate@binkert.org 6678266Sksewell@umich.edu DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n", 6688266Sksewell@umich.edu curTick(), m_version, "Seq", "Begin", "", "", 6698615Snilay@cs.wisc.edu msg->getPhysicalAddress(), 6708615Snilay@cs.wisc.edu RubyRequestType_to_string(secondary_type)); 6716285Snate@binkert.org 6729499Snilay@cs.wisc.edu Cycles latency(0); // initialzed to an null value 6736285Snate@binkert.org 6748615Snilay@cs.wisc.edu if (secondary_type == RubyRequestType_IFETCH) 6757039Snate@binkert.org latency = m_instCache_ptr->getLatency(); 6767039Snate@binkert.org else 6777039Snate@binkert.org latency = m_dataCache_ptr->getLatency(); 6786285Snate@binkert.org 6797039Snate@binkert.org // Send the message to the cache controller 6807039Snate@binkert.org assert(latency > 0); 6816145Snate@binkert.org 6827039Snate@binkert.org assert(m_mandatory_q_ptr != NULL); 6837039Snate@binkert.org m_mandatory_q_ptr->enqueue(msg, latency); 6846145Snate@binkert.org} 6856145Snate@binkert.org 6867455Snate@binkert.orgtemplate <class KEY, class VALUE> 6877455Snate@binkert.orgstd::ostream & 6887455Snate@binkert.orgoperator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map) 6897455Snate@binkert.org{ 6907455Snate@binkert.org typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin(); 6917455Snate@binkert.org typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end(); 6927455Snate@binkert.org 6937455Snate@binkert.org out << "["; 6947455Snate@binkert.org for (; i != end; ++i) 6957455Snate@binkert.org out << " " << i->first << "=" << i->second; 6967455Snate@binkert.org out << " ]"; 6977455Snate@binkert.org 6987455Snate@binkert.org return out; 6997455Snate@binkert.org} 7007455Snate@binkert.org 7017039Snate@binkert.orgvoid 7027039Snate@binkert.orgSequencer::print(ostream& out) const 7037039Snate@binkert.org{ 7047039Snate@binkert.org out << "[Sequencer: " << m_version 7057039Snate@binkert.org << ", outstanding requests: " << m_outstanding_count 7067039Snate@binkert.org << ", read request table: " << m_readRequestTable 7077039Snate@binkert.org << ", write request table: " << m_writeRequestTable 7087039Snate@binkert.org << "]"; 7097039Snate@binkert.org} 7107039Snate@binkert.org 7117039Snate@binkert.org// this can be called from setState whenever coherence permissions are 7127039Snate@binkert.org// upgraded when invoked, coherence violations will be checked for the 7137039Snate@binkert.org// given block 7147039Snate@binkert.orgvoid 7157039Snate@binkert.orgSequencer::checkCoherence(const Address& addr) 7167039Snate@binkert.org{ 7176145Snate@binkert.org#ifdef CHECK_COHERENCE 7187039Snate@binkert.org g_system_ptr->checkGlobalCoherenceInvariant(addr); 7196145Snate@binkert.org#endif 7206145Snate@binkert.org} 7218717Snilay@cs.wisc.edu 7228717Snilay@cs.wisc.eduvoid 7239104Shestness@cs.utexas.eduSequencer::recordRequestType(SequencerRequestType requestType) { 7249104Shestness@cs.utexas.edu DPRINTF(RubyStats, "Recorded statistic: %s\n", 7259104Shestness@cs.utexas.edu SequencerRequestType_to_string(requestType)); 7269104Shestness@cs.utexas.edu} 7279104Shestness@cs.utexas.edu 7289104Shestness@cs.utexas.edu 7299104Shestness@cs.utexas.eduvoid 7308717Snilay@cs.wisc.eduSequencer::evictionCallback(const Address& address) 7318717Snilay@cs.wisc.edu{ 7328717Snilay@cs.wisc.edu ruby_eviction_callback(address); 7338717Snilay@cs.wisc.edu} 734