// Sequencer.cc, revision 7055
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT 206145Snate@binkert.org * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 216145Snate@binkert.org * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 226145Snate@binkert.org * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 236145Snate@binkert.org * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 246145Snate@binkert.org * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 256145Snate@binkert.org * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 266145Snate@binkert.org * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 276145Snate@binkert.org */ 286145Snate@binkert.org 297039Snate@binkert.org#include "cpu/rubytest/RubyTester.hh" 307039Snate@binkert.org#include "mem/gems_common/Map.hh" 317039Snate@binkert.org#include "mem/protocol/CacheMsg.hh" 327039Snate@binkert.org#include "mem/protocol/Protocol.hh" 337039Snate@binkert.org#include "mem/protocol/Protocol.hh" 347039Snate@binkert.org#include "mem/ruby/buffers/MessageBuffer.hh" 357039Snate@binkert.org#include "mem/ruby/common/Global.hh" 367039Snate@binkert.org#include "mem/ruby/common/SubBlock.hh" 376845Sdrh5@cs.wisc.edu#include "mem/ruby/libruby.hh" 387039Snate@binkert.org#include "mem/ruby/profiler/Profiler.hh" 397039Snate@binkert.org#include "mem/ruby/recorder/Tracer.hh" 407039Snate@binkert.org#include "mem/ruby/slicc_interface/AbstractController.hh" 417039Snate@binkert.org#include "mem/ruby/system/CacheMemory.hh" 426154Snate@binkert.org#include "mem/ruby/system/Sequencer.hh" 436154Snate@binkert.org#include "mem/ruby/system/System.hh" 446876Ssteve.reinhardt@amd.com#include "params/RubySequencer.hh" 456876Ssteve.reinhardt@amd.com 467055Snate@binkert.orgusing namespace std; 477055Snate@binkert.org 486876Ssteve.reinhardt@amd.comSequencer * 496876Ssteve.reinhardt@amd.comRubySequencerParams::create() 506285Snate@binkert.org{ 
516876Ssteve.reinhardt@amd.com return new Sequencer(this); 526285Snate@binkert.org} 537039Snate@binkert.org 546876Ssteve.reinhardt@amd.comSequencer::Sequencer(const Params *p) 556886SBrad.Beckmann@amd.com : RubyPort(p), deadlockCheckEvent(this) 566876Ssteve.reinhardt@amd.com{ 576876Ssteve.reinhardt@amd.com m_store_waiting_on_load_cycles = 0; 586876Ssteve.reinhardt@amd.com m_store_waiting_on_store_cycles = 0; 596876Ssteve.reinhardt@amd.com m_load_waiting_on_store_cycles = 0; 606876Ssteve.reinhardt@amd.com m_load_waiting_on_load_cycles = 0; 617039Snate@binkert.org 626876Ssteve.reinhardt@amd.com m_outstanding_count = 0; 636285Snate@binkert.org 646876Ssteve.reinhardt@amd.com m_max_outstanding_requests = 0; 656876Ssteve.reinhardt@amd.com m_deadlock_threshold = 0; 666876Ssteve.reinhardt@amd.com m_instCache_ptr = NULL; 676876Ssteve.reinhardt@amd.com m_dataCache_ptr = NULL; 686145Snate@binkert.org 696876Ssteve.reinhardt@amd.com m_instCache_ptr = p->icache; 706876Ssteve.reinhardt@amd.com m_dataCache_ptr = p->dcache; 716876Ssteve.reinhardt@amd.com m_max_outstanding_requests = p->max_outstanding_requests; 726876Ssteve.reinhardt@amd.com m_deadlock_threshold = p->deadlock_threshold; 736899SBrad.Beckmann@amd.com m_usingRubyTester = p->using_ruby_tester; 746899SBrad.Beckmann@amd.com 756876Ssteve.reinhardt@amd.com assert(m_max_outstanding_requests > 0); 766876Ssteve.reinhardt@amd.com assert(m_deadlock_threshold > 0); 776876Ssteve.reinhardt@amd.com assert(m_instCache_ptr != NULL); 786876Ssteve.reinhardt@amd.com assert(m_dataCache_ptr != NULL); 796145Snate@binkert.org} 806145Snate@binkert.org 817039Snate@binkert.orgSequencer::~Sequencer() 827039Snate@binkert.org{ 836145Snate@binkert.org} 846145Snate@binkert.org 857039Snate@binkert.orgvoid 867039Snate@binkert.orgSequencer::wakeup() 877039Snate@binkert.org{ 887039Snate@binkert.org // Check for deadlock of any of the requests 897039Snate@binkert.org Time current_time = g_eventQueue_ptr->getTime(); 906145Snate@binkert.org 
917039Snate@binkert.org // Check across all outstanding requests 927039Snate@binkert.org int total_outstanding = 0; 936285Snate@binkert.org 947039Snate@binkert.org Vector<Address> keys = m_readRequestTable.keys(); 957039Snate@binkert.org for (int i = 0; i < keys.size(); i++) { 967039Snate@binkert.org SequencerRequest* request = m_readRequestTable.lookup(keys[i]); 977039Snate@binkert.org if (current_time - request->issue_time >= m_deadlock_threshold) { 987039Snate@binkert.org WARN_MSG("Possible Deadlock detected"); 997039Snate@binkert.org WARN_EXPR(request); 1007039Snate@binkert.org WARN_EXPR(m_version); 1017039Snate@binkert.org WARN_EXPR(request->ruby_request.paddr); 1027039Snate@binkert.org WARN_EXPR(keys.size()); 1037039Snate@binkert.org WARN_EXPR(current_time); 1047039Snate@binkert.org WARN_EXPR(request->issue_time); 1057039Snate@binkert.org WARN_EXPR(current_time - request->issue_time); 1067039Snate@binkert.org ERROR_MSG("Aborting"); 1077039Snate@binkert.org } 1086145Snate@binkert.org } 1096145Snate@binkert.org 1107039Snate@binkert.org keys = m_writeRequestTable.keys(); 1117039Snate@binkert.org for (int i = 0; i < keys.size(); i++) { 1127039Snate@binkert.org SequencerRequest* request = m_writeRequestTable.lookup(keys[i]); 1137039Snate@binkert.org if (current_time - request->issue_time >= m_deadlock_threshold) { 1147039Snate@binkert.org WARN_MSG("Possible Deadlock detected"); 1157039Snate@binkert.org WARN_EXPR(request); 1167039Snate@binkert.org WARN_EXPR(m_version); 1177039Snate@binkert.org WARN_EXPR(current_time); 1187039Snate@binkert.org WARN_EXPR(request->issue_time); 1197039Snate@binkert.org WARN_EXPR(current_time - request->issue_time); 1207039Snate@binkert.org WARN_EXPR(keys.size()); 1217039Snate@binkert.org ERROR_MSG("Aborting"); 1227039Snate@binkert.org } 1236145Snate@binkert.org } 1246285Snate@binkert.org 1257039Snate@binkert.org total_outstanding += m_writeRequestTable.size(); 1267039Snate@binkert.org total_outstanding += m_readRequestTable.size(); 
1276145Snate@binkert.org 1287039Snate@binkert.org assert(m_outstanding_count == total_outstanding); 1297039Snate@binkert.org 1307039Snate@binkert.org if (m_outstanding_count > 0) { 1317039Snate@binkert.org // If there are still outstanding requests, keep checking 1327039Snate@binkert.org schedule(deadlockCheckEvent, 1337039Snate@binkert.org m_deadlock_threshold * g_eventQueue_ptr->getClock() + 1347039Snate@binkert.org curTick); 1357039Snate@binkert.org } 1366145Snate@binkert.org} 1376145Snate@binkert.org 1387039Snate@binkert.orgvoid 1397039Snate@binkert.orgSequencer::printStats(ostream & out) const 1407039Snate@binkert.org{ 1417039Snate@binkert.org out << "Sequencer: " << m_name << endl 1427039Snate@binkert.org << " store_waiting_on_load_cycles: " 1437039Snate@binkert.org << m_store_waiting_on_load_cycles << endl 1447039Snate@binkert.org << " store_waiting_on_store_cycles: " 1457039Snate@binkert.org << m_store_waiting_on_store_cycles << endl 1467039Snate@binkert.org << " load_waiting_on_load_cycles: " 1477039Snate@binkert.org << m_load_waiting_on_load_cycles << endl 1487039Snate@binkert.org << " load_waiting_on_store_cycles: " 1497039Snate@binkert.org << m_load_waiting_on_store_cycles << endl; 1506859Sdrh5@cs.wisc.edu} 1516859Sdrh5@cs.wisc.edu 1527039Snate@binkert.orgvoid 1537039Snate@binkert.orgSequencer::printProgress(ostream& out) const 1547039Snate@binkert.org{ 1557039Snate@binkert.org#if 0 1567039Snate@binkert.org int total_demand = 0; 1577039Snate@binkert.org out << "Sequencer Stats Version " << m_version << endl; 1587039Snate@binkert.org out << "Current time = " << g_eventQueue_ptr->getTime() << endl; 1597039Snate@binkert.org out << "---------------" << endl; 1607039Snate@binkert.org out << "outstanding requests" << endl; 1616145Snate@binkert.org 1627039Snate@binkert.org Vector<Address> rkeys = m_readRequestTable.keys(); 1637039Snate@binkert.org int read_size = rkeys.size(); 1647039Snate@binkert.org out << "proc " << m_version << " Read Requests = " << 
read_size << endl; 1656145Snate@binkert.org 1667039Snate@binkert.org // print the request table 1677039Snate@binkert.org for (int i = 0; i < read_size; ++i) { 1687039Snate@binkert.org SequencerRequest *request = m_readRequestTable.lookup(rkeys[i]); 1697039Snate@binkert.org out << "\tRequest[ " << i << " ] = " << request->type 1707039Snate@binkert.org << " Address " << rkeys[i] 1717039Snate@binkert.org << " Posted " << request->issue_time 1727039Snate@binkert.org << " PF " << PrefetchBit_No << endl; 1736145Snate@binkert.org total_demand++; 1747039Snate@binkert.org } 1756145Snate@binkert.org 1767039Snate@binkert.org Vector<Address> wkeys = m_writeRequestTable.keys(); 1777039Snate@binkert.org int write_size = wkeys.size(); 1787039Snate@binkert.org out << "proc " << m_version << " Write Requests = " << write_size << endl; 1796285Snate@binkert.org 1807039Snate@binkert.org // print the request table 1817039Snate@binkert.org for (int i = 0; i < write_size; ++i){ 1827039Snate@binkert.org CacheMsg &request = m_writeRequestTable.lookup(wkeys[i]); 1837039Snate@binkert.org out << "\tRequest[ " << i << " ] = " << request.getType() 1847039Snate@binkert.org << " Address " << wkeys[i] 1857039Snate@binkert.org << " Posted " << request.getTime() 1867039Snate@binkert.org << " PF " << request.getPrefetch() << endl; 1877039Snate@binkert.org if (request.getPrefetch() == PrefetchBit_No) { 1887039Snate@binkert.org total_demand++; 1897039Snate@binkert.org } 1907039Snate@binkert.org } 1917039Snate@binkert.org 1927039Snate@binkert.org out << endl; 1937039Snate@binkert.org 1947039Snate@binkert.org out << "Total Number Outstanding: " << m_outstanding_count << endl 1957039Snate@binkert.org << "Total Number Demand : " << total_demand << endl 1967039Snate@binkert.org << "Total Number Prefetches : " << m_outstanding_count - total_demand 1977039Snate@binkert.org << endl << endl << endl; 1987039Snate@binkert.org#endif 1996145Snate@binkert.org} 2006145Snate@binkert.org 2017039Snate@binkert.orgvoid 
2027039Snate@binkert.orgSequencer::printConfig(ostream& out) const 2037039Snate@binkert.org{ 2047039Snate@binkert.org out << "Seqeuncer config: " << m_name << endl 2057039Snate@binkert.org << " controller: " << m_controller->getName() << endl 2067039Snate@binkert.org << " version: " << m_version << endl 2077039Snate@binkert.org << " max_outstanding_requests: " << m_max_outstanding_requests << endl 2087039Snate@binkert.org << " deadlock_threshold: " << m_deadlock_threshold << endl; 2096145Snate@binkert.org} 2106145Snate@binkert.org 2116145Snate@binkert.org// Insert the request on the correct request table. Return true if 2126145Snate@binkert.org// the entry was already present. 2137039Snate@binkert.orgbool 2147039Snate@binkert.orgSequencer::insertRequest(SequencerRequest* request) 2157039Snate@binkert.org{ 2167039Snate@binkert.org int total_outstanding = 2177039Snate@binkert.org m_writeRequestTable.size() + m_readRequestTable.size(); 2186285Snate@binkert.org 2197039Snate@binkert.org assert(m_outstanding_count == total_outstanding); 2206145Snate@binkert.org 2217039Snate@binkert.org // See if we should schedule a deadlock check 2227039Snate@binkert.org if (deadlockCheckEvent.scheduled() == false) { 2237039Snate@binkert.org schedule(deadlockCheckEvent, m_deadlock_threshold + curTick); 2247039Snate@binkert.org } 2256145Snate@binkert.org 2267039Snate@binkert.org Address line_addr(request->ruby_request.paddr); 2277039Snate@binkert.org line_addr.makeLineAddress(); 2287039Snate@binkert.org if ((request->ruby_request.type == RubyRequestType_ST) || 2297039Snate@binkert.org (request->ruby_request.type == RubyRequestType_RMW_Read) || 2307039Snate@binkert.org (request->ruby_request.type == RubyRequestType_RMW_Write) || 2317039Snate@binkert.org (request->ruby_request.type == RubyRequestType_Locked_Read) || 2327039Snate@binkert.org (request->ruby_request.type == RubyRequestType_Locked_Write)) { 2337039Snate@binkert.org if (m_writeRequestTable.exist(line_addr)) { 
2347039Snate@binkert.org m_writeRequestTable.lookup(line_addr) = request; 2357039Snate@binkert.org // return true; 2367039Snate@binkert.org 2377039Snate@binkert.org // drh5: isn't this an error? do you lose the initial request? 2387039Snate@binkert.org assert(0); 2397039Snate@binkert.org } 2407039Snate@binkert.org m_writeRequestTable.allocate(line_addr); 2417039Snate@binkert.org m_writeRequestTable.lookup(line_addr) = request; 2427039Snate@binkert.org m_outstanding_count++; 2437039Snate@binkert.org } else { 2447039Snate@binkert.org if (m_readRequestTable.exist(line_addr)) { 2457039Snate@binkert.org m_readRequestTable.lookup(line_addr) = request; 2467039Snate@binkert.org // return true; 2477039Snate@binkert.org 2487039Snate@binkert.org // drh5: isn't this an error? do you lose the initial request? 2497039Snate@binkert.org assert(0); 2507039Snate@binkert.org } 2517039Snate@binkert.org m_readRequestTable.allocate(line_addr); 2527039Snate@binkert.org m_readRequestTable.lookup(line_addr) = request; 2537039Snate@binkert.org m_outstanding_count++; 2546145Snate@binkert.org } 2556145Snate@binkert.org 2567039Snate@binkert.org g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count); 2576145Snate@binkert.org 2587039Snate@binkert.org total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size(); 2597039Snate@binkert.org assert(m_outstanding_count == total_outstanding); 2606145Snate@binkert.org 2617039Snate@binkert.org return false; 2626145Snate@binkert.org} 2636145Snate@binkert.org 2647039Snate@binkert.orgvoid 2657039Snate@binkert.orgSequencer::removeRequest(SequencerRequest* srequest) 2667039Snate@binkert.org{ 2677039Snate@binkert.org assert(m_outstanding_count == 2687039Snate@binkert.org m_writeRequestTable.size() + m_readRequestTable.size()); 2696145Snate@binkert.org 2707039Snate@binkert.org const RubyRequest & ruby_request = srequest->ruby_request; 2717039Snate@binkert.org Address line_addr(ruby_request.paddr); 2727039Snate@binkert.org 
line_addr.makeLineAddress(); 2737039Snate@binkert.org if ((ruby_request.type == RubyRequestType_ST) || 2747039Snate@binkert.org (ruby_request.type == RubyRequestType_RMW_Read) || 2757039Snate@binkert.org (ruby_request.type == RubyRequestType_RMW_Write) || 2767039Snate@binkert.org (ruby_request.type == RubyRequestType_Locked_Read) || 2777039Snate@binkert.org (ruby_request.type == RubyRequestType_Locked_Write)) { 2787039Snate@binkert.org m_writeRequestTable.deallocate(line_addr); 2797039Snate@binkert.org } else { 2807039Snate@binkert.org m_readRequestTable.deallocate(line_addr); 2817039Snate@binkert.org } 2827039Snate@binkert.org m_outstanding_count--; 2836285Snate@binkert.org 2847039Snate@binkert.org assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size()); 2856145Snate@binkert.org} 2866145Snate@binkert.org 2877039Snate@binkert.orgvoid 2887039Snate@binkert.orgSequencer::writeCallback(const Address& address, DataBlock& data) 2897039Snate@binkert.org{ 2907039Snate@binkert.org assert(address == line_address(address)); 2917039Snate@binkert.org assert(m_writeRequestTable.exist(line_address(address))); 2926145Snate@binkert.org 2937039Snate@binkert.org SequencerRequest* request = m_writeRequestTable.lookup(address); 2946145Snate@binkert.org 2957039Snate@binkert.org removeRequest(request); 2966846Spdudnik@cs.wisc.edu 2977039Snate@binkert.org assert((request->ruby_request.type == RubyRequestType_ST) || 2987039Snate@binkert.org (request->ruby_request.type == RubyRequestType_RMW_Read) || 2997039Snate@binkert.org (request->ruby_request.type == RubyRequestType_RMW_Write) || 3007039Snate@binkert.org (request->ruby_request.type == RubyRequestType_Locked_Read) || 3017039Snate@binkert.org (request->ruby_request.type == RubyRequestType_Locked_Write)); 3026145Snate@binkert.org 3037039Snate@binkert.org if (request->ruby_request.type == RubyRequestType_Locked_Read) { 3047039Snate@binkert.org m_dataCache_ptr->setLocked(address, m_version); 
3057039Snate@binkert.org } else if (request->ruby_request.type == RubyRequestType_RMW_Read) { 3067039Snate@binkert.org m_controller->blockOnQueue(address, m_mandatory_q_ptr); 3077039Snate@binkert.org } else if (request->ruby_request.type == RubyRequestType_RMW_Write) { 3087039Snate@binkert.org m_controller->unblock(address); 3097039Snate@binkert.org } 3106863Sdrh5@cs.wisc.edu 3117039Snate@binkert.org hitCallback(request, data); 3126145Snate@binkert.org} 3136145Snate@binkert.org 3147039Snate@binkert.orgvoid 3157039Snate@binkert.orgSequencer::readCallback(const Address& address, DataBlock& data) 3167039Snate@binkert.org{ 3177039Snate@binkert.org assert(address == line_address(address)); 3187039Snate@binkert.org assert(m_readRequestTable.exist(line_address(address))); 3196145Snate@binkert.org 3207039Snate@binkert.org SequencerRequest* request = m_readRequestTable.lookup(address); 3217039Snate@binkert.org removeRequest(request); 3226145Snate@binkert.org 3237039Snate@binkert.org assert((request->ruby_request.type == RubyRequestType_LD) || 3247039Snate@binkert.org (request->ruby_request.type == RubyRequestType_RMW_Read) || 3257039Snate@binkert.org (request->ruby_request.type == RubyRequestType_IFETCH)); 3266285Snate@binkert.org 3277039Snate@binkert.org hitCallback(request, data); 3286145Snate@binkert.org} 3296145Snate@binkert.org 3307039Snate@binkert.orgvoid 3317039Snate@binkert.orgSequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) 3327039Snate@binkert.org{ 3337039Snate@binkert.org const RubyRequest & ruby_request = srequest->ruby_request; 3347039Snate@binkert.org Address request_address(ruby_request.paddr); 3357039Snate@binkert.org Address request_line_address(ruby_request.paddr); 3367039Snate@binkert.org request_line_address.makeLineAddress(); 3377039Snate@binkert.org RubyRequestType type = ruby_request.type; 3387039Snate@binkert.org Time issued_time = srequest->issue_time; 3396145Snate@binkert.org 3407039Snate@binkert.org // Set this cache entry to 
the most recently used 3417039Snate@binkert.org if (type == RubyRequestType_IFETCH) { 3427039Snate@binkert.org if (m_instCache_ptr->isTagPresent(request_line_address)) 3437039Snate@binkert.org m_instCache_ptr->setMRU(request_line_address); 3447039Snate@binkert.org } else { 3457039Snate@binkert.org if (m_dataCache_ptr->isTagPresent(request_line_address)) 3467039Snate@binkert.org m_dataCache_ptr->setMRU(request_line_address); 3477039Snate@binkert.org } 3486145Snate@binkert.org 3497039Snate@binkert.org assert(g_eventQueue_ptr->getTime() >= issued_time); 3507039Snate@binkert.org Time miss_latency = g_eventQueue_ptr->getTime() - issued_time; 3516145Snate@binkert.org 3527039Snate@binkert.org // Profile the miss latency for all non-zero demand misses 3537039Snate@binkert.org if (miss_latency != 0) { 3547039Snate@binkert.org g_system_ptr->getProfiler()->missLatency(miss_latency, type); 3556285Snate@binkert.org 3567039Snate@binkert.org if (Debug::getProtocolTrace()) { 3577039Snate@binkert.org g_system_ptr->getProfiler()-> 3587039Snate@binkert.org profileTransition("Seq", m_version, 3597039Snate@binkert.org Address(ruby_request.paddr), "", "Done", "", 3607039Snate@binkert.org csprintf("%d cycles", miss_latency)); 3617039Snate@binkert.org } 3626285Snate@binkert.org } 3637039Snate@binkert.org#if 0 3647039Snate@binkert.org if (request.getPrefetch() == PrefetchBit_Yes) { 3657039Snate@binkert.org return; // Ignore the prefetch 3667039Snate@binkert.org } 3677039Snate@binkert.org#endif 3686285Snate@binkert.org 3697039Snate@binkert.org // update the data 3707039Snate@binkert.org if (ruby_request.data != NULL) { 3717039Snate@binkert.org if ((type == RubyRequestType_LD) || 3727039Snate@binkert.org (type == RubyRequestType_IFETCH) || 3737039Snate@binkert.org (type == RubyRequestType_RMW_Read) || 3747039Snate@binkert.org (type == RubyRequestType_Locked_Read)) { 3757023SBrad.Beckmann@amd.com 3767039Snate@binkert.org memcpy(ruby_request.data, 3777039Snate@binkert.org 
data.getData(request_address.getOffset(), ruby_request.len), 3787039Snate@binkert.org ruby_request.len); 3797039Snate@binkert.org } else { 3807039Snate@binkert.org data.setData(ruby_request.data, request_address.getOffset(), 3817039Snate@binkert.org ruby_request.len); 3827039Snate@binkert.org } 3836285Snate@binkert.org } else { 3847039Snate@binkert.org DPRINTF(MemoryAccess, 3857039Snate@binkert.org "WARNING. Data not transfered from Ruby to M5 for type %s\n", 3867039Snate@binkert.org RubyRequestType_to_string(type)); 3877039Snate@binkert.org } 3887023SBrad.Beckmann@amd.com 3897039Snate@binkert.org // If using the RubyTester, update the RubyTester sender state's 3907039Snate@binkert.org // subBlock with the recieved data. The tester will later access 3917039Snate@binkert.org // this state. 3927039Snate@binkert.org // Note: RubyPort will access it's sender state before the 3937039Snate@binkert.org // RubyTester. 3947039Snate@binkert.org if (m_usingRubyTester) { 3957039Snate@binkert.org RubyPort::SenderState *requestSenderState = 3967039Snate@binkert.org safe_cast<RubyPort::SenderState*>(ruby_request.pkt->senderState); 3977039Snate@binkert.org RubyTester::SenderState* testerSenderState = 3987039Snate@binkert.org safe_cast<RubyTester::SenderState*>(requestSenderState->saved); 3997039Snate@binkert.org testerSenderState->subBlock->mergeFrom(data); 4007039Snate@binkert.org } 4017023SBrad.Beckmann@amd.com 4027039Snate@binkert.org ruby_hit_callback(ruby_request.pkt); 4037039Snate@binkert.org delete srequest; 4046285Snate@binkert.org} 4056285Snate@binkert.org 4066285Snate@binkert.org// Returns true if the sequencer already has a load or store outstanding 4077039Snate@binkert.orgRequestStatus 4087039Snate@binkert.orgSequencer::getRequestStatus(const RubyRequest& request) 4097039Snate@binkert.org{ 4107039Snate@binkert.org bool is_outstanding_store = 4117039Snate@binkert.org m_writeRequestTable.exist(line_address(Address(request.paddr))); 4127039Snate@binkert.org bool 
is_outstanding_load = 4137039Snate@binkert.org m_readRequestTable.exist(line_address(Address(request.paddr))); 4147039Snate@binkert.org if (is_outstanding_store) { 4157039Snate@binkert.org if ((request.type == RubyRequestType_LD) || 4167039Snate@binkert.org (request.type == RubyRequestType_IFETCH) || 4177039Snate@binkert.org (request.type == RubyRequestType_RMW_Read)) { 4187039Snate@binkert.org m_store_waiting_on_load_cycles++; 4197039Snate@binkert.org } else { 4207039Snate@binkert.org m_store_waiting_on_store_cycles++; 4217039Snate@binkert.org } 4227039Snate@binkert.org return RequestStatus_Aliased; 4237039Snate@binkert.org } else if (is_outstanding_load) { 4247039Snate@binkert.org if ((request.type == RubyRequestType_ST) || 4257039Snate@binkert.org (request.type == RubyRequestType_RMW_Write)) { 4267039Snate@binkert.org m_load_waiting_on_store_cycles++; 4277039Snate@binkert.org } else { 4287039Snate@binkert.org m_load_waiting_on_load_cycles++; 4297039Snate@binkert.org } 4307039Snate@binkert.org return RequestStatus_Aliased; 4316859Sdrh5@cs.wisc.edu } 4327039Snate@binkert.org 4337039Snate@binkert.org if (m_outstanding_count >= m_max_outstanding_requests) { 4347039Snate@binkert.org return RequestStatus_BufferFull; 4356859Sdrh5@cs.wisc.edu } 4366145Snate@binkert.org 4377039Snate@binkert.org return RequestStatus_Ready; 4386145Snate@binkert.org} 4396145Snate@binkert.org 4407039Snate@binkert.orgbool 4417039Snate@binkert.orgSequencer::empty() const 4427039Snate@binkert.org{ 4437039Snate@binkert.org return m_writeRequestTable.size() == 0 && m_readRequestTable.size() == 0; 4446145Snate@binkert.org} 4456145Snate@binkert.org 4467039Snate@binkert.orgRequestStatus 4477039Snate@binkert.orgSequencer::makeRequest(const RubyRequest &request) 4487039Snate@binkert.org{ 4497039Snate@binkert.org assert(Address(request.paddr).getOffset() + request.len <= 4507039Snate@binkert.org RubySystem::getBlockSizeBytes()); 4517039Snate@binkert.org RequestStatus status = getRequestStatus(request); 
4527039Snate@binkert.org if (status != RequestStatus_Ready) 4537039Snate@binkert.org return status; 4546349Spdudnik@gmail.com 4557039Snate@binkert.org SequencerRequest *srequest = 4567039Snate@binkert.org new SequencerRequest(request, g_eventQueue_ptr->getTime()); 4576285Snate@binkert.org bool found = insertRequest(srequest); 4587039Snate@binkert.org if (found) { 4597039Snate@binkert.org panic("Sequencer::makeRequest should never be called if the " 4607039Snate@binkert.org "request is already outstanding\n"); 4617039Snate@binkert.org return RequestStatus_NULL; 4627039Snate@binkert.org } 4637023SBrad.Beckmann@amd.com 4647039Snate@binkert.org if (request.type == RubyRequestType_Locked_Write) { 4657039Snate@binkert.org // NOTE: it is OK to check the locked flag here as the 4667039Snate@binkert.org // mandatory queue will be checked first ensuring that nothing 4677039Snate@binkert.org // comes between checking the flag and servicing the store. 4687023SBrad.Beckmann@amd.com 4697039Snate@binkert.org Address line_addr = line_address(Address(request.paddr)); 4707039Snate@binkert.org if (!m_dataCache_ptr->isLocked(line_addr, m_version)) { 4717039Snate@binkert.org removeRequest(srequest); 4727039Snate@binkert.org if (Debug::getProtocolTrace()) { 4737039Snate@binkert.org g_system_ptr->getProfiler()-> 4747039Snate@binkert.org profileTransition("Seq", m_version, 4757039Snate@binkert.org Address(request.paddr), 4767039Snate@binkert.org "", "SC Fail", "", 4777039Snate@binkert.org RubyRequestType_to_string(request.type)); 4787023SBrad.Beckmann@amd.com } 4797023SBrad.Beckmann@amd.com return RequestStatus_LlscFailed; 4807039Snate@binkert.org } else { 4817039Snate@binkert.org m_dataCache_ptr->clearLocked(line_addr); 4826349Spdudnik@gmail.com } 4837039Snate@binkert.org } 4847039Snate@binkert.org issueRequest(request); 4856145Snate@binkert.org 4867039Snate@binkert.org // TODO: issue hardware prefetches here 4877039Snate@binkert.org return RequestStatus_Issued; 4886145Snate@binkert.org} 
4896145Snate@binkert.org 4907039Snate@binkert.orgvoid 4917039Snate@binkert.orgSequencer::issueRequest(const RubyRequest& request) 4927039Snate@binkert.org{ 4937039Snate@binkert.org // TODO: get rid of CacheMsg, CacheRequestType, and 4947039Snate@binkert.org // AccessModeTYpe, & have SLICC use RubyRequest and subtypes 4957039Snate@binkert.org // natively 4967039Snate@binkert.org CacheRequestType ctype; 4977039Snate@binkert.org switch(request.type) { 4987039Snate@binkert.org case RubyRequestType_IFETCH: 4997039Snate@binkert.org ctype = CacheRequestType_IFETCH; 5007039Snate@binkert.org break; 5017039Snate@binkert.org case RubyRequestType_LD: 5027039Snate@binkert.org ctype = CacheRequestType_LD; 5037039Snate@binkert.org break; 5047039Snate@binkert.org case RubyRequestType_ST: 5057039Snate@binkert.org ctype = CacheRequestType_ST; 5067039Snate@binkert.org break; 5077039Snate@binkert.org case RubyRequestType_Locked_Read: 5087039Snate@binkert.org case RubyRequestType_Locked_Write: 5097039Snate@binkert.org ctype = CacheRequestType_ATOMIC; 5107039Snate@binkert.org break; 5117039Snate@binkert.org case RubyRequestType_RMW_Read: 5127039Snate@binkert.org ctype = CacheRequestType_ATOMIC; 5137039Snate@binkert.org break; 5147039Snate@binkert.org case RubyRequestType_RMW_Write: 5157039Snate@binkert.org ctype = CacheRequestType_ATOMIC; 5167039Snate@binkert.org break; 5177039Snate@binkert.org default: 5187039Snate@binkert.org assert(0); 5197039Snate@binkert.org } 5206285Snate@binkert.org 5217039Snate@binkert.org AccessModeType amtype; 5227039Snate@binkert.org switch(request.access_mode){ 5237039Snate@binkert.org case RubyAccessMode_User: 5247039Snate@binkert.org amtype = AccessModeType_UserMode; 5257039Snate@binkert.org break; 5267039Snate@binkert.org case RubyAccessMode_Supervisor: 5277039Snate@binkert.org amtype = AccessModeType_SupervisorMode; 5287039Snate@binkert.org break; 5297039Snate@binkert.org case RubyAccessMode_Device: 5307039Snate@binkert.org amtype = 
AccessModeType_UserMode; 5317039Snate@binkert.org break; 5327039Snate@binkert.org default: 5337039Snate@binkert.org assert(0); 5347039Snate@binkert.org } 5356285Snate@binkert.org 5367039Snate@binkert.org Address line_addr(request.paddr); 5377039Snate@binkert.org line_addr.makeLineAddress(); 5387039Snate@binkert.org CacheMsg msg(line_addr, Address(request.paddr), ctype, 5397039Snate@binkert.org Address(request.pc), amtype, request.len, PrefetchBit_No, 5407039Snate@binkert.org request.proc_id); 5416285Snate@binkert.org 5427039Snate@binkert.org if (Debug::getProtocolTrace()) { 5437039Snate@binkert.org g_system_ptr->getProfiler()-> 5447039Snate@binkert.org profileTransition("Seq", m_version, Address(request.paddr), 5457039Snate@binkert.org "", "Begin", "", 5467039Snate@binkert.org RubyRequestType_to_string(request.type)); 5477039Snate@binkert.org } 5486285Snate@binkert.org 5497039Snate@binkert.org if (g_system_ptr->getTracer()->traceEnabled()) { 5507039Snate@binkert.org g_system_ptr->getTracer()-> 5517039Snate@binkert.org traceRequest(this, line_addr, Address(request.pc), 5527039Snate@binkert.org request.type, g_eventQueue_ptr->getTime()); 5537039Snate@binkert.org } 5546285Snate@binkert.org 5557039Snate@binkert.org Time latency = 0; // initialzed to an null value 5566285Snate@binkert.org 5577039Snate@binkert.org if (request.type == RubyRequestType_IFETCH) 5587039Snate@binkert.org latency = m_instCache_ptr->getLatency(); 5597039Snate@binkert.org else 5607039Snate@binkert.org latency = m_dataCache_ptr->getLatency(); 5616285Snate@binkert.org 5627039Snate@binkert.org // Send the message to the cache controller 5637039Snate@binkert.org assert(latency > 0); 5646145Snate@binkert.org 5657039Snate@binkert.org assert(m_mandatory_q_ptr != NULL); 5667039Snate@binkert.org m_mandatory_q_ptr->enqueue(msg, latency); 5676145Snate@binkert.org} 5686145Snate@binkert.org 5697039Snate@binkert.org#if 0 5707039Snate@binkert.orgbool 5717039Snate@binkert.orgSequencer::tryCacheAccess(const 
Address& addr, CacheRequestType type, 5727039Snate@binkert.org AccessModeType access_mode, 5737039Snate@binkert.org int size, DataBlock*& data_ptr) 5747039Snate@binkert.org{ 5757039Snate@binkert.org CacheMemory *cache = 5767039Snate@binkert.org (type == CacheRequestType_IFETCH) ? m_instCache_ptr : m_dataCache_ptr; 5777039Snate@binkert.org 5787039Snate@binkert.org return cache->tryCacheAccess(line_address(addr), type, data_ptr); 5797039Snate@binkert.org} 5807039Snate@binkert.org#endif 5817039Snate@binkert.org 5827039Snate@binkert.orgvoid 5837039Snate@binkert.orgSequencer::print(ostream& out) const 5847039Snate@binkert.org{ 5857039Snate@binkert.org out << "[Sequencer: " << m_version 5867039Snate@binkert.org << ", outstanding requests: " << m_outstanding_count 5877039Snate@binkert.org << ", read request table: " << m_readRequestTable 5887039Snate@binkert.org << ", write request table: " << m_writeRequestTable 5897039Snate@binkert.org << "]"; 5907039Snate@binkert.org} 5917039Snate@binkert.org 5927039Snate@binkert.org// this can be called from setState whenever coherence permissions are 5937039Snate@binkert.org// upgraded when invoked, coherence violations will be checked for the 5947039Snate@binkert.org// given block 5957039Snate@binkert.orgvoid 5967039Snate@binkert.orgSequencer::checkCoherence(const Address& addr) 5977039Snate@binkert.org{ 5986145Snate@binkert.org#ifdef CHECK_COHERENCE 5997039Snate@binkert.org g_system_ptr->checkGlobalCoherenceInvariant(addr); 6006145Snate@binkert.org#endif 6016145Snate@binkert.org} 602