Sequencer.cc revision 6859
16145Snate@binkert.org 26145Snate@binkert.org/* 36145Snate@binkert.org * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood 46145Snate@binkert.org * All rights reserved. 56145Snate@binkert.org * 66145Snate@binkert.org * Redistribution and use in source and binary forms, with or without 76145Snate@binkert.org * modification, are permitted provided that the following conditions are 86145Snate@binkert.org * met: redistributions of source code must retain the above copyright 96145Snate@binkert.org * notice, this list of conditions and the following disclaimer; 106145Snate@binkert.org * redistributions in binary form must reproduce the above copyright 116145Snate@binkert.org * notice, this list of conditions and the following disclaimer in the 126145Snate@binkert.org * documentation and/or other materials provided with the distribution; 136145Snate@binkert.org * neither the name of the copyright holders nor the names of its 146145Snate@binkert.org * contributors may be used to endorse or promote products derived from 156145Snate@binkert.org * this software without specific prior written permission. 166145Snate@binkert.org * 176145Snate@binkert.org * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 186145Snate@binkert.org * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 196145Snate@binkert.org * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 206145Snate@binkert.org * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 216145Snate@binkert.org * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 226145Snate@binkert.org * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 236145Snate@binkert.org * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 246145Snate@binkert.org * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 256145Snate@binkert.org * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 266145Snate@binkert.org * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 276145Snate@binkert.org * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 286145Snate@binkert.org */ 296145Snate@binkert.org 306845Sdrh5@cs.wisc.edu#include "mem/ruby/libruby.hh" 316154Snate@binkert.org#include "mem/ruby/common/Global.hh" 326154Snate@binkert.org#include "mem/ruby/system/Sequencer.hh" 336154Snate@binkert.org#include "mem/ruby/system/System.hh" 346154Snate@binkert.org#include "mem/protocol/Protocol.hh" 356154Snate@binkert.org#include "mem/ruby/profiler/Profiler.hh" 366154Snate@binkert.org#include "mem/ruby/system/CacheMemory.hh" 376285Snate@binkert.org#include "mem/protocol/CacheMsg.hh" 386285Snate@binkert.org#include "mem/ruby/recorder/Tracer.hh" 396154Snate@binkert.org#include "mem/ruby/common/SubBlock.hh" 406154Snate@binkert.org#include "mem/protocol/Protocol.hh" 416154Snate@binkert.org#include "mem/gems_common/Map.hh" 426285Snate@binkert.org#include "mem/ruby/buffers/MessageBuffer.hh" 436285Snate@binkert.org#include "mem/ruby/slicc_interface/AbstractController.hh" 446145Snate@binkert.org 456285Snate@binkert.org//Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q) 466145Snate@binkert.org 476355Spdudnik@gmail.com#define LLSC_FAIL -2 486850Spdudnik@gmail.comlong int already = 0; 496285Snate@binkert.orgSequencer::Sequencer(const string & name) 506285Snate@binkert.org :RubyPort(name) 516285Snate@binkert.org{ 526859Sdrh5@cs.wisc.edu 
m_store_waiting_on_load_cycles = 0; 536859Sdrh5@cs.wisc.edu m_store_waiting_on_store_cycles = 0; 546859Sdrh5@cs.wisc.edu m_load_waiting_on_store_cycles = 0; 556859Sdrh5@cs.wisc.edu m_load_waiting_on_load_cycles = 0; 566285Snate@binkert.org} 576285Snate@binkert.org 586285Snate@binkert.orgvoid Sequencer::init(const vector<string> & argv) 596285Snate@binkert.org{ 606145Snate@binkert.org m_deadlock_check_scheduled = false; 616145Snate@binkert.org m_outstanding_count = 0; 626145Snate@binkert.org 636285Snate@binkert.org m_max_outstanding_requests = 0; 646285Snate@binkert.org m_deadlock_threshold = 0; 656285Snate@binkert.org m_version = -1; 666285Snate@binkert.org m_instCache_ptr = NULL; 676285Snate@binkert.org m_dataCache_ptr = NULL; 686285Snate@binkert.org m_controller = NULL; 696846Spdudnik@cs.wisc.edu m_atomic_reads = 0; 706846Spdudnik@cs.wisc.edu m_atomic_writes = 0; 716285Snate@binkert.org for (size_t i=0; i<argv.size(); i+=2) { 726285Snate@binkert.org if ( argv[i] == "controller") { 736285Snate@binkert.org m_controller = RubySystem::getController(argv[i+1]); // args[i] = "L1Cache" 746285Snate@binkert.org m_mandatory_q_ptr = m_controller->getMandatoryQueue(); 756285Snate@binkert.org } else if ( argv[i] == "icache") 766285Snate@binkert.org m_instCache_ptr = RubySystem::getCache(argv[i+1]); 776285Snate@binkert.org else if ( argv[i] == "dcache") 786285Snate@binkert.org m_dataCache_ptr = RubySystem::getCache(argv[i+1]); 796285Snate@binkert.org else if ( argv[i] == "version") 806285Snate@binkert.org m_version = atoi(argv[i+1].c_str()); 816285Snate@binkert.org else if ( argv[i] == "max_outstanding_requests") 826285Snate@binkert.org m_max_outstanding_requests = atoi(argv[i+1].c_str()); 836285Snate@binkert.org else if ( argv[i] == "deadlock_threshold") 846285Snate@binkert.org m_deadlock_threshold = atoi(argv[i+1].c_str()); 856285Snate@binkert.org else { 866285Snate@binkert.org cerr << "WARNING: Sequencer: Unkown configuration parameter: " << argv[i] << endl; 
876285Snate@binkert.org assert(false); 886285Snate@binkert.org } 896145Snate@binkert.org } 906285Snate@binkert.org assert(m_max_outstanding_requests > 0); 916285Snate@binkert.org assert(m_deadlock_threshold > 0); 926285Snate@binkert.org assert(m_version > -1); 936285Snate@binkert.org assert(m_instCache_ptr != NULL); 946285Snate@binkert.org assert(m_dataCache_ptr != NULL); 956285Snate@binkert.org assert(m_controller != NULL); 966145Snate@binkert.org} 976145Snate@binkert.org 986145Snate@binkert.orgSequencer::~Sequencer() { 996285Snate@binkert.org 1006145Snate@binkert.org} 1016145Snate@binkert.org 1026145Snate@binkert.orgvoid Sequencer::wakeup() { 1036145Snate@binkert.org // Check for deadlock of any of the requests 1046145Snate@binkert.org Time current_time = g_eventQueue_ptr->getTime(); 1056145Snate@binkert.org 1066145Snate@binkert.org // Check across all outstanding requests 1076145Snate@binkert.org int total_outstanding = 0; 1086285Snate@binkert.org 1096285Snate@binkert.org Vector<Address> keys = m_readRequestTable.keys(); 1106285Snate@binkert.org for (int i=0; i<keys.size(); i++) { 1116285Snate@binkert.org SequencerRequest* request = m_readRequestTable.lookup(keys[i]); 1126285Snate@binkert.org if (current_time - request->issue_time >= m_deadlock_threshold) { 1136285Snate@binkert.org WARN_MSG("Possible Deadlock detected"); 1146285Snate@binkert.org WARN_EXPR(request); 1156285Snate@binkert.org WARN_EXPR(m_version); 1166825Spdudnik@gmail.com WARN_EXPR(request->ruby_request.paddr); 1176285Snate@binkert.org WARN_EXPR(keys.size()); 1186285Snate@binkert.org WARN_EXPR(current_time); 1196285Snate@binkert.org WARN_EXPR(request->issue_time); 1206285Snate@binkert.org WARN_EXPR(current_time - request->issue_time); 1216285Snate@binkert.org ERROR_MSG("Aborting"); 1226145Snate@binkert.org } 1236285Snate@binkert.org } 1246145Snate@binkert.org 1256285Snate@binkert.org keys = m_writeRequestTable.keys(); 1266285Snate@binkert.org for (int i=0; i<keys.size(); i++) { 
1276285Snate@binkert.org SequencerRequest* request = m_writeRequestTable.lookup(keys[i]); 1286285Snate@binkert.org if (current_time - request->issue_time >= m_deadlock_threshold) { 1296285Snate@binkert.org WARN_MSG("Possible Deadlock detected"); 1306285Snate@binkert.org WARN_EXPR(request); 1316285Snate@binkert.org WARN_EXPR(m_version); 1326285Snate@binkert.org WARN_EXPR(current_time); 1336285Snate@binkert.org WARN_EXPR(request->issue_time); 1346285Snate@binkert.org WARN_EXPR(current_time - request->issue_time); 1356285Snate@binkert.org WARN_EXPR(keys.size()); 1366285Snate@binkert.org ERROR_MSG("Aborting"); 1376145Snate@binkert.org } 1386285Snate@binkert.org } 1396285Snate@binkert.org total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size(); 1406285Snate@binkert.org 1416145Snate@binkert.org assert(m_outstanding_count == total_outstanding); 1426145Snate@binkert.org 1436145Snate@binkert.org if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking 1446285Snate@binkert.org g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold); 1456145Snate@binkert.org } else { 1466145Snate@binkert.org m_deadlock_check_scheduled = false; 1476145Snate@binkert.org } 1486145Snate@binkert.org} 1496145Snate@binkert.org 1506859Sdrh5@cs.wisc.eduvoid Sequencer::printStats(ostream & out) const { 1516859Sdrh5@cs.wisc.edu out << "Sequencer: " << m_name << endl; 1526859Sdrh5@cs.wisc.edu out << " store_waiting_on_load_cycles: " << m_store_waiting_on_load_cycles << endl; 1536859Sdrh5@cs.wisc.edu out << " store_waiting_on_store_cycles: " << m_store_waiting_on_store_cycles << endl; 1546859Sdrh5@cs.wisc.edu out << " load_waiting_on_load_cycles: " << m_load_waiting_on_load_cycles << endl; 1556859Sdrh5@cs.wisc.edu out << " load_waiting_on_store_cycles: " << m_load_waiting_on_store_cycles << endl; 1566859Sdrh5@cs.wisc.edu} 1576859Sdrh5@cs.wisc.edu 1586145Snate@binkert.orgvoid Sequencer::printProgress(ostream& out) const{ 1596285Snate@binkert.org /* 
1606145Snate@binkert.org int total_demand = 0; 1616145Snate@binkert.org out << "Sequencer Stats Version " << m_version << endl; 1626145Snate@binkert.org out << "Current time = " << g_eventQueue_ptr->getTime() << endl; 1636145Snate@binkert.org out << "---------------" << endl; 1646145Snate@binkert.org out << "outstanding requests" << endl; 1656145Snate@binkert.org 1666285Snate@binkert.org Vector<Address> rkeys = m_readRequestTable.keys(); 1676285Snate@binkert.org int read_size = rkeys.size(); 1686285Snate@binkert.org out << "proc " << m_version << " Read Requests = " << read_size << endl; 1696285Snate@binkert.org // print the request table 1706285Snate@binkert.org for(int i=0; i < read_size; ++i){ 1716285Snate@binkert.org SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]); 1726285Snate@binkert.org out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i] << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl; 1736285Snate@binkert.org total_demand++; 1746285Snate@binkert.org } 1756145Snate@binkert.org 1766285Snate@binkert.org Vector<Address> wkeys = m_writeRequestTable.keys(); 1776285Snate@binkert.org int write_size = wkeys.size(); 1786285Snate@binkert.org out << "proc " << m_version << " Write Requests = " << write_size << endl; 1796285Snate@binkert.org // print the request table 1806285Snate@binkert.org for(int i=0; i < write_size; ++i){ 1816285Snate@binkert.org CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]); 1826145Snate@binkert.org out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl; 1836145Snate@binkert.org if( request.getPrefetch() == PrefetchBit_No ){ 1846145Snate@binkert.org total_demand++; 1856145Snate@binkert.org } 1866285Snate@binkert.org } 1876145Snate@binkert.org 1886285Snate@binkert.org out << endl; 1896285Snate@binkert.org 1906145Snate@binkert.org out << "Total Number 
Outstanding: " << m_outstanding_count << endl; 1916145Snate@binkert.org out << "Total Number Demand : " << total_demand << endl; 1926145Snate@binkert.org out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl; 1936145Snate@binkert.org out << endl; 1946145Snate@binkert.org out << endl; 1956285Snate@binkert.org */ 1966145Snate@binkert.org} 1976145Snate@binkert.org 1986285Snate@binkert.orgvoid Sequencer::printConfig(ostream& out) const { 1996285Snate@binkert.org out << "Seqeuncer config: " << m_name << endl; 2006285Snate@binkert.org out << " controller: " << m_controller->getName() << endl; 2016285Snate@binkert.org out << " version: " << m_version << endl; 2026285Snate@binkert.org out << " max_outstanding_requests: " << m_max_outstanding_requests << endl; 2036285Snate@binkert.org out << " deadlock_threshold: " << m_deadlock_threshold << endl; 2046145Snate@binkert.org} 2056145Snate@binkert.org 2066145Snate@binkert.org// Insert the request on the correct request table. Return true if 2076145Snate@binkert.org// the entry was already present. 
// Record a new request in the read or write table, keyed by cache-line
// address, and bump m_outstanding_count.  Stores, RMWs, and LL/SC requests
// go in the write table; everything else (LD/IFETCH) in the read table.
// Also arms the deadlock-sweep wakeup() if it is not already scheduled.
// Declared to return true when an entry was already present, but the
// aliasing case currently trips assert(0) instead (see comments below),
// so in practice it always returns false.
bool Sequencer::insertRequest(SequencerRequest* request) {
  int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();

  // Invariant: the running counter always matches the table sizes.
  assert(m_outstanding_count == total_outstanding);

  // See if we should schedule a deadlock check
  if (m_deadlock_check_scheduled == false) {
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
    m_deadlock_check_scheduled = true;
  }

  // Requests are tracked at cache-line granularity.
  Address line_addr(request->ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((request->ruby_request.type == RubyRequestType_ST) ||
      (request->ruby_request.type == RubyRequestType_RMW_Read) ||
      (request->ruby_request.type == RubyRequestType_RMW_Write) ||
      (request->ruby_request.type == RubyRequestType_Locked_Read) ||
      (request->ruby_request.type == RubyRequestType_Locked_Write)) {
    // Aliased writes should have been rejected earlier by isReady();
    // reaching this branch means the caller bypassed that check.  The
    // overwrite below would leak the original SequencerRequest, hence
    // the hard assert.  NOTE(review): with NDEBUG this would silently
    // overwrite and then double-allocate — TODO confirm intended.
    if (m_writeRequestTable.exist(line_addr)) {
      m_writeRequestTable.lookup(line_addr) = request;
      //      return true;
      assert(0); // drh5: isn't this an error?  do you lose the initial request?
    }
    m_writeRequestTable.allocate(line_addr);
    m_writeRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  } else {
    // Same landmine as above, for the read table.
    if (m_readRequestTable.exist(line_addr)) {
      m_readRequestTable.lookup(line_addr) = request;
      //      return true;
      assert(0); // drh5: isn't this an error?  do you lose the initial request?
    }
    m_readRequestTable.allocate(line_addr);
    m_readRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  }

  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

  // Re-establish the counter/table invariant after the insert.
  total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
  assert(m_outstanding_count == total_outstanding);

  return false;
}

// Remove a completed request from whichever table insertRequest() put it
// in, and decrement m_outstanding_count.  The type test here must stay in
// sync with the classification in insertRequest().  Does NOT delete the
// SequencerRequest — ownership passes back to the caller (hitCallback
// deletes it).
void Sequencer::removeRequest(SequencerRequest* srequest) {

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());

  const RubyRequest & ruby_request = srequest->ruby_request;
  Address line_addr(ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((ruby_request.type == RubyRequestType_ST) ||
      (ruby_request.type == RubyRequestType_RMW_Read) ||
      (ruby_request.type == RubyRequestType_RMW_Write) ||
      (ruby_request.type == RubyRequestType_Locked_Read) ||
      (ruby_request.type == RubyRequestType_Locked_Write)) {
    m_writeRequestTable.deallocate(line_addr);
  } else {
    m_readRequestTable.deallocate(line_addr);
  }
  m_outstanding_count--;

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
}

2746145Snate@binkert.orgvoid Sequencer::writeCallback(const Address& address, DataBlock& data) { 2756145Snate@binkert.org 2766145Snate@binkert.org assert(address == line_address(address)); 2776285Snate@binkert.org assert(m_writeRequestTable.exist(line_address(address))); 2786145Snate@binkert.org 2796285Snate@binkert.org SequencerRequest* request = m_writeRequestTable.lookup(address); 2806846Spdudnik@cs.wisc.edu 2816145Snate@binkert.org removeRequest(request); 2826145Snate@binkert.org 2836285Snate@binkert.org assert((request->ruby_request.type == RubyRequestType_ST) || 2846355Spdudnik@gmail.com (request->ruby_request.type == RubyRequestType_RMW_Read) || 2856355Spdudnik@gmail.com (request->ruby_request.type == RubyRequestType_RMW_Write) || 2866350Spdudnik@gmail.com (request->ruby_request.type == RubyRequestType_Locked_Read) || 2876350Spdudnik@gmail.com (request->ruby_request.type == RubyRequestType_Locked_Write)); 2886347Spdudnik@gmail.com // POLINA: the assumption is that atomics are only on data cache and not instruction cache 2896350Spdudnik@gmail.com if (request->ruby_request.type == RubyRequestType_Locked_Read) { 2906347Spdudnik@gmail.com m_dataCache_ptr->setLocked(address, m_version); 2916347Spdudnik@gmail.com } 2926506Spdudnik@gmail.com else if (request->ruby_request.type == RubyRequestType_RMW_Read) { 2936506Spdudnik@gmail.com m_controller->set_atomic(address); 2946506Spdudnik@gmail.com } 2956506Spdudnik@gmail.com else if (request->ruby_request.type == RubyRequestType_RMW_Write) { 2966846Spdudnik@cs.wisc.edu m_controller->clear_atomic(address); 2976506Spdudnik@gmail.com } 2986145Snate@binkert.org 2996285Snate@binkert.org hitCallback(request, data); 3006145Snate@binkert.org} 3016145Snate@binkert.org 3026145Snate@binkert.orgvoid Sequencer::readCallback(const Address& address, DataBlock& data) { 3036145Snate@binkert.org 3046285Snate@binkert.org assert(address == line_address(address)); 3056285Snate@binkert.org 
assert(m_readRequestTable.exist(line_address(address))); 3066145Snate@binkert.org 3076285Snate@binkert.org SequencerRequest* request = m_readRequestTable.lookup(address); 3086285Snate@binkert.org removeRequest(request); 3096285Snate@binkert.org 3106285Snate@binkert.org assert((request->ruby_request.type == RubyRequestType_LD) || 3116381Sdrh5@cs.wisc.edu (request->ruby_request.type == RubyRequestType_RMW_Read) || 3126285Snate@binkert.org (request->ruby_request.type == RubyRequestType_IFETCH)); 3136285Snate@binkert.org 3146285Snate@binkert.org hitCallback(request, data); 3156145Snate@binkert.org} 3166145Snate@binkert.org 3176285Snate@binkert.orgvoid Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) { 3186285Snate@binkert.org const RubyRequest & ruby_request = srequest->ruby_request; 3196285Snate@binkert.org Address request_address(ruby_request.paddr); 3206285Snate@binkert.org Address request_line_address(ruby_request.paddr); 3216285Snate@binkert.org request_line_address.makeLineAddress(); 3226285Snate@binkert.org RubyRequestType type = ruby_request.type; 3236285Snate@binkert.org Time issued_time = srequest->issue_time; 3246145Snate@binkert.org 3256145Snate@binkert.org // Set this cache entry to the most recently used 3266285Snate@binkert.org if (type == RubyRequestType_IFETCH) { 3276285Snate@binkert.org if (m_instCache_ptr->isTagPresent(request_line_address) ) 3286285Snate@binkert.org m_instCache_ptr->setMRU(request_line_address); 3296145Snate@binkert.org } else { 3306285Snate@binkert.org if (m_dataCache_ptr->isTagPresent(request_line_address) ) 3316285Snate@binkert.org m_dataCache_ptr->setMRU(request_line_address); 3326145Snate@binkert.org } 3336145Snate@binkert.org 3346145Snate@binkert.org assert(g_eventQueue_ptr->getTime() >= issued_time); 3356145Snate@binkert.org Time miss_latency = g_eventQueue_ptr->getTime() - issued_time; 3366145Snate@binkert.org 3376285Snate@binkert.org // Profile the miss latency for all non-zero demand misses 
3386285Snate@binkert.org if (miss_latency != 0) { 3396285Snate@binkert.org g_system_ptr->getProfiler()->missLatency(miss_latency, type); 3406285Snate@binkert.org 3416285Snate@binkert.org if (Debug::getProtocolTrace()) { 3426285Snate@binkert.org g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr), 3436285Snate@binkert.org "", "Done", "", int_to_string(miss_latency)+" cycles"); 3446285Snate@binkert.org } 3456285Snate@binkert.org } 3466285Snate@binkert.org /* 3476285Snate@binkert.org if (request.getPrefetch() == PrefetchBit_Yes) { 3486285Snate@binkert.org return; // Ignore the prefetch 3496285Snate@binkert.org } 3506285Snate@binkert.org */ 3516285Snate@binkert.org 3526285Snate@binkert.org // update the data 3536285Snate@binkert.org if (ruby_request.data != NULL) { 3546285Snate@binkert.org if ((type == RubyRequestType_LD) || 3556381Sdrh5@cs.wisc.edu (type == RubyRequestType_IFETCH) || 3566381Sdrh5@cs.wisc.edu (type == RubyRequestType_RMW_Read)) { 3576285Snate@binkert.org memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len); 3586285Snate@binkert.org } else { 3596285Snate@binkert.org data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len); 3606285Snate@binkert.org } 3616145Snate@binkert.org } 3626846Spdudnik@cs.wisc.edu 3636285Snate@binkert.org m_hit_callback(srequest->id); 3646285Snate@binkert.org delete srequest; 3656285Snate@binkert.org} 3666285Snate@binkert.org 3676285Snate@binkert.org// Returns true if the sequencer already has a load or store outstanding 3686845Sdrh5@cs.wisc.eduint Sequencer::isReady(const RubyRequest& request) { 3696859Sdrh5@cs.wisc.edu bool is_outstanding_store = m_writeRequestTable.exist(line_address(Address(request.paddr))); 3706859Sdrh5@cs.wisc.edu bool is_outstanding_load = m_readRequestTable.exist(line_address(Address(request.paddr))); 3716859Sdrh5@cs.wisc.edu if ( is_outstanding_store ) { 3726859Sdrh5@cs.wisc.edu if 
((request.type == RubyRequestType_LD) || 3736859Sdrh5@cs.wisc.edu (request.type == RubyRequestType_IFETCH) || 3746859Sdrh5@cs.wisc.edu (request.type == RubyRequestType_RMW_Read)) { 3756859Sdrh5@cs.wisc.edu m_store_waiting_on_load_cycles++; 3766859Sdrh5@cs.wisc.edu } else { 3776859Sdrh5@cs.wisc.edu m_store_waiting_on_store_cycles++; 3786859Sdrh5@cs.wisc.edu } 3796859Sdrh5@cs.wisc.edu return LIBRUBY_ALIASED_REQUEST; 3806859Sdrh5@cs.wisc.edu } else if ( is_outstanding_load ) { 3816859Sdrh5@cs.wisc.edu if ((request.type == RubyRequestType_ST) || 3826859Sdrh5@cs.wisc.edu (request.type == RubyRequestType_RMW_Write) ) { 3836859Sdrh5@cs.wisc.edu m_load_waiting_on_store_cycles++; 3846859Sdrh5@cs.wisc.edu } else { 3856859Sdrh5@cs.wisc.edu m_load_waiting_on_load_cycles++; 3866859Sdrh5@cs.wisc.edu } 3876856Sdrh5@cs.wisc.edu return LIBRUBY_ALIASED_REQUEST; 3886856Sdrh5@cs.wisc.edu } 3896856Sdrh5@cs.wisc.edu 3906285Snate@binkert.org if (m_outstanding_count >= m_max_outstanding_requests) { 3916845Sdrh5@cs.wisc.edu return LIBRUBY_BUFFER_FULL; 3926145Snate@binkert.org } 3936845Sdrh5@cs.wisc.edu 3946845Sdrh5@cs.wisc.edu return 1; 3956145Snate@binkert.org} 3966145Snate@binkert.org 3976285Snate@binkert.orgbool Sequencer::empty() const { 3986285Snate@binkert.org return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0); 3996145Snate@binkert.org} 4006145Snate@binkert.org 4016349Spdudnik@gmail.com 4026285Snate@binkert.orgint64_t Sequencer::makeRequest(const RubyRequest & request) 4036285Snate@binkert.org{ 4046285Snate@binkert.org assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes()); 4056845Sdrh5@cs.wisc.edu int ready = isReady(request); 4066845Sdrh5@cs.wisc.edu if (ready > 0) { 4076285Snate@binkert.org int64_t id = makeUniqueRequestID(); 4086285Snate@binkert.org SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime()); 4096285Snate@binkert.org bool found = insertRequest(srequest); 
4106825Spdudnik@gmail.com if (!found) { 4116350Spdudnik@gmail.com if (request.type == RubyRequestType_Locked_Write) { 4126355Spdudnik@gmail.com // NOTE: it is OK to check the locked flag here as the mandatory queue will be checked first 4136355Spdudnik@gmail.com // ensuring that nothing comes between checking the flag and servicing the store 4146349Spdudnik@gmail.com if (!m_dataCache_ptr->isLocked(line_address(Address(request.paddr)), m_version)) { 4156355Spdudnik@gmail.com return LLSC_FAIL; 4166349Spdudnik@gmail.com } 4176349Spdudnik@gmail.com else { 4186349Spdudnik@gmail.com m_dataCache_ptr->clearLocked(line_address(Address(request.paddr))); 4196349Spdudnik@gmail.com } 4206349Spdudnik@gmail.com } 4216285Snate@binkert.org issueRequest(request); 4226145Snate@binkert.org 4236859Sdrh5@cs.wisc.edu // TODO: issue hardware prefetches here 4246859Sdrh5@cs.wisc.edu return id; 4256825Spdudnik@gmail.com } 4266825Spdudnik@gmail.com else { 4276825Spdudnik@gmail.com assert(0); 4286859Sdrh5@cs.wisc.edu return 0; 4296825Spdudnik@gmail.com } 4306859Sdrh5@cs.wisc.edu } else { 4316845Sdrh5@cs.wisc.edu return ready; 4326145Snate@binkert.org } 4336145Snate@binkert.org} 4346145Snate@binkert.org 4356285Snate@binkert.orgvoid Sequencer::issueRequest(const RubyRequest& request) { 4366285Snate@binkert.org 4376285Snate@binkert.org // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeTYpe, & have SLICC use RubyRequest and subtypes natively 4386285Snate@binkert.org CacheRequestType ctype; 4396285Snate@binkert.org switch(request.type) { 4406285Snate@binkert.org case RubyRequestType_IFETCH: 4416846Spdudnik@cs.wisc.edu if (m_atomic_reads > 0 && m_atomic_writes == 0) { 4426846Spdudnik@cs.wisc.edu m_controller->reset_atomics(); 4436850Spdudnik@gmail.com m_atomic_writes = 0; 4446850Spdudnik@gmail.com m_atomic_reads = 0; 4456846Spdudnik@cs.wisc.edu } 4466846Spdudnik@cs.wisc.edu else if (m_atomic_writes > 0) { 4476846Spdudnik@cs.wisc.edu assert(m_atomic_reads > m_atomic_writes); 
4486846Spdudnik@cs.wisc.edu cerr << "WARNING: Expected: " << m_atomic_reads << " RMW_Writes, but only received: " << m_atomic_writes << endl; 4496846Spdudnik@cs.wisc.edu assert(false); 4506846Spdudnik@cs.wisc.edu } 4516285Snate@binkert.org ctype = CacheRequestType_IFETCH; 4526285Snate@binkert.org break; 4536285Snate@binkert.org case RubyRequestType_LD: 4546846Spdudnik@cs.wisc.edu if (m_atomic_reads > 0 && m_atomic_writes == 0) { 4556846Spdudnik@cs.wisc.edu m_controller->reset_atomics(); 4566850Spdudnik@gmail.com m_atomic_writes = 0; 4576850Spdudnik@gmail.com m_atomic_reads = 0; 4586846Spdudnik@cs.wisc.edu } 4596846Spdudnik@cs.wisc.edu else if (m_atomic_writes > 0) { 4606846Spdudnik@cs.wisc.edu assert(m_atomic_reads > m_atomic_writes); 4616846Spdudnik@cs.wisc.edu cerr << "WARNING: Expected: " << m_atomic_reads << " RMW_Writes, but only received: " << m_atomic_writes << endl; 4626846Spdudnik@cs.wisc.edu assert(false); 4636846Spdudnik@cs.wisc.edu } 4646285Snate@binkert.org ctype = CacheRequestType_LD; 4656285Snate@binkert.org break; 4666285Snate@binkert.org case RubyRequestType_ST: 4676846Spdudnik@cs.wisc.edu if (m_atomic_reads > 0 && m_atomic_writes == 0) { 4686846Spdudnik@cs.wisc.edu m_controller->reset_atomics(); 4696850Spdudnik@gmail.com m_atomic_writes = 0; 4706850Spdudnik@gmail.com m_atomic_reads = 0; 4716846Spdudnik@cs.wisc.edu } 4726846Spdudnik@cs.wisc.edu else if (m_atomic_writes > 0) { 4736846Spdudnik@cs.wisc.edu assert(m_atomic_reads > m_atomic_writes); 4746846Spdudnik@cs.wisc.edu cerr << "WARNING: Expected: " << m_atomic_reads << " RMW_Writes, but only received: " << m_atomic_writes << endl; 4756846Spdudnik@cs.wisc.edu assert(false); 4766846Spdudnik@cs.wisc.edu } 4776285Snate@binkert.org ctype = CacheRequestType_ST; 4786285Snate@binkert.org break; 4796350Spdudnik@gmail.com case RubyRequestType_Locked_Read: 4806350Spdudnik@gmail.com case RubyRequestType_Locked_Write: 4816846Spdudnik@cs.wisc.edu ctype = CacheRequestType_ATOMIC; 4826846Spdudnik@cs.wisc.edu 
break; 4836355Spdudnik@gmail.com case RubyRequestType_RMW_Read: 4846846Spdudnik@cs.wisc.edu assert(m_atomic_writes == 0); 4856846Spdudnik@cs.wisc.edu m_atomic_reads++; 4866846Spdudnik@cs.wisc.edu ctype = CacheRequestType_ATOMIC; 4876846Spdudnik@cs.wisc.edu break; 4886355Spdudnik@gmail.com case RubyRequestType_RMW_Write: 4896846Spdudnik@cs.wisc.edu assert(m_atomic_reads > 0); 4906846Spdudnik@cs.wisc.edu assert(m_atomic_writes < m_atomic_reads); 4916846Spdudnik@cs.wisc.edu m_atomic_writes++; 4926846Spdudnik@cs.wisc.edu if (m_atomic_reads == m_atomic_writes) { 4936846Spdudnik@cs.wisc.edu m_atomic_reads = 0; 4946846Spdudnik@cs.wisc.edu m_atomic_writes = 0; 4956846Spdudnik@cs.wisc.edu } 4966355Spdudnik@gmail.com ctype = CacheRequestType_ATOMIC; 4976355Spdudnik@gmail.com break; 4986285Snate@binkert.org default: 4996285Snate@binkert.org assert(0); 5006145Snate@binkert.org } 5016285Snate@binkert.org AccessModeType amtype; 5026285Snate@binkert.org switch(request.access_mode){ 5036285Snate@binkert.org case RubyAccessMode_User: 5046285Snate@binkert.org amtype = AccessModeType_UserMode; 5056285Snate@binkert.org break; 5066285Snate@binkert.org case RubyAccessMode_Supervisor: 5076285Snate@binkert.org amtype = AccessModeType_SupervisorMode; 5086285Snate@binkert.org break; 5096285Snate@binkert.org case RubyAccessMode_Device: 5106285Snate@binkert.org amtype = AccessModeType_UserMode; 5116285Snate@binkert.org break; 5126285Snate@binkert.org default: 5136285Snate@binkert.org assert(0); 5146285Snate@binkert.org } 5156285Snate@binkert.org Address line_addr(request.paddr); 5166285Snate@binkert.org line_addr.makeLineAddress(); 5176505Spdudnik@gmail.com CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No, request.proc_id); 5186285Snate@binkert.org 5196285Snate@binkert.org if (Debug::getProtocolTrace()) { 5206285Snate@binkert.org g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr), 
5216285Snate@binkert.org "", "Begin", "", RubyRequestType_to_string(request.type)); 5226285Snate@binkert.org } 5236285Snate@binkert.org 5246285Snate@binkert.org if (g_system_ptr->getTracer()->traceEnabled()) { 5256285Snate@binkert.org g_system_ptr->getTracer()->traceRequest(m_name, line_addr, Address(request.pc), 5266285Snate@binkert.org request.type, g_eventQueue_ptr->getTime()); 5276285Snate@binkert.org } 5286285Snate@binkert.org 5296285Snate@binkert.org Time latency = 0; // initialzed to an null value 5306285Snate@binkert.org 5316285Snate@binkert.org if (request.type == RubyRequestType_IFETCH) 5326285Snate@binkert.org latency = m_instCache_ptr->getLatency(); 5336285Snate@binkert.org else 5346285Snate@binkert.org latency = m_dataCache_ptr->getLatency(); 5356285Snate@binkert.org 5366285Snate@binkert.org // Send the message to the cache controller 5376285Snate@binkert.org assert(latency > 0); 5386285Snate@binkert.org 5396285Snate@binkert.org 5406285Snate@binkert.org m_mandatory_q_ptr->enqueue(msg, latency); 5416285Snate@binkert.org} 5426285Snate@binkert.org/* 5436285Snate@binkert.orgbool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type, 5446285Snate@binkert.org AccessModeType access_mode, 5456285Snate@binkert.org int size, DataBlock*& data_ptr) { 5466285Snate@binkert.org if (type == CacheRequestType_IFETCH) { 5476285Snate@binkert.org return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr); 5486285Snate@binkert.org } else { 5496285Snate@binkert.org return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr); 5506145Snate@binkert.org } 5516145Snate@binkert.org} 5526285Snate@binkert.org*/ 5536145Snate@binkert.org 5546145Snate@binkert.orgvoid Sequencer::print(ostream& out) const { 5556285Snate@binkert.org out << "[Sequencer: " << m_version 5566145Snate@binkert.org << ", outstanding requests: " << m_outstanding_count; 5576145Snate@binkert.org 5586285Snate@binkert.org out << ", read request table: " << 
m_readRequestTable 5596285Snate@binkert.org << ", write request table: " << m_writeRequestTable; 5606145Snate@binkert.org out << "]"; 5616145Snate@binkert.org} 5626145Snate@binkert.org 5636145Snate@binkert.org// this can be called from setState whenever coherence permissions are upgraded 5646145Snate@binkert.org// when invoked, coherence violations will be checked for the given block 5656145Snate@binkert.orgvoid Sequencer::checkCoherence(const Address& addr) { 5666145Snate@binkert.org#ifdef CHECK_COHERENCE 5676145Snate@binkert.org g_system_ptr->checkGlobalCoherenceInvariant(addr); 5686145Snate@binkert.org#endif 5696145Snate@binkert.org} 5706145Snate@binkert.org 571