// Sequencer.cc revision 6154

/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT 216145Snate@binkert.org * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 226145Snate@binkert.org * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 236145Snate@binkert.org * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 246145Snate@binkert.org * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 256145Snate@binkert.org * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 266145Snate@binkert.org * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 276145Snate@binkert.org * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 286145Snate@binkert.org */ 296145Snate@binkert.org 306145Snate@binkert.org/* 316145Snate@binkert.org * $Id: Sequencer.C 1.131 2006/11/06 17:41:01-06:00 bobba@gratiano.cs.wisc.edu $ 326145Snate@binkert.org * 336145Snate@binkert.org */ 346145Snate@binkert.org 356154Snate@binkert.org#include "mem/ruby/common/Global.hh" 366154Snate@binkert.org#include "mem/ruby/system/Sequencer.hh" 376154Snate@binkert.org#include "mem/ruby/system/System.hh" 386154Snate@binkert.org#include "mem/protocol/Protocol.hh" 396154Snate@binkert.org#include "mem/ruby/profiler/Profiler.hh" 406154Snate@binkert.org#include "mem/ruby/system/CacheMemory.hh" 416154Snate@binkert.org#include "mem/ruby/config/RubyConfig.hh" 426154Snate@binkert.org//#include "mem/ruby/recorder/Tracer.hh" 436154Snate@binkert.org#include "mem/ruby/slicc_interface/AbstractChip.hh" 446154Snate@binkert.org#include "mem/protocol/Chip.hh" 456154Snate@binkert.org#include "mem/ruby/tester/Tester.hh" 466154Snate@binkert.org#include "mem/ruby/common/SubBlock.hh" 476154Snate@binkert.org#include "mem/protocol/Protocol.hh" 486154Snate@binkert.org#include "mem/gems_common/Map.hh" 496145Snate@binkert.org 506145Snate@binkert.orgSequencer::Sequencer(AbstractChip* chip_ptr, int version) { 516145Snate@binkert.org m_chip_ptr = chip_ptr; 526145Snate@binkert.org 
m_version = version; 536145Snate@binkert.org 546145Snate@binkert.org m_deadlock_check_scheduled = false; 556145Snate@binkert.org m_outstanding_count = 0; 566145Snate@binkert.org 576145Snate@binkert.org int smt_threads = RubyConfig::numberofSMTThreads(); 586145Snate@binkert.org m_writeRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads]; 596145Snate@binkert.org m_readRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads]; 606145Snate@binkert.org 616145Snate@binkert.org for(int p=0; p < smt_threads; ++p){ 626145Snate@binkert.org m_writeRequestTable_ptr[p] = new Map<Address, CacheMsg>; 636145Snate@binkert.org m_readRequestTable_ptr[p] = new Map<Address, CacheMsg>; 646145Snate@binkert.org } 656145Snate@binkert.org 666145Snate@binkert.org} 676145Snate@binkert.org 686145Snate@binkert.orgSequencer::~Sequencer() { 696145Snate@binkert.org int smt_threads = RubyConfig::numberofSMTThreads(); 706145Snate@binkert.org for(int i=0; i < smt_threads; ++i){ 716145Snate@binkert.org if(m_writeRequestTable_ptr[i]){ 726145Snate@binkert.org delete m_writeRequestTable_ptr[i]; 736145Snate@binkert.org } 746145Snate@binkert.org if(m_readRequestTable_ptr[i]){ 756145Snate@binkert.org delete m_readRequestTable_ptr[i]; 766145Snate@binkert.org } 776145Snate@binkert.org } 786145Snate@binkert.org if(m_writeRequestTable_ptr){ 796145Snate@binkert.org delete [] m_writeRequestTable_ptr; 806145Snate@binkert.org } 816145Snate@binkert.org if(m_readRequestTable_ptr){ 826145Snate@binkert.org delete [] m_readRequestTable_ptr; 836145Snate@binkert.org } 846145Snate@binkert.org} 856145Snate@binkert.org 866145Snate@binkert.orgvoid Sequencer::wakeup() { 876145Snate@binkert.org // Check for deadlock of any of the requests 886145Snate@binkert.org Time current_time = g_eventQueue_ptr->getTime(); 896145Snate@binkert.org bool deadlock = false; 906145Snate@binkert.org 916145Snate@binkert.org // Check across all outstanding requests 926145Snate@binkert.org int smt_threads = RubyConfig::numberofSMTThreads(); 
936145Snate@binkert.org int total_outstanding = 0; 946145Snate@binkert.org for(int p=0; p < smt_threads; ++p){ 956145Snate@binkert.org Vector<Address> keys = m_readRequestTable_ptr[p]->keys(); 966145Snate@binkert.org for (int i=0; i<keys.size(); i++) { 976145Snate@binkert.org CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]); 986145Snate@binkert.org if (current_time - request.getTime() >= g_DEADLOCK_THRESHOLD) { 996145Snate@binkert.org WARN_MSG("Possible Deadlock detected"); 1006145Snate@binkert.org WARN_EXPR(request); 1016145Snate@binkert.org WARN_EXPR(m_chip_ptr->getID()); 1026145Snate@binkert.org WARN_EXPR(m_version); 1036145Snate@binkert.org WARN_EXPR(keys.size()); 1046145Snate@binkert.org WARN_EXPR(current_time); 1056145Snate@binkert.org WARN_EXPR(request.getTime()); 1066145Snate@binkert.org WARN_EXPR(current_time - request.getTime()); 1076145Snate@binkert.org WARN_EXPR(*m_readRequestTable_ptr[p]); 1086145Snate@binkert.org ERROR_MSG("Aborting"); 1096145Snate@binkert.org deadlock = true; 1106145Snate@binkert.org } 1116145Snate@binkert.org } 1126145Snate@binkert.org 1136145Snate@binkert.org keys = m_writeRequestTable_ptr[p]->keys(); 1146145Snate@binkert.org for (int i=0; i<keys.size(); i++) { 1156145Snate@binkert.org CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]); 1166145Snate@binkert.org if (current_time - request.getTime() >= g_DEADLOCK_THRESHOLD) { 1176145Snate@binkert.org WARN_MSG("Possible Deadlock detected"); 1186145Snate@binkert.org WARN_EXPR(request); 1196145Snate@binkert.org WARN_EXPR(m_chip_ptr->getID()); 1206145Snate@binkert.org WARN_EXPR(m_version); 1216145Snate@binkert.org WARN_EXPR(current_time); 1226145Snate@binkert.org WARN_EXPR(request.getTime()); 1236145Snate@binkert.org WARN_EXPR(current_time - request.getTime()); 1246145Snate@binkert.org WARN_EXPR(keys.size()); 1256145Snate@binkert.org WARN_EXPR(*m_writeRequestTable_ptr[p]); 1266145Snate@binkert.org ERROR_MSG("Aborting"); 1276145Snate@binkert.org deadlock = true; 
1286145Snate@binkert.org } 1296145Snate@binkert.org } 1306145Snate@binkert.org total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size(); 1316145Snate@binkert.org } // across all request tables 1326145Snate@binkert.org assert(m_outstanding_count == total_outstanding); 1336145Snate@binkert.org 1346145Snate@binkert.org if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking 1356145Snate@binkert.org g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD); 1366145Snate@binkert.org } else { 1376145Snate@binkert.org m_deadlock_check_scheduled = false; 1386145Snate@binkert.org } 1396145Snate@binkert.org} 1406145Snate@binkert.org 1416145Snate@binkert.org//returns the total number of requests 1426145Snate@binkert.orgint Sequencer::getNumberOutstanding(){ 1436145Snate@binkert.org return m_outstanding_count; 1446145Snate@binkert.org} 1456145Snate@binkert.org 1466145Snate@binkert.org// returns the total number of demand requests 1476145Snate@binkert.orgint Sequencer::getNumberOutstandingDemand(){ 1486145Snate@binkert.org int smt_threads = RubyConfig::numberofSMTThreads(); 1496145Snate@binkert.org int total_demand = 0; 1506145Snate@binkert.org for(int p=0; p < smt_threads; ++p){ 1516145Snate@binkert.org Vector<Address> keys = m_readRequestTable_ptr[p]->keys(); 1526145Snate@binkert.org for (int i=0; i< keys.size(); i++) { 1536145Snate@binkert.org CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]); 1546152Sdrh5@cs.wisc.edu if(request.getPrefetch() == PrefetchBit_No){ 1556152Sdrh5@cs.wisc.edu total_demand++; 1566145Snate@binkert.org } 1576145Snate@binkert.org } 1586145Snate@binkert.org 1596145Snate@binkert.org keys = m_writeRequestTable_ptr[p]->keys(); 1606145Snate@binkert.org for (int i=0; i< keys.size(); i++) { 1616145Snate@binkert.org CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]); 1626145Snate@binkert.org if(request.getPrefetch() == PrefetchBit_No){ 1636145Snate@binkert.org 
total_demand++; 1646145Snate@binkert.org } 1656145Snate@binkert.org } 1666145Snate@binkert.org } 1676145Snate@binkert.org 1686145Snate@binkert.org return total_demand; 1696145Snate@binkert.org} 1706145Snate@binkert.org 1716145Snate@binkert.orgint Sequencer::getNumberOutstandingPrefetch(){ 1726145Snate@binkert.org int smt_threads = RubyConfig::numberofSMTThreads(); 1736145Snate@binkert.org int total_prefetch = 0; 1746145Snate@binkert.org for(int p=0; p < smt_threads; ++p){ 1756145Snate@binkert.org Vector<Address> keys = m_readRequestTable_ptr[p]->keys(); 1766145Snate@binkert.org for (int i=0; i< keys.size(); i++) { 1776145Snate@binkert.org CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]); 1786145Snate@binkert.org if(request.getPrefetch() == PrefetchBit_Yes){ 1796145Snate@binkert.org total_prefetch++; 1806145Snate@binkert.org } 1816145Snate@binkert.org } 1826145Snate@binkert.org 1836145Snate@binkert.org keys = m_writeRequestTable_ptr[p]->keys(); 1846145Snate@binkert.org for (int i=0; i< keys.size(); i++) { 1856145Snate@binkert.org CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]); 1866145Snate@binkert.org if(request.getPrefetch() == PrefetchBit_Yes){ 1876145Snate@binkert.org total_prefetch++; 1886145Snate@binkert.org } 1896145Snate@binkert.org } 1906145Snate@binkert.org } 1916145Snate@binkert.org 1926145Snate@binkert.org return total_prefetch; 1936145Snate@binkert.org} 1946145Snate@binkert.org 1956145Snate@binkert.orgbool Sequencer::isPrefetchRequest(const Address & lineaddr){ 1966145Snate@binkert.org int smt_threads = RubyConfig::numberofSMTThreads(); 1976145Snate@binkert.org for(int p=0; p < smt_threads; ++p){ 1986145Snate@binkert.org // check load requests 1996145Snate@binkert.org Vector<Address> keys = m_readRequestTable_ptr[p]->keys(); 2006145Snate@binkert.org for (int i=0; i< keys.size(); i++) { 2016145Snate@binkert.org CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]); 2026145Snate@binkert.org 
if(line_address(request.getAddress()) == lineaddr){ 2036145Snate@binkert.org if(request.getPrefetch() == PrefetchBit_Yes){ 2046145Snate@binkert.org return true; 2056145Snate@binkert.org } 2066145Snate@binkert.org else{ 2076145Snate@binkert.org return false; 2086145Snate@binkert.org } 2096145Snate@binkert.org } 2106145Snate@binkert.org } 2116145Snate@binkert.org 2126145Snate@binkert.org // check store requests 2136145Snate@binkert.org keys = m_writeRequestTable_ptr[p]->keys(); 2146145Snate@binkert.org for (int i=0; i< keys.size(); i++) { 2156145Snate@binkert.org CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]); 2166145Snate@binkert.org if(line_address(request.getAddress()) == lineaddr){ 2176145Snate@binkert.org if(request.getPrefetch() == PrefetchBit_Yes){ 2186145Snate@binkert.org return true; 2196145Snate@binkert.org } 2206145Snate@binkert.org else{ 2216145Snate@binkert.org return false; 2226145Snate@binkert.org } 2236145Snate@binkert.org } 2246145Snate@binkert.org } 2256145Snate@binkert.org } 2266145Snate@binkert.org // we should've found a matching request 2276145Snate@binkert.org cout << "isRequestPrefetch() ERROR request NOT FOUND : " << lineaddr << endl; 2286145Snate@binkert.org printProgress(cout); 2296145Snate@binkert.org assert(0); 2306145Snate@binkert.org} 2316145Snate@binkert.org 2326145Snate@binkert.orgAccessModeType Sequencer::getAccessModeOfRequest(Address addr, int thread){ 2336145Snate@binkert.org if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){ 2346145Snate@binkert.org CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr); 2356145Snate@binkert.org return request.getAccessMode(); 2366145Snate@binkert.org } else if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){ 2376145Snate@binkert.org CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr); 2386145Snate@binkert.org return request.getAccessMode(); 2396145Snate@binkert.org } else { 2406145Snate@binkert.org printProgress(cout); 
2416145Snate@binkert.org ERROR_MSG("Request not found in RequestTables"); 2426145Snate@binkert.org } 2436145Snate@binkert.org} 2446145Snate@binkert.org 2456145Snate@binkert.orgAddress Sequencer::getLogicalAddressOfRequest(Address addr, int thread){ 2466145Snate@binkert.org assert(thread >= 0); 2476145Snate@binkert.org if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){ 2486145Snate@binkert.org CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr); 2496145Snate@binkert.org return request.getLogicalAddress(); 2506145Snate@binkert.org } else if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){ 2516145Snate@binkert.org CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr); 2526145Snate@binkert.org return request.getLogicalAddress(); 2536145Snate@binkert.org } else { 2546145Snate@binkert.org printProgress(cout); 2556145Snate@binkert.org WARN_MSG("Request not found in RequestTables"); 2566145Snate@binkert.org WARN_MSG(addr); 2576145Snate@binkert.org WARN_MSG(thread); 2586145Snate@binkert.org ASSERT(0); 2596145Snate@binkert.org } 2606145Snate@binkert.org} 2616145Snate@binkert.org 2626145Snate@binkert.org// returns the ThreadID of the request 2636145Snate@binkert.orgint Sequencer::getRequestThreadID(const Address & addr){ 2646145Snate@binkert.org int smt_threads = RubyConfig::numberofSMTThreads(); 2656145Snate@binkert.org int thread = -1; 2666145Snate@binkert.org int num_found = 0; 2676145Snate@binkert.org for(int p=0; p < smt_threads; ++p){ 2686145Snate@binkert.org if(m_readRequestTable_ptr[p]->exist(addr)){ 2696145Snate@binkert.org num_found++; 2706145Snate@binkert.org thread = p; 2716145Snate@binkert.org } 2726145Snate@binkert.org if(m_writeRequestTable_ptr[p]->exist(addr)){ 2736145Snate@binkert.org num_found++; 2746145Snate@binkert.org thread = p; 2756145Snate@binkert.org } 2766145Snate@binkert.org } 2776145Snate@binkert.org if(num_found != 1){ 2786145Snate@binkert.org cout << "getRequestThreadID ERROR too many matching 
requests addr = " << addr << endl; 2796145Snate@binkert.org printProgress(cout); 2806145Snate@binkert.org } 2816145Snate@binkert.org ASSERT(num_found == 1); 2826145Snate@binkert.org ASSERT(thread != -1); 2836145Snate@binkert.org 2846145Snate@binkert.org return thread; 2856145Snate@binkert.org} 2866145Snate@binkert.org 2876145Snate@binkert.org// given a line address, return the request's physical address 2886145Snate@binkert.orgAddress Sequencer::getRequestPhysicalAddress(const Address & lineaddr){ 2896145Snate@binkert.org int smt_threads = RubyConfig::numberofSMTThreads(); 2906145Snate@binkert.org Address physaddr; 2916145Snate@binkert.org int num_found = 0; 2926145Snate@binkert.org for(int p=0; p < smt_threads; ++p){ 2936145Snate@binkert.org if(m_readRequestTable_ptr[p]->exist(lineaddr)){ 2946145Snate@binkert.org num_found++; 2956145Snate@binkert.org physaddr = (m_readRequestTable_ptr[p]->lookup(lineaddr)).getAddress(); 2966145Snate@binkert.org } 2976145Snate@binkert.org if(m_writeRequestTable_ptr[p]->exist(lineaddr)){ 2986145Snate@binkert.org num_found++; 2996145Snate@binkert.org physaddr = (m_writeRequestTable_ptr[p]->lookup(lineaddr)).getAddress(); 3006145Snate@binkert.org } 3016145Snate@binkert.org } 3026145Snate@binkert.org if(num_found != 1){ 3036145Snate@binkert.org cout << "getRequestPhysicalAddress ERROR too many matching requests addr = " << lineaddr << endl; 3046145Snate@binkert.org printProgress(cout); 3056145Snate@binkert.org } 3066145Snate@binkert.org ASSERT(num_found == 1); 3076145Snate@binkert.org 3086145Snate@binkert.org return physaddr; 3096145Snate@binkert.org} 3106145Snate@binkert.org 3116145Snate@binkert.orgvoid Sequencer::printProgress(ostream& out) const{ 3126145Snate@binkert.org 3136145Snate@binkert.org int total_demand = 0; 3146145Snate@binkert.org out << "Sequencer Stats Version " << m_version << endl; 3156145Snate@binkert.org out << "Current time = " << g_eventQueue_ptr->getTime() << endl; 3166145Snate@binkert.org out << 
"---------------" << endl; 3176145Snate@binkert.org out << "outstanding requests" << endl; 3186145Snate@binkert.org 3196145Snate@binkert.org int smt_threads = RubyConfig::numberofSMTThreads(); 3206145Snate@binkert.org for(int p=0; p < smt_threads; ++p){ 3216145Snate@binkert.org Vector<Address> rkeys = m_readRequestTable_ptr[p]->keys(); 3226145Snate@binkert.org int read_size = rkeys.size(); 3236145Snate@binkert.org out << "proc " << m_chip_ptr->getID() << " thread " << p << " Read Requests = " << read_size << endl; 3246145Snate@binkert.org // print the request table 3256145Snate@binkert.org for(int i=0; i < read_size; ++i){ 3266145Snate@binkert.org CacheMsg & request = m_readRequestTable_ptr[p]->lookup(rkeys[i]); 3276145Snate@binkert.org out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << rkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl; 3286145Snate@binkert.org if( request.getPrefetch() == PrefetchBit_No ){ 3296145Snate@binkert.org total_demand++; 3306145Snate@binkert.org } 3316145Snate@binkert.org } 3326145Snate@binkert.org 3336145Snate@binkert.org Vector<Address> wkeys = m_writeRequestTable_ptr[p]->keys(); 3346145Snate@binkert.org int write_size = wkeys.size(); 3356145Snate@binkert.org out << "proc " << m_chip_ptr->getID() << " thread " << p << " Write Requests = " << write_size << endl; 3366145Snate@binkert.org // print the request table 3376145Snate@binkert.org for(int i=0; i < write_size; ++i){ 3386145Snate@binkert.org CacheMsg & request = m_writeRequestTable_ptr[p]->lookup(wkeys[i]); 3396145Snate@binkert.org out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl; 3406145Snate@binkert.org if( request.getPrefetch() == PrefetchBit_No ){ 3416145Snate@binkert.org total_demand++; 3426145Snate@binkert.org } 3436145Snate@binkert.org } 3446145Snate@binkert.org 3456145Snate@binkert.org out << endl; 
3466145Snate@binkert.org } 3476145Snate@binkert.org out << "Total Number Outstanding: " << m_outstanding_count << endl; 3486145Snate@binkert.org out << "Total Number Demand : " << total_demand << endl; 3496145Snate@binkert.org out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl; 3506145Snate@binkert.org out << endl; 3516145Snate@binkert.org out << endl; 3526145Snate@binkert.org 3536145Snate@binkert.org} 3546145Snate@binkert.org 3556145Snate@binkert.orgvoid Sequencer::printConfig(ostream& out) { 3566145Snate@binkert.org if (TSO) { 3576145Snate@binkert.org out << "sequencer: Sequencer - TSO" << endl; 3586145Snate@binkert.org } else { 3596145Snate@binkert.org out << "sequencer: Sequencer - SC" << endl; 3606145Snate@binkert.org } 3616145Snate@binkert.org out << " max_outstanding_requests: " << g_SEQUENCER_OUTSTANDING_REQUESTS << endl; 3626145Snate@binkert.org} 3636145Snate@binkert.org 3646145Snate@binkert.orgbool Sequencer::empty() const { 3656145Snate@binkert.org return m_outstanding_count == 0; 3666145Snate@binkert.org} 3676145Snate@binkert.org 3686145Snate@binkert.org// Insert the request on the correct request table. Return true if 3696145Snate@binkert.org// the entry was already present. 
3706145Snate@binkert.orgbool Sequencer::insertRequest(const CacheMsg& request) { 3716145Snate@binkert.org int thread = request.getThreadID(); 3726145Snate@binkert.org assert(thread >= 0); 3736145Snate@binkert.org int total_outstanding = 0; 3746145Snate@binkert.org int smt_threads = RubyConfig::numberofSMTThreads(); 3756145Snate@binkert.org for(int p=0; p < smt_threads; ++p){ 3766145Snate@binkert.org total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size(); 3776145Snate@binkert.org } 3786145Snate@binkert.org assert(m_outstanding_count == total_outstanding); 3796145Snate@binkert.org 3806145Snate@binkert.org // See if we should schedule a deadlock check 3816145Snate@binkert.org if (m_deadlock_check_scheduled == false) { 3826145Snate@binkert.org g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD); 3836145Snate@binkert.org m_deadlock_check_scheduled = true; 3846145Snate@binkert.org } 3856145Snate@binkert.org 3866145Snate@binkert.org if ((request.getType() == CacheRequestType_ST) || 3876145Snate@binkert.org (request.getType() == CacheRequestType_ATOMIC)) { 3886145Snate@binkert.org if (m_writeRequestTable_ptr[thread]->exist(line_address(request.getAddress()))) { 3896145Snate@binkert.org m_writeRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request; 3906145Snate@binkert.org return true; 3916145Snate@binkert.org } 3926145Snate@binkert.org m_writeRequestTable_ptr[thread]->allocate(line_address(request.getAddress())); 3936145Snate@binkert.org m_writeRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request; 3946145Snate@binkert.org m_outstanding_count++; 3956145Snate@binkert.org } else { 3966145Snate@binkert.org if (m_readRequestTable_ptr[thread]->exist(line_address(request.getAddress()))) { 3976145Snate@binkert.org m_readRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request; 3986145Snate@binkert.org return true; 3996145Snate@binkert.org } 4006145Snate@binkert.org 
m_readRequestTable_ptr[thread]->allocate(line_address(request.getAddress())); 4016145Snate@binkert.org m_readRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request; 4026145Snate@binkert.org m_outstanding_count++; 4036145Snate@binkert.org } 4046145Snate@binkert.org 4056145Snate@binkert.org g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count); 4066145Snate@binkert.org 4076145Snate@binkert.org total_outstanding = 0; 4086145Snate@binkert.org for(int p=0; p < smt_threads; ++p){ 4096145Snate@binkert.org total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size(); 4106145Snate@binkert.org } 4116145Snate@binkert.org 4126145Snate@binkert.org assert(m_outstanding_count == total_outstanding); 4136145Snate@binkert.org return false; 4146145Snate@binkert.org} 4156145Snate@binkert.org 4166145Snate@binkert.orgvoid Sequencer::removeRequest(const CacheMsg& request) { 4176145Snate@binkert.org int thread = request.getThreadID(); 4186145Snate@binkert.org assert(thread >= 0); 4196145Snate@binkert.org int total_outstanding = 0; 4206145Snate@binkert.org int smt_threads = RubyConfig::numberofSMTThreads(); 4216145Snate@binkert.org for(int p=0; p < smt_threads; ++p){ 4226145Snate@binkert.org total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size(); 4236145Snate@binkert.org } 4246145Snate@binkert.org assert(m_outstanding_count == total_outstanding); 4256145Snate@binkert.org 4266145Snate@binkert.org if ((request.getType() == CacheRequestType_ST) || 4276145Snate@binkert.org (request.getType() == CacheRequestType_ATOMIC)) { 4286145Snate@binkert.org m_writeRequestTable_ptr[thread]->deallocate(line_address(request.getAddress())); 4296145Snate@binkert.org } else { 4306145Snate@binkert.org m_readRequestTable_ptr[thread]->deallocate(line_address(request.getAddress())); 4316145Snate@binkert.org } 4326145Snate@binkert.org m_outstanding_count--; 4336145Snate@binkert.org 4346145Snate@binkert.org 
total_outstanding = 0; 4356145Snate@binkert.org for(int p=0; p < smt_threads; ++p){ 4366145Snate@binkert.org total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size(); 4376145Snate@binkert.org } 4386145Snate@binkert.org assert(m_outstanding_count == total_outstanding); 4396145Snate@binkert.org} 4406145Snate@binkert.org 4416145Snate@binkert.orgvoid Sequencer::writeCallback(const Address& address) { 4426145Snate@binkert.org DataBlock data; 4436145Snate@binkert.org writeCallback(address, data); 4446145Snate@binkert.org} 4456145Snate@binkert.org 4466145Snate@binkert.orgvoid Sequencer::writeCallback(const Address& address, DataBlock& data) { 4476145Snate@binkert.org // process oldest thread first 4486145Snate@binkert.org int thread = -1; 4496145Snate@binkert.org Time oldest_time = 0; 4506145Snate@binkert.org int smt_threads = RubyConfig::numberofSMTThreads(); 4516145Snate@binkert.org for(int t=0; t < smt_threads; ++t){ 4526145Snate@binkert.org if(m_writeRequestTable_ptr[t]->exist(address)){ 4536145Snate@binkert.org CacheMsg & request = m_writeRequestTable_ptr[t]->lookup(address); 4546145Snate@binkert.org if(thread == -1 || (request.getTime() < oldest_time) ){ 4556145Snate@binkert.org thread = t; 4566145Snate@binkert.org oldest_time = request.getTime(); 4576145Snate@binkert.org } 4586145Snate@binkert.org } 4596145Snate@binkert.org } 4606145Snate@binkert.org // make sure we found an oldest thread 4616145Snate@binkert.org ASSERT(thread != -1); 4626145Snate@binkert.org 4636145Snate@binkert.org CacheMsg & request = m_writeRequestTable_ptr[thread]->lookup(address); 4646145Snate@binkert.org 4656145Snate@binkert.org writeCallback(address, data, GenericMachineType_NULL, PrefetchBit_No, thread); 4666145Snate@binkert.org} 4676145Snate@binkert.org 4686145Snate@binkert.orgvoid Sequencer::writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread) { 4696145Snate@binkert.org 
4706145Snate@binkert.org assert(address == line_address(address)); 4716145Snate@binkert.org assert(thread >= 0); 4726145Snate@binkert.org assert(m_writeRequestTable_ptr[thread]->exist(line_address(address))); 4736145Snate@binkert.org 4746145Snate@binkert.org writeCallback(address, data, respondingMach, thread); 4756145Snate@binkert.org 4766145Snate@binkert.org} 4776145Snate@binkert.org 4786145Snate@binkert.orgvoid Sequencer::writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread) { 4796145Snate@binkert.org assert(address == line_address(address)); 4806145Snate@binkert.org assert(m_writeRequestTable_ptr[thread]->exist(line_address(address))); 4816145Snate@binkert.org CacheMsg request = m_writeRequestTable_ptr[thread]->lookup(address); 4826145Snate@binkert.org assert( request.getThreadID() == thread); 4836145Snate@binkert.org removeRequest(request); 4846145Snate@binkert.org 4856145Snate@binkert.org assert((request.getType() == CacheRequestType_ST) || 4866145Snate@binkert.org (request.getType() == CacheRequestType_ATOMIC)); 4876145Snate@binkert.org 4886145Snate@binkert.org hitCallback(request, data, respondingMach, thread); 4896145Snate@binkert.org 4906145Snate@binkert.org} 4916145Snate@binkert.org 4926145Snate@binkert.orgvoid Sequencer::readCallback(const Address& address) { 4936145Snate@binkert.org DataBlock data; 4946145Snate@binkert.org readCallback(address, data); 4956145Snate@binkert.org} 4966145Snate@binkert.org 4976145Snate@binkert.orgvoid Sequencer::readCallback(const Address& address, DataBlock& data) { 4986145Snate@binkert.org // process oldest thread first 4996145Snate@binkert.org int thread = -1; 5006145Snate@binkert.org Time oldest_time = 0; 5016145Snate@binkert.org int smt_threads = RubyConfig::numberofSMTThreads(); 5026145Snate@binkert.org for(int t=0; t < smt_threads; ++t){ 5036145Snate@binkert.org if(m_readRequestTable_ptr[t]->exist(address)){ 5046145Snate@binkert.org CacheMsg & request = 
m_readRequestTable_ptr[t]->lookup(address); 5056145Snate@binkert.org if(thread == -1 || (request.getTime() < oldest_time) ){ 5066145Snate@binkert.org thread = t; 5076145Snate@binkert.org oldest_time = request.getTime(); 5086145Snate@binkert.org } 5096145Snate@binkert.org } 5106145Snate@binkert.org } 5116145Snate@binkert.org // make sure we found an oldest thread 5126145Snate@binkert.org ASSERT(thread != -1); 5136145Snate@binkert.org 5146145Snate@binkert.org CacheMsg & request = m_readRequestTable_ptr[thread]->lookup(address); 5156145Snate@binkert.org 5166145Snate@binkert.org readCallback(address, data, GenericMachineType_NULL, PrefetchBit_No, thread); 5176145Snate@binkert.org} 5186145Snate@binkert.org 5196145Snate@binkert.orgvoid Sequencer::readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread) { 5206145Snate@binkert.org 5216145Snate@binkert.org assert(address == line_address(address)); 5226145Snate@binkert.org assert(m_readRequestTable_ptr[thread]->exist(line_address(address))); 5236145Snate@binkert.org 5246145Snate@binkert.org readCallback(address, data, respondingMach, thread); 5256145Snate@binkert.org} 5266145Snate@binkert.org 5276145Snate@binkert.orgvoid Sequencer::readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread) { 5286145Snate@binkert.org assert(address == line_address(address)); 5296145Snate@binkert.org assert(m_readRequestTable_ptr[thread]->exist(line_address(address))); 5306145Snate@binkert.org 5316145Snate@binkert.org CacheMsg request = m_readRequestTable_ptr[thread]->lookup(address); 5326145Snate@binkert.org assert( request.getThreadID() == thread ); 5336145Snate@binkert.org removeRequest(request); 5346145Snate@binkert.org 5356145Snate@binkert.org assert((request.getType() == CacheRequestType_LD) || 5366145Snate@binkert.org (request.getType() == CacheRequestType_IFETCH) 5376145Snate@binkert.org ); 5386145Snate@binkert.org 
5396145Snate@binkert.org hitCallback(request, data, respondingMach, thread); 5406145Snate@binkert.org} 5416145Snate@binkert.org 5426145Snate@binkert.orgvoid Sequencer::hitCallback(const CacheMsg& request, DataBlock& data, GenericMachineType respondingMach, int thread) { 5436145Snate@binkert.org int size = request.getSize(); 5446145Snate@binkert.org Address request_address = request.getAddress(); 5456145Snate@binkert.org Address request_logical_address = request.getLogicalAddress(); 5466145Snate@binkert.org Address request_line_address = line_address(request_address); 5476145Snate@binkert.org CacheRequestType type = request.getType(); 5486145Snate@binkert.org int threadID = request.getThreadID(); 5496145Snate@binkert.org Time issued_time = request.getTime(); 5506145Snate@binkert.org int logical_proc_no = ((m_chip_ptr->getID() * RubyConfig::numberOfProcsPerChip()) + m_version) * RubyConfig::numberofSMTThreads() + threadID; 5516145Snate@binkert.org 5526145Snate@binkert.org DEBUG_MSG(SEQUENCER_COMP, MedPrio, size); 5536145Snate@binkert.org 5546145Snate@binkert.org // Set this cache entry to the most recently used 5556145Snate@binkert.org if (type == CacheRequestType_IFETCH) { 5566145Snate@binkert.org if (Protocol::m_TwoLevelCache) { 5576145Snate@binkert.org if (m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->isTagPresent(request_line_address)) { 5586145Snate@binkert.org m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->setMRU(request_line_address); 5596145Snate@binkert.org } 5606145Snate@binkert.org } 5616145Snate@binkert.org else { 5626145Snate@binkert.org if (m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->isTagPresent(request_line_address)) { 5636145Snate@binkert.org m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->setMRU(request_line_address); 5646145Snate@binkert.org } 5656145Snate@binkert.org } 5666145Snate@binkert.org } else { 5676145Snate@binkert.org if (Protocol::m_TwoLevelCache) { 5686145Snate@binkert.org if 
(m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->isTagPresent(request_line_address)) { 5696145Snate@binkert.org m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->setMRU(request_line_address); 5706145Snate@binkert.org } 5716145Snate@binkert.org } 5726145Snate@binkert.org else { 5736145Snate@binkert.org if (m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->isTagPresent(request_line_address)) { 5746145Snate@binkert.org m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->setMRU(request_line_address); 5756145Snate@binkert.org } 5766145Snate@binkert.org } 5776145Snate@binkert.org } 5786145Snate@binkert.org 5796145Snate@binkert.org assert(g_eventQueue_ptr->getTime() >= issued_time); 5806145Snate@binkert.org Time miss_latency = g_eventQueue_ptr->getTime() - issued_time; 5816145Snate@binkert.org 5826145Snate@binkert.org if (PROTOCOL_DEBUG_TRACE) { 5836145Snate@binkert.org g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), -1, request.getAddress(), "", "Done", "", 5846145Snate@binkert.org int_to_string(miss_latency)+" cycles "+GenericMachineType_to_string(respondingMach)+" "+CacheRequestType_to_string(request.getType())+" "+PrefetchBit_to_string(request.getPrefetch())); 5856145Snate@binkert.org } 5866145Snate@binkert.org 5876145Snate@binkert.org DEBUG_MSG(SEQUENCER_COMP, MedPrio, request_address); 5886145Snate@binkert.org DEBUG_MSG(SEQUENCER_COMP, MedPrio, request.getPrefetch()); 5896145Snate@binkert.org if (request.getPrefetch() == PrefetchBit_Yes) { 5906145Snate@binkert.org DEBUG_MSG(SEQUENCER_COMP, MedPrio, "return"); 5916145Snate@binkert.org g_system_ptr->getProfiler()->swPrefetchLatency(miss_latency, type, respondingMach); 5926145Snate@binkert.org return; // Ignore the software prefetch, don't callback the driver 5936145Snate@binkert.org } 5946145Snate@binkert.org 5956145Snate@binkert.org // Profile the miss latency for all non-zero demand misses 5966145Snate@binkert.org if (miss_latency != 0) 
{ 5976145Snate@binkert.org g_system_ptr->getProfiler()->missLatency(miss_latency, type, respondingMach); 5986145Snate@binkert.org 5996145Snate@binkert.org } 6006145Snate@binkert.org 6016145Snate@binkert.org bool write = 6026145Snate@binkert.org (type == CacheRequestType_ST) || 6036145Snate@binkert.org (type == CacheRequestType_ATOMIC); 6046145Snate@binkert.org 6056145Snate@binkert.org if (TSO && write) { 6066145Snate@binkert.org m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->callBack(line_address(request.getAddress()), data); 6076145Snate@binkert.org } else { 6086145Snate@binkert.org 6096145Snate@binkert.org // Copy the correct bytes out of the cache line into the subblock 6106145Snate@binkert.org SubBlock subblock(request_address, request_logical_address, size); 6116145Snate@binkert.org subblock.mergeFrom(data); // copy the correct bytes from DataBlock in the SubBlock 6126145Snate@binkert.org 6136145Snate@binkert.org // Scan the store buffer to see if there are any outstanding stores we need to collect 6146145Snate@binkert.org if (TSO) { 6156145Snate@binkert.org m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->updateSubBlock(subblock); 6166145Snate@binkert.org } 6176145Snate@binkert.org 6186153Sgibson@cs.wisc.edu // Call into the Driver and let it read and/or modify the sub-block 6196145Snate@binkert.org g_system_ptr->getDriver()->hitCallback(m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version, subblock, type, threadID); 6206145Snate@binkert.org 6216145Snate@binkert.org // If the request was a Store or Atomic, apply the changes in the SubBlock to the DataBlock 6226145Snate@binkert.org // (This is only triggered for the non-TSO case) 6236145Snate@binkert.org if (write) { 6246145Snate@binkert.org assert(!TSO); 6256145Snate@binkert.org subblock.mergeTo(data); // copy the correct bytes from SubBlock into the DataBlock 6266145Snate@binkert.org } 6276145Snate@binkert.org } 6286145Snate@binkert.org} 6296145Snate@binkert.org 6306153Sgibson@cs.wisc.eduvoid 
Sequencer::readConflictCallback(const Address& address) { 6316153Sgibson@cs.wisc.edu // process oldest thread first 6326153Sgibson@cs.wisc.edu int thread = -1; 6336153Sgibson@cs.wisc.edu Time oldest_time = 0; 6346153Sgibson@cs.wisc.edu int smt_threads = RubyConfig::numberofSMTThreads(); 6356153Sgibson@cs.wisc.edu for(int t=0; t < smt_threads; ++t){ 6366153Sgibson@cs.wisc.edu if(m_readRequestTable_ptr[t]->exist(address)){ 6376153Sgibson@cs.wisc.edu CacheMsg & request = m_readRequestTable_ptr[t]->lookup(address); 6386153Sgibson@cs.wisc.edu if(thread == -1 || (request.getTime() < oldest_time) ){ 6396153Sgibson@cs.wisc.edu thread = t; 6406153Sgibson@cs.wisc.edu oldest_time = request.getTime(); 6416153Sgibson@cs.wisc.edu } 6426153Sgibson@cs.wisc.edu } 6436153Sgibson@cs.wisc.edu } 6446153Sgibson@cs.wisc.edu // make sure we found an oldest thread 6456153Sgibson@cs.wisc.edu ASSERT(thread != -1); 6466153Sgibson@cs.wisc.edu 6476153Sgibson@cs.wisc.edu CacheMsg & request = m_readRequestTable_ptr[thread]->lookup(address); 6486153Sgibson@cs.wisc.edu 6496153Sgibson@cs.wisc.edu readConflictCallback(address, GenericMachineType_NULL, thread); 6506153Sgibson@cs.wisc.edu} 6516153Sgibson@cs.wisc.edu 6526153Sgibson@cs.wisc.eduvoid Sequencer::readConflictCallback(const Address& address, GenericMachineType respondingMach, int thread) { 6536153Sgibson@cs.wisc.edu assert(address == line_address(address)); 6546153Sgibson@cs.wisc.edu assert(m_readRequestTable_ptr[thread]->exist(line_address(address))); 6556153Sgibson@cs.wisc.edu 6566153Sgibson@cs.wisc.edu CacheMsg request = m_readRequestTable_ptr[thread]->lookup(address); 6576153Sgibson@cs.wisc.edu assert( request.getThreadID() == thread ); 6586153Sgibson@cs.wisc.edu removeRequest(request); 6596153Sgibson@cs.wisc.edu 6606153Sgibson@cs.wisc.edu assert((request.getType() == CacheRequestType_LD) || 6616153Sgibson@cs.wisc.edu (request.getType() == CacheRequestType_LD_XACT) || 6626153Sgibson@cs.wisc.edu (request.getType() == 
CacheRequestType_IFETCH) 6636153Sgibson@cs.wisc.edu ); 6646153Sgibson@cs.wisc.edu 6656153Sgibson@cs.wisc.edu conflictCallback(request, respondingMach, thread); 6666153Sgibson@cs.wisc.edu} 6676153Sgibson@cs.wisc.edu 6686153Sgibson@cs.wisc.eduvoid Sequencer::writeConflictCallback(const Address& address) { 6696153Sgibson@cs.wisc.edu // process oldest thread first 6706153Sgibson@cs.wisc.edu int thread = -1; 6716153Sgibson@cs.wisc.edu Time oldest_time = 0; 6726153Sgibson@cs.wisc.edu int smt_threads = RubyConfig::numberofSMTThreads(); 6736153Sgibson@cs.wisc.edu for(int t=0; t < smt_threads; ++t){ 6746153Sgibson@cs.wisc.edu if(m_writeRequestTable_ptr[t]->exist(address)){ 6756153Sgibson@cs.wisc.edu CacheMsg & request = m_writeRequestTable_ptr[t]->lookup(address); 6766153Sgibson@cs.wisc.edu if(thread == -1 || (request.getTime() < oldest_time) ){ 6776153Sgibson@cs.wisc.edu thread = t; 6786153Sgibson@cs.wisc.edu oldest_time = request.getTime(); 6796153Sgibson@cs.wisc.edu } 6806153Sgibson@cs.wisc.edu } 6816153Sgibson@cs.wisc.edu } 6826153Sgibson@cs.wisc.edu // make sure we found an oldest thread 6836153Sgibson@cs.wisc.edu ASSERT(thread != -1); 6846153Sgibson@cs.wisc.edu 6856153Sgibson@cs.wisc.edu CacheMsg & request = m_writeRequestTable_ptr[thread]->lookup(address); 6866153Sgibson@cs.wisc.edu 6876153Sgibson@cs.wisc.edu writeConflictCallback(address, GenericMachineType_NULL, thread); 6886153Sgibson@cs.wisc.edu} 6896153Sgibson@cs.wisc.edu 6906153Sgibson@cs.wisc.eduvoid Sequencer::writeConflictCallback(const Address& address, GenericMachineType respondingMach, int thread) { 6916153Sgibson@cs.wisc.edu assert(address == line_address(address)); 6926153Sgibson@cs.wisc.edu assert(m_writeRequestTable_ptr[thread]->exist(line_address(address))); 6936153Sgibson@cs.wisc.edu CacheMsg request = m_writeRequestTable_ptr[thread]->lookup(address); 6946153Sgibson@cs.wisc.edu assert( request.getThreadID() == thread); 6956153Sgibson@cs.wisc.edu removeRequest(request); 6966153Sgibson@cs.wisc.edu 
6976153Sgibson@cs.wisc.edu assert((request.getType() == CacheRequestType_ST) || 6986153Sgibson@cs.wisc.edu (request.getType() == CacheRequestType_ST_XACT) || 6996153Sgibson@cs.wisc.edu (request.getType() == CacheRequestType_LDX_XACT) || 7006153Sgibson@cs.wisc.edu (request.getType() == CacheRequestType_ATOMIC)); 7016153Sgibson@cs.wisc.edu 7026153Sgibson@cs.wisc.edu conflictCallback(request, respondingMach, thread); 7036153Sgibson@cs.wisc.edu 7046153Sgibson@cs.wisc.edu} 7056153Sgibson@cs.wisc.edu 7066153Sgibson@cs.wisc.eduvoid Sequencer::conflictCallback(const CacheMsg& request, GenericMachineType respondingMach, int thread) { 7076153Sgibson@cs.wisc.edu assert(XACT_MEMORY); 7086153Sgibson@cs.wisc.edu int size = request.getSize(); 7096153Sgibson@cs.wisc.edu Address request_address = request.getAddress(); 7106153Sgibson@cs.wisc.edu Address request_logical_address = request.getLogicalAddress(); 7116153Sgibson@cs.wisc.edu Address request_line_address = line_address(request_address); 7126153Sgibson@cs.wisc.edu CacheRequestType type = request.getType(); 7136153Sgibson@cs.wisc.edu int threadID = request.getThreadID(); 7146153Sgibson@cs.wisc.edu Time issued_time = request.getTime(); 7156153Sgibson@cs.wisc.edu int logical_proc_no = ((m_chip_ptr->getID() * RubyConfig::numberOfProcsPerChip()) + m_version) * RubyConfig::numberofSMTThreads() + threadID; 7166153Sgibson@cs.wisc.edu 7176153Sgibson@cs.wisc.edu DEBUG_MSG(SEQUENCER_COMP, MedPrio, size); 7186153Sgibson@cs.wisc.edu 7196153Sgibson@cs.wisc.edu assert(g_eventQueue_ptr->getTime() >= issued_time); 7206153Sgibson@cs.wisc.edu Time miss_latency = g_eventQueue_ptr->getTime() - issued_time; 7216153Sgibson@cs.wisc.edu 7226153Sgibson@cs.wisc.edu if (PROTOCOL_DEBUG_TRACE) { 7236153Sgibson@cs.wisc.edu g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), -1, request.getAddress(), "", "Conflict", "", 7246153Sgibson@cs.wisc.edu int_to_string(miss_latency)+" cycles 
"+GenericMachineType_to_string(respondingMach)+" "+CacheRequestType_to_string(request.getType())+" "+PrefetchBit_to_string(request.getPrefetch())); 7256153Sgibson@cs.wisc.edu } 7266153Sgibson@cs.wisc.edu 7276153Sgibson@cs.wisc.edu DEBUG_MSG(SEQUENCER_COMP, MedPrio, request_address); 7286153Sgibson@cs.wisc.edu DEBUG_MSG(SEQUENCER_COMP, MedPrio, request.getPrefetch()); 7296153Sgibson@cs.wisc.edu if (request.getPrefetch() == PrefetchBit_Yes) { 7306153Sgibson@cs.wisc.edu DEBUG_MSG(SEQUENCER_COMP, MedPrio, "return"); 7316153Sgibson@cs.wisc.edu g_system_ptr->getProfiler()->swPrefetchLatency(miss_latency, type, respondingMach); 7326153Sgibson@cs.wisc.edu return; // Ignore the software prefetch, don't callback the driver 7336153Sgibson@cs.wisc.edu } 7346153Sgibson@cs.wisc.edu 7356153Sgibson@cs.wisc.edu bool write = 7366153Sgibson@cs.wisc.edu (type == CacheRequestType_ST) || 7376153Sgibson@cs.wisc.edu (type == CacheRequestType_ST_XACT) || 7386153Sgibson@cs.wisc.edu (type == CacheRequestType_LDX_XACT) || 7396153Sgibson@cs.wisc.edu (type == CacheRequestType_ATOMIC); 7406153Sgibson@cs.wisc.edu 7416153Sgibson@cs.wisc.edu // Copy the correct bytes out of the cache line into the subblock 7426153Sgibson@cs.wisc.edu SubBlock subblock(request_address, request_logical_address, size); 7436153Sgibson@cs.wisc.edu 7446153Sgibson@cs.wisc.edu // Call into the Driver 7456153Sgibson@cs.wisc.edu g_system_ptr->getDriver()->conflictCallback(m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version, subblock, type, threadID); 7466153Sgibson@cs.wisc.edu 7476153Sgibson@cs.wisc.edu // If the request was a Store or Atomic, apply the changes in the SubBlock to the DataBlock 7486153Sgibson@cs.wisc.edu // (This is only triggered for the non-TSO case) 7496153Sgibson@cs.wisc.edu if (write) { 7506153Sgibson@cs.wisc.edu assert(!TSO); 7516153Sgibson@cs.wisc.edu } 7526153Sgibson@cs.wisc.edu} 7536153Sgibson@cs.wisc.edu 7546145Snate@binkert.orgvoid Sequencer::printDebug(){ 7556145Snate@binkert.org 
//notify driver of debug 7566145Snate@binkert.org g_system_ptr->getDriver()->printDebug(); 7576145Snate@binkert.org} 7586145Snate@binkert.org 7596145Snate@binkert.org// Returns true if the sequencer already has a load or store outstanding 7606151Sdrh5@cs.wisc.edubool 7616151Sdrh5@cs.wisc.eduSequencer::isReady(const Packet* pkt) const 7626151Sdrh5@cs.wisc.edu{ 7636145Snate@binkert.org 7646151Sdrh5@cs.wisc.edu int cpu_number = pkt->req->contextId(); 7656151Sdrh5@cs.wisc.edu la_t logical_addr = pkt->req->getVaddr(); 7666151Sdrh5@cs.wisc.edu pa_t physical_addr = pkt->req->getPaddr(); 7676151Sdrh5@cs.wisc.edu CacheRequestType type_of_request; 7686151Sdrh5@cs.wisc.edu if ( pkt->req->isInstFetch() ) { 7696151Sdrh5@cs.wisc.edu type_of_request = CacheRequestType_IFETCH; 7706151Sdrh5@cs.wisc.edu } else if ( pkt->req->isLocked() || pkt->req->isSwap() ) { 7716151Sdrh5@cs.wisc.edu type_of_request = CacheRequestType_ATOMIC; 7726151Sdrh5@cs.wisc.edu } else if ( pkt->isRead() ) { 7736151Sdrh5@cs.wisc.edu type_of_request = CacheRequestType_LD; 7746151Sdrh5@cs.wisc.edu } else if ( pkt->isWrite() ) { 7756151Sdrh5@cs.wisc.edu type_of_request = CacheRequestType_ST; 7766151Sdrh5@cs.wisc.edu } else { 7776151Sdrh5@cs.wisc.edu assert(false); 7786151Sdrh5@cs.wisc.edu } 7796151Sdrh5@cs.wisc.edu int thread = pkt->req->threadId(); 7806151Sdrh5@cs.wisc.edu 7816151Sdrh5@cs.wisc.edu CacheMsg request(Address( physical_addr ), 7826151Sdrh5@cs.wisc.edu Address( physical_addr ), 7836151Sdrh5@cs.wisc.edu type_of_request, 7846151Sdrh5@cs.wisc.edu Address(0), 7856151Sdrh5@cs.wisc.edu AccessModeType_UserMode, // User/supervisor mode 7866151Sdrh5@cs.wisc.edu 0, // Size in bytes of request 7876151Sdrh5@cs.wisc.edu PrefetchBit_No, // Not a prefetch 7886151Sdrh5@cs.wisc.edu 0, // Version number 7896151Sdrh5@cs.wisc.edu Address(logical_addr), // Virtual Address 7906152Sdrh5@cs.wisc.edu thread // SMT thread 7916151Sdrh5@cs.wisc.edu ); 7926151Sdrh5@cs.wisc.edu isReady(request); 7936151Sdrh5@cs.wisc.edu} 
7946151Sdrh5@cs.wisc.edu 7956151Sdrh5@cs.wisc.edubool 7966151Sdrh5@cs.wisc.eduSequencer::isReady(const CacheMsg& request) const 7976151Sdrh5@cs.wisc.edu{ 7986145Snate@binkert.org if (m_outstanding_count >= g_SEQUENCER_OUTSTANDING_REQUESTS) { 7996145Snate@binkert.org //cout << "TOO MANY OUTSTANDING: " << m_outstanding_count << " " << g_SEQUENCER_OUTSTANDING_REQUESTS << " VER " << m_version << endl; 8006145Snate@binkert.org //printProgress(cout); 8016145Snate@binkert.org return false; 8026145Snate@binkert.org } 8036145Snate@binkert.org 8046145Snate@binkert.org // This code allows reads to be performed even when we have a write 8056145Snate@binkert.org // request outstanding for the line 8066145Snate@binkert.org bool write = 8076145Snate@binkert.org (request.getType() == CacheRequestType_ST) || 8086145Snate@binkert.org (request.getType() == CacheRequestType_ATOMIC); 8096145Snate@binkert.org 8106145Snate@binkert.org // LUKE - disallow more than one request type per address 8116145Snate@binkert.org // INVARIANT: at most one request type per address, per processor 8126145Snate@binkert.org int smt_threads = RubyConfig::numberofSMTThreads(); 8136145Snate@binkert.org for(int p=0; p < smt_threads; ++p){ 8146145Snate@binkert.org if( m_writeRequestTable_ptr[p]->exist(line_address(request.getAddress())) || 8156145Snate@binkert.org m_readRequestTable_ptr[p]->exist(line_address(request.getAddress())) ){ 8166145Snate@binkert.org //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl; 8176145Snate@binkert.org //printProgress(cout); 8186145Snate@binkert.org return false; 8196145Snate@binkert.org } 8206145Snate@binkert.org } 8216145Snate@binkert.org 8226145Snate@binkert.org if (TSO) { 8236145Snate@binkert.org return m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady(); 8246145Snate@binkert.org } 8256145Snate@binkert.org return true; 8266145Snate@binkert.org} 8276145Snate@binkert.org 8286153Sgibson@cs.wisc.edu// Called by Driver 
8296151Sdrh5@cs.wisc.eduvoid 8306151Sdrh5@cs.wisc.eduSequencer::makeRequest(const Packet* pkt, void* data) 8316151Sdrh5@cs.wisc.edu{ 8326151Sdrh5@cs.wisc.edu int cpu_number = pkt->req->contextId(); 8336151Sdrh5@cs.wisc.edu la_t logical_addr = pkt->req->getVaddr(); 8346151Sdrh5@cs.wisc.edu pa_t physical_addr = pkt->req->getPaddr(); 8356151Sdrh5@cs.wisc.edu int request_size = pkt->getSize(); 8366151Sdrh5@cs.wisc.edu CacheRequestType type_of_request; 8376151Sdrh5@cs.wisc.edu if ( pkt->req->isInstFetch() ) { 8386151Sdrh5@cs.wisc.edu type_of_request = CacheRequestType_IFETCH; 8396151Sdrh5@cs.wisc.edu } else if ( pkt->req->isLocked() || pkt->req->isSwap() ) { 8406151Sdrh5@cs.wisc.edu type_of_request = CacheRequestType_ATOMIC; 8416151Sdrh5@cs.wisc.edu } else if ( pkt->isRead() ) { 8426151Sdrh5@cs.wisc.edu type_of_request = CacheRequestType_LD; 8436151Sdrh5@cs.wisc.edu } else if ( pkt->isWrite() ) { 8446151Sdrh5@cs.wisc.edu type_of_request = CacheRequestType_ST; 8456151Sdrh5@cs.wisc.edu } else { 8466151Sdrh5@cs.wisc.edu assert(false); 8476151Sdrh5@cs.wisc.edu } 8486151Sdrh5@cs.wisc.edu la_t virtual_pc = pkt->req->getPC(); 8496151Sdrh5@cs.wisc.edu int isPriv = false; // TODO: get permission data 8506151Sdrh5@cs.wisc.edu int thread = pkt->req->threadId(); 8516151Sdrh5@cs.wisc.edu 8526151Sdrh5@cs.wisc.edu AccessModeType access_mode = AccessModeType_UserMode; // TODO: get actual permission 8536151Sdrh5@cs.wisc.edu 8546151Sdrh5@cs.wisc.edu CacheMsg request(Address( physical_addr ), 8556151Sdrh5@cs.wisc.edu Address( physical_addr ), 8566151Sdrh5@cs.wisc.edu type_of_request, 8576151Sdrh5@cs.wisc.edu Address(virtual_pc), 8586151Sdrh5@cs.wisc.edu access_mode, // User/supervisor mode 8596151Sdrh5@cs.wisc.edu request_size, // Size in bytes of request 8606151Sdrh5@cs.wisc.edu PrefetchBit_No, // Not a prefetch 8616151Sdrh5@cs.wisc.edu 0, // Version number 8626151Sdrh5@cs.wisc.edu Address(logical_addr), // Virtual Address 8636152Sdrh5@cs.wisc.edu thread // SMT thread 
8646151Sdrh5@cs.wisc.edu ); 8656151Sdrh5@cs.wisc.edu makeRequest(request); 8666151Sdrh5@cs.wisc.edu} 8676151Sdrh5@cs.wisc.edu 8686151Sdrh5@cs.wisc.eduvoid 8696151Sdrh5@cs.wisc.eduSequencer::makeRequest(const CacheMsg& request) 8706151Sdrh5@cs.wisc.edu{ 8716145Snate@binkert.org bool write = (request.getType() == CacheRequestType_ST) || 8726145Snate@binkert.org (request.getType() == CacheRequestType_ATOMIC); 8736145Snate@binkert.org 8746145Snate@binkert.org if (TSO && (request.getPrefetch() == PrefetchBit_No) && write) { 8756145Snate@binkert.org assert(m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady()); 8766145Snate@binkert.org m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->insertStore(request); 8776145Snate@binkert.org return; 8786145Snate@binkert.org } 8796145Snate@binkert.org 8806145Snate@binkert.org bool hit = doRequest(request); 8816145Snate@binkert.org 8826145Snate@binkert.org} 8836145Snate@binkert.org 8846145Snate@binkert.orgbool Sequencer::doRequest(const CacheMsg& request) { 8856145Snate@binkert.org bool hit = false; 8866145Snate@binkert.org // Check the fast path 8876145Snate@binkert.org DataBlock* data_ptr; 8886145Snate@binkert.org 8896145Snate@binkert.org int thread = request.getThreadID(); 8906145Snate@binkert.org 8916145Snate@binkert.org hit = tryCacheAccess(line_address(request.getAddress()), 8926145Snate@binkert.org request.getType(), 8936145Snate@binkert.org request.getProgramCounter(), 8946145Snate@binkert.org request.getAccessMode(), 8956145Snate@binkert.org request.getSize(), 8966145Snate@binkert.org data_ptr); 8976145Snate@binkert.org 8986145Snate@binkert.org if (hit && (request.getType() == CacheRequestType_IFETCH || !REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH) ) { 8996145Snate@binkert.org DEBUG_MSG(SEQUENCER_COMP, MedPrio, "Fast path hit"); 9006145Snate@binkert.org hitCallback(request, *data_ptr, GenericMachineType_L1Cache, thread); 9016145Snate@binkert.org return true; 9026145Snate@binkert.org } 9036145Snate@binkert.org 
9046145Snate@binkert.org if (TSO && (request.getType() == CacheRequestType_LD || request.getType() == CacheRequestType_IFETCH)) { 9056145Snate@binkert.org 9066145Snate@binkert.org // See if we can satisfy the load entirely from the store buffer 9076145Snate@binkert.org SubBlock subblock(line_address(request.getAddress()), request.getSize()); 9086145Snate@binkert.org if (m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->trySubBlock(subblock)) { 9096145Snate@binkert.org DataBlock dummy; 9106145Snate@binkert.org hitCallback(request, dummy, GenericMachineType_NULL, thread); // Call with an 'empty' datablock, since the data is in the store buffer 9116145Snate@binkert.org return true; 9126145Snate@binkert.org } 9136145Snate@binkert.org } 9146145Snate@binkert.org 9156145Snate@binkert.org DEBUG_MSG(SEQUENCER_COMP, MedPrio, "Fast path miss"); 9166145Snate@binkert.org issueRequest(request); 9176145Snate@binkert.org return hit; 9186145Snate@binkert.org} 9196145Snate@binkert.org 9206145Snate@binkert.orgvoid Sequencer::issueRequest(const CacheMsg& request) { 9216145Snate@binkert.org bool found = insertRequest(request); 9226145Snate@binkert.org 9236145Snate@binkert.org if (!found) { 9246145Snate@binkert.org CacheMsg msg = request; 9256145Snate@binkert.org msg.getAddress() = line_address(request.getAddress()); // Make line address 9266145Snate@binkert.org 9276145Snate@binkert.org // Fast Path L1 misses are profiled here - all non-fast path misses are profiled within the generated protocol code 9286145Snate@binkert.org if (!REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH) { 9296145Snate@binkert.org g_system_ptr->getProfiler()->addPrimaryStatSample(msg, m_chip_ptr->getID()); 9306145Snate@binkert.org } 9316145Snate@binkert.org 9326145Snate@binkert.org if (PROTOCOL_DEBUG_TRACE) { 9336145Snate@binkert.org g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip() + m_version), -1, msg.getAddress(),"", "Begin", "", 
CacheRequestType_to_string(request.getType())); 9346145Snate@binkert.org } 9356145Snate@binkert.org 9366145Snate@binkert.org#if 0 9376145Snate@binkert.org // Commented out by nate binkert because I removed the trace stuff 9386145Snate@binkert.org if (g_system_ptr->getTracer()->traceEnabled()) { 9396145Snate@binkert.org g_system_ptr->getTracer()->traceRequest((m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), msg.getAddress(), msg.getProgramCounter(), 9406145Snate@binkert.org msg.getType(), g_eventQueue_ptr->getTime()); 9416145Snate@binkert.org } 9426145Snate@binkert.org#endif 9436145Snate@binkert.org 9446145Snate@binkert.org Time latency = 0; // initialzed to an null value 9456145Snate@binkert.org 9466145Snate@binkert.org latency = SEQUENCER_TO_CONTROLLER_LATENCY; 9476145Snate@binkert.org 9486145Snate@binkert.org // Send the message to the cache controller 9496145Snate@binkert.org assert(latency > 0); 9506145Snate@binkert.org m_chip_ptr->m_L1Cache_mandatoryQueue_vec[m_version]->enqueue(msg, latency); 9516145Snate@binkert.org 9526145Snate@binkert.org } // !found 9536145Snate@binkert.org} 9546145Snate@binkert.org 9556145Snate@binkert.orgbool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type, 9566145Snate@binkert.org const Address& pc, AccessModeType access_mode, 9576145Snate@binkert.org int size, DataBlock*& data_ptr) { 9586145Snate@binkert.org if (type == CacheRequestType_IFETCH) { 9596145Snate@binkert.org if (Protocol::m_TwoLevelCache) { 9606145Snate@binkert.org return m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr); 9616145Snate@binkert.org } 9626145Snate@binkert.org else { 9636145Snate@binkert.org return m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr); 9646145Snate@binkert.org } 9656145Snate@binkert.org } else { 9666145Snate@binkert.org if (Protocol::m_TwoLevelCache) { 9676145Snate@binkert.org return 
m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr); 9686145Snate@binkert.org } 9696145Snate@binkert.org else { 9706145Snate@binkert.org return m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr); 9716145Snate@binkert.org } 9726145Snate@binkert.org } 9736145Snate@binkert.org} 9746145Snate@binkert.org 9756145Snate@binkert.orgvoid Sequencer::resetRequestTime(const Address& addr, int thread){ 9766145Snate@binkert.org assert(thread >= 0); 9776145Snate@binkert.org //reset both load and store requests, if they exist 9786145Snate@binkert.org if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){ 9796145Snate@binkert.org CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr); 9806145Snate@binkert.org if( request.m_AccessMode != AccessModeType_UserMode){ 9816145Snate@binkert.org cout << "resetRequestType ERROR read request addr = " << addr << " thread = "<< thread << " is SUPERVISOR MODE" << endl; 9826145Snate@binkert.org printProgress(cout); 9836145Snate@binkert.org } 9846145Snate@binkert.org //ASSERT(request.m_AccessMode == AccessModeType_UserMode); 9856145Snate@binkert.org request.setTime(g_eventQueue_ptr->getTime()); 9866145Snate@binkert.org } 9876145Snate@binkert.org if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){ 9886145Snate@binkert.org CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr); 9896145Snate@binkert.org if( request.m_AccessMode != AccessModeType_UserMode){ 9906145Snate@binkert.org cout << "resetRequestType ERROR write request addr = " << addr << " thread = "<< thread << " is SUPERVISOR MODE" << endl; 9916145Snate@binkert.org printProgress(cout); 9926145Snate@binkert.org } 9936145Snate@binkert.org //ASSERT(request.m_AccessMode == AccessModeType_UserMode); 9946145Snate@binkert.org request.setTime(g_eventQueue_ptr->getTime()); 9956145Snate@binkert.org } 9966145Snate@binkert.org} 9976145Snate@binkert.org 
9986145Snate@binkert.org// removes load request from queue 9996145Snate@binkert.orgvoid Sequencer::removeLoadRequest(const Address & addr, int thread){ 10006145Snate@binkert.org removeRequest(getReadRequest(addr, thread)); 10016145Snate@binkert.org} 10026145Snate@binkert.org 10036145Snate@binkert.orgvoid Sequencer::removeStoreRequest(const Address & addr, int thread){ 10046145Snate@binkert.org removeRequest(getWriteRequest(addr, thread)); 10056145Snate@binkert.org} 10066145Snate@binkert.org 10076145Snate@binkert.org// returns the read CacheMsg 10086145Snate@binkert.orgCacheMsg & Sequencer::getReadRequest( const Address & addr, int thread ){ 10096145Snate@binkert.org Address temp = addr; 10106145Snate@binkert.org assert(thread >= 0); 10116145Snate@binkert.org assert(temp == line_address(temp)); 10126145Snate@binkert.org assert(m_readRequestTable_ptr[thread]->exist(addr)); 10136145Snate@binkert.org return m_readRequestTable_ptr[thread]->lookup(addr); 10146145Snate@binkert.org} 10156145Snate@binkert.org 10166145Snate@binkert.orgCacheMsg & Sequencer::getWriteRequest( const Address & addr, int thread){ 10176145Snate@binkert.org Address temp = addr; 10186145Snate@binkert.org assert(thread >= 0); 10196145Snate@binkert.org assert(temp == line_address(temp)); 10206145Snate@binkert.org assert(m_writeRequestTable_ptr[thread]->exist(addr)); 10216145Snate@binkert.org return m_writeRequestTable_ptr[thread]->lookup(addr); 10226145Snate@binkert.org} 10236145Snate@binkert.org 10246145Snate@binkert.orgvoid Sequencer::print(ostream& out) const { 10256145Snate@binkert.org out << "[Sequencer: " << m_chip_ptr->getID() 10266145Snate@binkert.org << ", outstanding requests: " << m_outstanding_count; 10276145Snate@binkert.org 10286145Snate@binkert.org int smt_threads = RubyConfig::numberofSMTThreads(); 10296145Snate@binkert.org for(int p=0; p < smt_threads; ++p){ 10306145Snate@binkert.org out << ", read request table[ " << p << " ]: " << *m_readRequestTable_ptr[p] 10316145Snate@binkert.org 
<< ", write request table[ " << p << " ]: " << *m_writeRequestTable_ptr[p]; 10326145Snate@binkert.org } 10336145Snate@binkert.org out << "]"; 10346145Snate@binkert.org} 10356145Snate@binkert.org 10366145Snate@binkert.org// this can be called from setState whenever coherence permissions are upgraded 10376145Snate@binkert.org// when invoked, coherence violations will be checked for the given block 10386145Snate@binkert.orgvoid Sequencer::checkCoherence(const Address& addr) { 10396145Snate@binkert.org#ifdef CHECK_COHERENCE 10406145Snate@binkert.org g_system_ptr->checkGlobalCoherenceInvariant(addr); 10416145Snate@binkert.org#endif 10426145Snate@binkert.org} 10436145Snate@binkert.org 10446145Snate@binkert.orgbool Sequencer::getRubyMemoryValue(const Address& addr, char* value, 10456145Snate@binkert.org unsigned int size_in_bytes ) { 10466153Sgibson@cs.wisc.edu if(g_SIMULATING){ 10476145Snate@binkert.org for(unsigned int i=0; i < size_in_bytes; i++) { 10486153Sgibson@cs.wisc.edu std::cerr << __FILE__ << "(" << __LINE__ << "): Not implemented. " << std::endl; 10496153Sgibson@cs.wisc.edu value[i] = 0; // _read_physical_memory( m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version, 10506153Sgibson@cs.wisc.edu // addr.getAddress() + i, 1 ); 10516145Snate@binkert.org } 10526145Snate@binkert.org return false; // Do nothing? 
10536145Snate@binkert.org } else { 10546145Snate@binkert.org bool found = false; 10556145Snate@binkert.org const Address lineAddr = line_address(addr); 10566145Snate@binkert.org DataBlock data; 10576145Snate@binkert.org PhysAddress paddr(addr); 10586145Snate@binkert.org DataBlock* dataPtr = &data; 10596145Snate@binkert.org Chip* n = dynamic_cast<Chip*>(m_chip_ptr); 10606145Snate@binkert.org // LUKE - use variable names instead of macros 10616145Snate@binkert.org assert(n->m_L1Cache_L1IcacheMemory_vec[m_version] != NULL); 10626145Snate@binkert.org assert(n->m_L1Cache_L1DcacheMemory_vec[m_version] != NULL); 10636145Snate@binkert.org 10646145Snate@binkert.org MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() ); 10656145Snate@binkert.org int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip(); 10666145Snate@binkert.org 10676145Snate@binkert.org if (Protocol::m_TwoLevelCache) { 10686145Snate@binkert.org if(Protocol::m_CMP){ 10696145Snate@binkert.org assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL); 10706145Snate@binkert.org } 10716145Snate@binkert.org else{ 10726145Snate@binkert.org assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL); 10736145Snate@binkert.org } 10746145Snate@binkert.org } 10756145Snate@binkert.org 10766145Snate@binkert.org if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){ 10776145Snate@binkert.org n->m_L1Cache_L1IcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes); 10786145Snate@binkert.org found = true; 10796145Snate@binkert.org } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){ 10806145Snate@binkert.org n->m_L1Cache_L1DcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes); 10816145Snate@binkert.org found = true; 10826145Snate@binkert.org } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){ 
10836145Snate@binkert.org n->m_L2Cache_L2cacheMemory_vec[l2_ver]->getMemoryValue(addr, value, size_in_bytes); 10846145Snate@binkert.org found = true; 10856151Sdrh5@cs.wisc.edu // } else if (n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr)){ 10866151Sdrh5@cs.wisc.edu // ASSERT(n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr)); 10876151Sdrh5@cs.wisc.edu // L1Cache_TBE tbeEntry = n->TBE_TABLE_MEMBER_VARIABLE->lookup(lineAddr); 10886145Snate@binkert.org 10896151Sdrh5@cs.wisc.edu // int offset = addr.getOffset(); 10906151Sdrh5@cs.wisc.edu // for(int i=0; i<size_in_bytes; ++i){ 10916151Sdrh5@cs.wisc.edu // value[i] = tbeEntry.getDataBlk().getByte(offset + i); 10926151Sdrh5@cs.wisc.edu // } 10936145Snate@binkert.org 10946151Sdrh5@cs.wisc.edu // found = true; 10956145Snate@binkert.org } else { 10966145Snate@binkert.org // Address not found 10976145Snate@binkert.org //cout << " " << m_chip_ptr->getID() << " NOT IN CACHE, Value at Directory is: " << (int) value[0] << endl; 10986145Snate@binkert.org n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip())); 10996145Snate@binkert.org int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip(); 11006145Snate@binkert.org for(unsigned int i=0; i<size_in_bytes; ++i){ 11016145Snate@binkert.org int offset = addr.getOffset(); 11026145Snate@binkert.org value[i] = n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.getByte(offset + i); 11036145Snate@binkert.org } 11046145Snate@binkert.org // Address not found 11056145Snate@binkert.org //WARN_MSG("Couldn't find address"); 11066145Snate@binkert.org //WARN_EXPR(addr); 11076145Snate@binkert.org found = false; 11086145Snate@binkert.org } 11096145Snate@binkert.org return true; 11106145Snate@binkert.org } 11116145Snate@binkert.org} 11126145Snate@binkert.org 11136145Snate@binkert.orgbool Sequencer::setRubyMemoryValue(const Address& addr, char *value, 11146145Snate@binkert.org 
unsigned int size_in_bytes) { 11156145Snate@binkert.org char test_buffer[64]; 11166145Snate@binkert.org 11176153Sgibson@cs.wisc.edu if(g_SIMULATING){ 11186145Snate@binkert.org return false; // Do nothing? 11196145Snate@binkert.org } else { 11206145Snate@binkert.org // idea here is that coherent cache should find the 11216145Snate@binkert.org // latest data, the update it 11226145Snate@binkert.org bool found = false; 11236145Snate@binkert.org const Address lineAddr = line_address(addr); 11246145Snate@binkert.org PhysAddress paddr(addr); 11256145Snate@binkert.org DataBlock data; 11266145Snate@binkert.org DataBlock* dataPtr = &data; 11276145Snate@binkert.org Chip* n = dynamic_cast<Chip*>(m_chip_ptr); 11286145Snate@binkert.org 11296145Snate@binkert.org MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() ); 11306145Snate@binkert.org int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip(); 11316145Snate@binkert.org // LUKE - use variable names instead of macros 11326145Snate@binkert.org //cout << "number of L2caches per chip = " << RubyConfig::numberOfL2CachePerChip(m_version) << endl; 11336145Snate@binkert.org //cout << "L1I cache vec size = " << n->m_L1Cache_L1IcacheMemory_vec.size() << endl; 11346145Snate@binkert.org //cout << "L1D cache vec size = " << n->m_L1Cache_L1DcacheMemory_vec.size() << endl; 11356145Snate@binkert.org //cout << "L1cache_cachememory size = " << n->m_L1Cache_cacheMemory_vec.size() << endl; 11366145Snate@binkert.org //cout << "L1cache_l2cachememory size = " << n->m_L1Cache_L2cacheMemory_vec.size() << endl; 11376145Snate@binkert.org // if (Protocol::m_TwoLevelCache) { 11386151Sdrh5@cs.wisc.edu // if(Protocol::m_CMP){ 11396151Sdrh5@cs.wisc.edu // cout << "CMP L2 cache vec size = " << n->m_L2Cache_L2cacheMemory_vec.size() << endl; 11406151Sdrh5@cs.wisc.edu // } 11416151Sdrh5@cs.wisc.edu // else{ 11426151Sdrh5@cs.wisc.edu // cout << "L2 cache vec size = " << n->m_L1Cache_cacheMemory_vec.size() << endl; 
11436151Sdrh5@cs.wisc.edu // } 11446151Sdrh5@cs.wisc.edu // } 11456145Snate@binkert.org 11466145Snate@binkert.org assert(n->m_L1Cache_L1IcacheMemory_vec[m_version] != NULL); 11476145Snate@binkert.org assert(n->m_L1Cache_L1DcacheMemory_vec[m_version] != NULL); 11486145Snate@binkert.org if (Protocol::m_TwoLevelCache) { 11496145Snate@binkert.org if(Protocol::m_CMP){ 11506145Snate@binkert.org assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL); 11516145Snate@binkert.org } 11526145Snate@binkert.org else{ 11536145Snate@binkert.org assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL); 11546145Snate@binkert.org } 11556145Snate@binkert.org } 11566145Snate@binkert.org 11576145Snate@binkert.org if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){ 11586145Snate@binkert.org n->m_L1Cache_L1IcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes); 11596145Snate@binkert.org found = true; 11606145Snate@binkert.org } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){ 11616145Snate@binkert.org n->m_L1Cache_L1DcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes); 11626145Snate@binkert.org found = true; 11636145Snate@binkert.org } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){ 11646145Snate@binkert.org n->m_L2Cache_L2cacheMemory_vec[l2_ver]->setMemoryValue(addr, value, size_in_bytes); 11656145Snate@binkert.org found = true; 11666151Sdrh5@cs.wisc.edu // } else if (n->TBE_TABLE_MEMBER_VARIABLE->isTagPresent(lineAddr)){ 11676151Sdrh5@cs.wisc.edu // L1Cache_TBE& tbeEntry = n->TBE_TABLE_MEMBER_VARIABLE->lookup(lineAddr); 11686151Sdrh5@cs.wisc.edu // DataBlock tmpData; 11696151Sdrh5@cs.wisc.edu // int offset = addr.getOffset(); 11706151Sdrh5@cs.wisc.edu // for(int i=0; i<size_in_bytes; ++i){ 11716151Sdrh5@cs.wisc.edu // tmpData.setByte(offset + i, value[i]); 
11726151Sdrh5@cs.wisc.edu // } 11736151Sdrh5@cs.wisc.edu // tbeEntry.setDataBlk(tmpData); 11746151Sdrh5@cs.wisc.edu // tbeEntry.setDirty(true); 11756145Snate@binkert.org } else { 11766145Snate@binkert.org // Address not found 11776145Snate@binkert.org n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip())); 11786145Snate@binkert.org int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip(); 11796145Snate@binkert.org for(unsigned int i=0; i<size_in_bytes; ++i){ 11806145Snate@binkert.org int offset = addr.getOffset(); 11816145Snate@binkert.org n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.setByte(offset + i, value[i]); 11826145Snate@binkert.org } 11836145Snate@binkert.org found = false; 11846145Snate@binkert.org } 11856145Snate@binkert.org 11866145Snate@binkert.org if (found){ 11876145Snate@binkert.org found = getRubyMemoryValue(addr, test_buffer, size_in_bytes); 11886145Snate@binkert.org assert(found); 11896145Snate@binkert.org if(value[0] != test_buffer[0]){ 11906145Snate@binkert.org WARN_EXPR((int) value[0]); 11916145Snate@binkert.org WARN_EXPR((int) test_buffer[0]); 11926145Snate@binkert.org ERROR_MSG("setRubyMemoryValue failed to set value."); 11936145Snate@binkert.org } 11946145Snate@binkert.org } 11956145Snate@binkert.org 11966145Snate@binkert.org return true; 11976145Snate@binkert.org } 11986145Snate@binkert.org} 11996153Sgibson@cs.wisc.edu 1200