Sequencer.cc revision 6899
16145Snate@binkert.org
26145Snate@binkert.org/*
36145Snate@binkert.org * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
46145Snate@binkert.org * All rights reserved.
56145Snate@binkert.org *
66145Snate@binkert.org * Redistribution and use in source and binary forms, with or without
76145Snate@binkert.org * modification, are permitted provided that the following conditions are
86145Snate@binkert.org * met: redistributions of source code must retain the above copyright
96145Snate@binkert.org * notice, this list of conditions and the following disclaimer;
106145Snate@binkert.org * redistributions in binary form must reproduce the above copyright
116145Snate@binkert.org * notice, this list of conditions and the following disclaimer in the
126145Snate@binkert.org * documentation and/or other materials provided with the distribution;
136145Snate@binkert.org * neither the name of the copyright holders nor the names of its
146145Snate@binkert.org * contributors may be used to endorse or promote products derived from
156145Snate@binkert.org * this software without specific prior written permission.
166145Snate@binkert.org *
176145Snate@binkert.org * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
186145Snate@binkert.org * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
196145Snate@binkert.org * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
206145Snate@binkert.org * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
216145Snate@binkert.org * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
226145Snate@binkert.org * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
236145Snate@binkert.org * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
246145Snate@binkert.org * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
256145Snate@binkert.org * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
266145Snate@binkert.org * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
276145Snate@binkert.org * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
286145Snate@binkert.org */
296145Snate@binkert.org
306845Sdrh5@cs.wisc.edu#include "mem/ruby/libruby.hh"
316154Snate@binkert.org#include "mem/ruby/common/Global.hh"
326154Snate@binkert.org#include "mem/ruby/system/Sequencer.hh"
336154Snate@binkert.org#include "mem/ruby/system/System.hh"
346154Snate@binkert.org#include "mem/protocol/Protocol.hh"
356154Snate@binkert.org#include "mem/ruby/profiler/Profiler.hh"
366154Snate@binkert.org#include "mem/ruby/system/CacheMemory.hh"
376285Snate@binkert.org#include "mem/protocol/CacheMsg.hh"
386285Snate@binkert.org#include "mem/ruby/recorder/Tracer.hh"
396154Snate@binkert.org#include "mem/ruby/common/SubBlock.hh"
406154Snate@binkert.org#include "mem/protocol/Protocol.hh"
416154Snate@binkert.org#include "mem/gems_common/Map.hh"
426285Snate@binkert.org#include "mem/ruby/buffers/MessageBuffer.hh"
436285Snate@binkert.org#include "mem/ruby/slicc_interface/AbstractController.hh"
446899SBrad.Beckmann@amd.com#include "cpu/rubytest/RubyTester.hh"
456145Snate@binkert.org
466876Ssteve.reinhardt@amd.com#include "params/RubySequencer.hh"
476876Ssteve.reinhardt@amd.com
//Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q)

// Status code returned by makeRequest() when a Locked_Write (the SC half
// of an LL/SC pair) fails because the line's lock flag was cleared.
#define LLSC_FAIL -2
// NOTE(review): 'already' is never referenced in this file; presumably
// leftover debugging state -- confirm no other translation unit uses it
// before removing.
long int already = 0;
526876Ssteve.reinhardt@amd.com
// Factory hook invoked by the python configuration system to construct a
// Sequencer from its SimObject params.
Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}
586876Ssteve.reinhardt@amd.com
596876Ssteve.reinhardt@amd.comSequencer::Sequencer(const Params *p)
606886SBrad.Beckmann@amd.com    : RubyPort(p), deadlockCheckEvent(this)
616876Ssteve.reinhardt@amd.com{
626876Ssteve.reinhardt@amd.com    m_store_waiting_on_load_cycles = 0;
636876Ssteve.reinhardt@amd.com    m_store_waiting_on_store_cycles = 0;
646876Ssteve.reinhardt@amd.com    m_load_waiting_on_store_cycles = 0;
656876Ssteve.reinhardt@amd.com    m_load_waiting_on_load_cycles = 0;
666876Ssteve.reinhardt@amd.com
676876Ssteve.reinhardt@amd.com    m_outstanding_count = 0;
686285Snate@binkert.org
696876Ssteve.reinhardt@amd.com    m_max_outstanding_requests = 0;
706876Ssteve.reinhardt@amd.com    m_deadlock_threshold = 0;
716876Ssteve.reinhardt@amd.com    m_instCache_ptr = NULL;
726876Ssteve.reinhardt@amd.com    m_dataCache_ptr = NULL;
736145Snate@binkert.org
746876Ssteve.reinhardt@amd.com    m_instCache_ptr = p->icache;
756876Ssteve.reinhardt@amd.com    m_dataCache_ptr = p->dcache;
766876Ssteve.reinhardt@amd.com    m_max_outstanding_requests = p->max_outstanding_requests;
776876Ssteve.reinhardt@amd.com    m_deadlock_threshold = p->deadlock_threshold;
786899SBrad.Beckmann@amd.com    m_usingRubyTester = p->using_ruby_tester;
796899SBrad.Beckmann@amd.com
806876Ssteve.reinhardt@amd.com    assert(m_max_outstanding_requests > 0);
816876Ssteve.reinhardt@amd.com    assert(m_deadlock_threshold > 0);
826876Ssteve.reinhardt@amd.com    assert(m_instCache_ptr != NULL);
836876Ssteve.reinhardt@amd.com    assert(m_dataCache_ptr != NULL);
846145Snate@binkert.org}
856145Snate@binkert.org
// Destructor: nothing to tear down -- the caches, controller and message
// buffers referenced by this object are owned elsewhere.
Sequencer::~Sequencer() {

}
896145Snate@binkert.org
906145Snate@binkert.orgvoid Sequencer::wakeup() {
916145Snate@binkert.org  // Check for deadlock of any of the requests
926145Snate@binkert.org  Time current_time = g_eventQueue_ptr->getTime();
936145Snate@binkert.org
946145Snate@binkert.org  // Check across all outstanding requests
956145Snate@binkert.org  int total_outstanding = 0;
966285Snate@binkert.org
976285Snate@binkert.org  Vector<Address> keys = m_readRequestTable.keys();
986285Snate@binkert.org  for (int i=0; i<keys.size(); i++) {
996285Snate@binkert.org    SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
1006285Snate@binkert.org    if (current_time - request->issue_time >= m_deadlock_threshold) {
1016285Snate@binkert.org      WARN_MSG("Possible Deadlock detected");
1026510Spdudnik@gmail.com      WARN_EXPR(request);
1036285Snate@binkert.org      WARN_EXPR(m_version);
1046825Spdudnik@gmail.com      WARN_EXPR(request->ruby_request.paddr);
1056285Snate@binkert.org      WARN_EXPR(keys.size());
1066285Snate@binkert.org      WARN_EXPR(current_time);
1076285Snate@binkert.org      WARN_EXPR(request->issue_time);
1086285Snate@binkert.org      WARN_EXPR(current_time - request->issue_time);
1096285Snate@binkert.org      ERROR_MSG("Aborting");
1106145Snate@binkert.org    }
1116285Snate@binkert.org  }
1126145Snate@binkert.org
1136285Snate@binkert.org  keys = m_writeRequestTable.keys();
1146285Snate@binkert.org  for (int i=0; i<keys.size(); i++) {
1156285Snate@binkert.org    SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
1166285Snate@binkert.org    if (current_time - request->issue_time >= m_deadlock_threshold) {
1176285Snate@binkert.org      WARN_MSG("Possible Deadlock detected");
1186510Spdudnik@gmail.com      WARN_EXPR(request);
1196285Snate@binkert.org      WARN_EXPR(m_version);
1206285Snate@binkert.org      WARN_EXPR(current_time);
1216285Snate@binkert.org      WARN_EXPR(request->issue_time);
1226285Snate@binkert.org      WARN_EXPR(current_time - request->issue_time);
1236285Snate@binkert.org      WARN_EXPR(keys.size());
1246285Snate@binkert.org      ERROR_MSG("Aborting");
1256145Snate@binkert.org    }
1266285Snate@binkert.org  }
1276285Snate@binkert.org  total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();
1286285Snate@binkert.org
1296145Snate@binkert.org  assert(m_outstanding_count == total_outstanding);
1306145Snate@binkert.org
1316145Snate@binkert.org  if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
1326886SBrad.Beckmann@amd.com    schedule(deadlockCheckEvent,
1336886SBrad.Beckmann@amd.com             (m_deadlock_threshold * g_eventQueue_ptr->getClock()) + curTick);
1346145Snate@binkert.org  }
1356145Snate@binkert.org}
1366145Snate@binkert.org
1376859Sdrh5@cs.wisc.eduvoid Sequencer::printStats(ostream & out) const {
1386859Sdrh5@cs.wisc.edu  out << "Sequencer: " << m_name << endl;
1396859Sdrh5@cs.wisc.edu  out << "  store_waiting_on_load_cycles: " << m_store_waiting_on_load_cycles << endl;
1406859Sdrh5@cs.wisc.edu  out << "  store_waiting_on_store_cycles: " << m_store_waiting_on_store_cycles << endl;
1416859Sdrh5@cs.wisc.edu  out << "  load_waiting_on_load_cycles: " << m_load_waiting_on_load_cycles << endl;
1426859Sdrh5@cs.wisc.edu  out << "  load_waiting_on_store_cycles: " << m_load_waiting_on_store_cycles << endl;
1436859Sdrh5@cs.wisc.edu}
1446859Sdrh5@cs.wisc.edu
// Intentionally a no-op.  The request-table dump below predates the
// SequencerRequest rewrite (it still references the old CacheMsg-based
// write table) and no longer compiles; it is kept commented out only as a
// reference for what this hook used to print.
void Sequencer::printProgress(ostream& out) const{
  /*
  int total_demand = 0;
  out << "Sequencer Stats Version " << m_version << endl;
  out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
  out << "---------------" << endl;
  out << "outstanding requests" << endl;

  Vector<Address> rkeys = m_readRequestTable.keys();
  int read_size = rkeys.size();
  out << "proc " << m_version << " Read Requests = " << read_size << endl;
  // print the request table
  for(int i=0; i < read_size; ++i){
    SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i]  << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
    total_demand++;
  }

  Vector<Address> wkeys = m_writeRequestTable.keys();
  int write_size = wkeys.size();
  out << "proc " << m_version << " Write Requests = " << write_size << endl;
  // print the request table
  for(int i=0; i < write_size; ++i){
      CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]);
      out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i]  << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
      if( request.getPrefetch() == PrefetchBit_No ){
        total_demand++;
      }
  }

  out << endl;

  out << "Total Number Outstanding: " << m_outstanding_count << endl;
  out << "Total Number Demand     : " << total_demand << endl;
  out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
  out << endl;
  out << endl;
  */
}
1846145Snate@binkert.org
1856285Snate@binkert.orgvoid Sequencer::printConfig(ostream& out) const {
1866285Snate@binkert.org  out << "Seqeuncer config: " << m_name << endl;
1876285Snate@binkert.org  out << "  controller: " << m_controller->getName() << endl;
1886285Snate@binkert.org  out << "  version: " << m_version << endl;
1896285Snate@binkert.org  out << "  max_outstanding_requests: " << m_max_outstanding_requests << endl;
1906285Snate@binkert.org  out << "  deadlock_threshold: " << m_deadlock_threshold << endl;
1916145Snate@binkert.org}
1926145Snate@binkert.org
1936145Snate@binkert.org// Insert the request on the correct request table.  Return true if
1946145Snate@binkert.org// the entry was already present.
1956285Snate@binkert.orgbool Sequencer::insertRequest(SequencerRequest* request) {
1966285Snate@binkert.org  int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
1976285Snate@binkert.org
1986145Snate@binkert.org  assert(m_outstanding_count == total_outstanding);
1996145Snate@binkert.org
2006145Snate@binkert.org  // See if we should schedule a deadlock check
2016886SBrad.Beckmann@amd.com  if (deadlockCheckEvent.scheduled() == false) {
2026893SBrad.Beckmann@amd.com    schedule(deadlockCheckEvent, m_deadlock_threshold + curTick);
2036145Snate@binkert.org  }
2046145Snate@binkert.org
2056285Snate@binkert.org  Address line_addr(request->ruby_request.paddr);
2066285Snate@binkert.org  line_addr.makeLineAddress();
2076285Snate@binkert.org  if ((request->ruby_request.type == RubyRequestType_ST) ||
2086355Spdudnik@gmail.com      (request->ruby_request.type == RubyRequestType_RMW_Read) ||
2096355Spdudnik@gmail.com      (request->ruby_request.type == RubyRequestType_RMW_Write) ||
2106350Spdudnik@gmail.com      (request->ruby_request.type == RubyRequestType_Locked_Read) ||
2116350Spdudnik@gmail.com      (request->ruby_request.type == RubyRequestType_Locked_Write)) {
2126285Snate@binkert.org    if (m_writeRequestTable.exist(line_addr)) {
2136285Snate@binkert.org      m_writeRequestTable.lookup(line_addr) = request;
2146285Snate@binkert.org      //      return true;
2156285Snate@binkert.org      assert(0); // drh5: isn't this an error?  do you lose the initial request?
2166145Snate@binkert.org    }
2176285Snate@binkert.org    m_writeRequestTable.allocate(line_addr);
2186285Snate@binkert.org    m_writeRequestTable.lookup(line_addr) = request;
2196145Snate@binkert.org    m_outstanding_count++;
2206145Snate@binkert.org  } else {
2216285Snate@binkert.org    if (m_readRequestTable.exist(line_addr)) {
2226285Snate@binkert.org      m_readRequestTable.lookup(line_addr) = request;
2236285Snate@binkert.org      //      return true;
2246285Snate@binkert.org      assert(0); // drh5: isn't this an error?  do you lose the initial request?
2256145Snate@binkert.org    }
2266285Snate@binkert.org    m_readRequestTable.allocate(line_addr);
2276285Snate@binkert.org    m_readRequestTable.lookup(line_addr) = request;
2286145Snate@binkert.org    m_outstanding_count++;
2296145Snate@binkert.org  }
2306145Snate@binkert.org
2316145Snate@binkert.org  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);
2326145Snate@binkert.org
2336285Snate@binkert.org  total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
2346285Snate@binkert.org  assert(m_outstanding_count == total_outstanding);
2356145Snate@binkert.org
2366145Snate@binkert.org  return false;
2376145Snate@binkert.org}
2386145Snate@binkert.org
2396285Snate@binkert.orgvoid Sequencer::removeRequest(SequencerRequest* srequest) {
2406145Snate@binkert.org
2416285Snate@binkert.org  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
2426285Snate@binkert.org
2436285Snate@binkert.org  const RubyRequest & ruby_request = srequest->ruby_request;
2446285Snate@binkert.org  Address line_addr(ruby_request.paddr);
2456285Snate@binkert.org  line_addr.makeLineAddress();
2466285Snate@binkert.org  if ((ruby_request.type == RubyRequestType_ST) ||
2476355Spdudnik@gmail.com      (ruby_request.type == RubyRequestType_RMW_Read) ||
2486355Spdudnik@gmail.com      (ruby_request.type == RubyRequestType_RMW_Write) ||
2496350Spdudnik@gmail.com      (ruby_request.type == RubyRequestType_Locked_Read) ||
2506350Spdudnik@gmail.com      (ruby_request.type == RubyRequestType_Locked_Write)) {
2516285Snate@binkert.org    m_writeRequestTable.deallocate(line_addr);
2526145Snate@binkert.org  } else {
2536285Snate@binkert.org    m_readRequestTable.deallocate(line_addr);
2546145Snate@binkert.org  }
2556145Snate@binkert.org  m_outstanding_count--;
2566145Snate@binkert.org
2576285Snate@binkert.org  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
2586145Snate@binkert.org}
2596145Snate@binkert.org
// Completion path for write-table requests (stores, atomics, LL/SC).
// Called by the cache controller when the request for this cache line has
// been satisfied; 'data' holds the current contents of the block.
void Sequencer::writeCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_writeRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_writeRequestTable.lookup(address);

  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_ST) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_RMW_Write) ||
         (request->ruby_request.type == RubyRequestType_Locked_Read) ||
         (request->ruby_request.type == RubyRequestType_Locked_Write));

  // Locked_Read (LL): set this line's lock bit; the matching Locked_Write
  // (SC) checks it in makeRequest().
  if (request->ruby_request.type == RubyRequestType_Locked_Read) {
    m_dataCache_ptr->setLocked(address, m_version);
  }
  // RMW_Read: first half of an atomic -- block the mandatory queue on this
  // address so nothing slips in before the RMW_Write completes.
  else if (request->ruby_request.type == RubyRequestType_RMW_Read) {
    m_controller->blockOnQueue(address, m_mandatory_q_ptr);
  }
  // RMW_Write: second half of the atomic -- release the block.
  else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
    m_controller->unblock(address);
  }

  hitCallback(request, data);
}
2876145Snate@binkert.org
2886145Snate@binkert.orgvoid Sequencer::readCallback(const Address& address, DataBlock& data) {
2896145Snate@binkert.org
2906285Snate@binkert.org  assert(address == line_address(address));
2916285Snate@binkert.org  assert(m_readRequestTable.exist(line_address(address)));
2926145Snate@binkert.org
2936285Snate@binkert.org  SequencerRequest* request = m_readRequestTable.lookup(address);
2946285Snate@binkert.org  removeRequest(request);
2956285Snate@binkert.org
2966285Snate@binkert.org  assert((request->ruby_request.type == RubyRequestType_LD) ||
2976381Sdrh5@cs.wisc.edu	 (request->ruby_request.type == RubyRequestType_RMW_Read) ||
2986285Snate@binkert.org         (request->ruby_request.type == RubyRequestType_IFETCH));
2996285Snate@binkert.org
3006285Snate@binkert.org  hitCallback(request, data);
3016145Snate@binkert.org}
3026145Snate@binkert.org
// Common completion tail for read and write callbacks: touch the cache's
// MRU state, profile the miss latency, move data between the DataBlock and
// the requester's buffer, notify the RubyTester if present, and finally
// invoke the registered hit-callback with the request id.  Deletes the
// SequencerRequest -- the caller must not use it afterwards.
void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
  const RubyRequest & ruby_request = srequest->ruby_request;
  Address request_address(ruby_request.paddr);
  Address request_line_address(ruby_request.paddr);
  request_line_address.makeLineAddress();
  RubyRequestType type = ruby_request.type;
  Time issued_time = srequest->issue_time;

  // Set this cache entry to the most recently used
  if (type == RubyRequestType_IFETCH) {
    if (m_instCache_ptr->isTagPresent(request_line_address) )
      m_instCache_ptr->setMRU(request_line_address);
  } else {
    if (m_dataCache_ptr->isTagPresent(request_line_address) )
      m_dataCache_ptr->setMRU(request_line_address);
  }

  assert(g_eventQueue_ptr->getTime() >= issued_time);
  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

  // Profile the miss latency for all non-zero demand misses
  if (miss_latency != 0) {
    g_system_ptr->getProfiler()->missLatency(miss_latency, type);

    if (Debug::getProtocolTrace()) {
      g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
                                                     "", "Done", "", int_to_string(miss_latency)+" cycles");
    }
  }
  /*
  if (request.getPrefetch() == PrefetchBit_Yes) {
    return; // Ignore the prefetch
  }
  */

  // update the data: reads (LD/IFETCH/RMW_Read) copy out of the block
  // into the requester's buffer; writes copy the buffer into the block.
  if (ruby_request.data != NULL) {
    if ((type == RubyRequestType_LD) ||
        (type == RubyRequestType_IFETCH) ||
        (type == RubyRequestType_RMW_Read)) {
      memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
    } else {
      data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
    }
  }

  //
  // If using the RubyTester, update the RubyTester sender state's subBlock
  // with the recieved data.  The tester will later access this state.
  // Note: RubyPort will access it's sender state before the RubyTester.
  //
  if (m_usingRubyTester) {
      //
      // Since the hit callback func only takes a request id, we must iterate
      // through the requests and update the packet's subBlock here.
      // All this would be fixed if we could attach a M5 pkt pointer to the
      // ruby request, however that change will break the libruby interface so
      // we'll hold off on that for now.
      //
      RequestMap::iterator i = pending_cpu_requests.find(srequest->id);
      if (i == pending_cpu_requests.end())
          panic("could not find pending request %d\n", srequest->id);
      RequestCookie *cookie = i->second;
      Packet *pkt = cookie->pkt;

      RubyTester::SenderState* testerSenderState;
      testerSenderState = safe_cast<RubyTester::SenderState*>(pkt->senderState);
      testerSenderState->subBlock->mergeFrom(data);
  }

  // Notify the requester and release the bookkeeping record.
  m_hit_callback(srequest->id);
  delete srequest;
}
3766285Snate@binkert.org
// Returns a positive value if the sequencer can accept this request now;
// otherwise a negative libruby status code: LIBRUBY_ALIASED_REQUEST when
// another request to the same cache line is already outstanding, or
// LIBRUBY_BUFFER_FULL when the outstanding-request limit is reached.
// Also accumulates the aliasing-stall counters reported by printStats().
int Sequencer::isReady(const RubyRequest& request) {
  bool is_outstanding_store = m_writeRequestTable.exist(line_address(Address(request.paddr)));
  bool is_outstanding_load = m_readRequestTable.exist(line_address(Address(request.paddr)));
  if ( is_outstanding_store ) {
    // NOTE(review): the counter names look inverted relative to the
    // aliasing direction (a load aliasing an outstanding store bumps
    // m_store_waiting_on_load_cycles) -- confirm the intended naming
    // before relying on these stats.
    if ((request.type == RubyRequestType_LD) ||
        (request.type == RubyRequestType_IFETCH) ||
        (request.type == RubyRequestType_RMW_Read)) {
      m_store_waiting_on_load_cycles++;
    } else {
      m_store_waiting_on_store_cycles++;
    }
    return LIBRUBY_ALIASED_REQUEST;
  } else if ( is_outstanding_load ) {
    if ((request.type == RubyRequestType_ST) ||
        (request.type == RubyRequestType_RMW_Write) ) {
      m_load_waiting_on_store_cycles++;
    } else {
      m_load_waiting_on_load_cycles++;
    }
    return LIBRUBY_ALIASED_REQUEST;
  }

  if (m_outstanding_count >= m_max_outstanding_requests) {
    return LIBRUBY_BUFFER_FULL;
  }

  return 1;
}
4066145Snate@binkert.org
4076285Snate@binkert.orgbool Sequencer::empty() const {
4086285Snate@binkert.org  return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
4096145Snate@binkert.org}
4106145Snate@binkert.org
4116349Spdudnik@gmail.com
// Entry point for new requests from libruby.  If the sequencer can accept
// the request (see isReady()), assigns it a unique id, records it in the
// appropriate table and issues it to the cache controller, returning the
// id.  Returns a negative status code otherwise: the isReady() code when
// the request is aliased/buffer-full, or LLSC_FAIL when a Locked_Write
// (store-conditional) finds its lock flag cleared.
int64_t Sequencer::makeRequest(const RubyRequest & request)
{
  // Requests must not straddle a cache-line boundary.
  assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes());
  int ready = isReady(request);
  if (ready > 0) {
    int64_t id = makeUniqueRequestID();
    SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (!found) {
      if (request.type == RubyRequestType_Locked_Write) {
        // NOTE: it is OK to check the locked flag here as the mandatory queue will be checked first
        // ensuring that nothing comes between checking the flag and servicing the store
        if (!m_dataCache_ptr->isLocked(line_address(Address(request.paddr)), m_version)) {
          // SC failed: the lock set by the matching Locked_Read was lost.
          return LLSC_FAIL;
        }
        else {
          m_dataCache_ptr->clearLocked(line_address(Address(request.paddr)));
        }
      }
      issueRequest(request);

      // TODO: issue hardware prefetches here
      return id;
    }
    else {
      // insertRequest() can only return true for a duplicate entry, which
      // it already asserts against; this path is unreachable.
      assert(0);
      return 0;
    }
  } else {
    return ready;
  }
}
4446145Snate@binkert.org
4456285Snate@binkert.orgvoid Sequencer::issueRequest(const RubyRequest& request) {
4466285Snate@binkert.org
4476285Snate@binkert.org  // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeTYpe, & have SLICC use RubyRequest and subtypes natively
4486285Snate@binkert.org  CacheRequestType ctype;
4496285Snate@binkert.org  switch(request.type) {
4506285Snate@binkert.org  case RubyRequestType_IFETCH:
4516285Snate@binkert.org    ctype = CacheRequestType_IFETCH;
4526285Snate@binkert.org    break;
4536285Snate@binkert.org  case RubyRequestType_LD:
4546285Snate@binkert.org    ctype = CacheRequestType_LD;
4556285Snate@binkert.org    break;
4566285Snate@binkert.org  case RubyRequestType_ST:
4576285Snate@binkert.org    ctype = CacheRequestType_ST;
4586285Snate@binkert.org    break;
4596350Spdudnik@gmail.com  case RubyRequestType_Locked_Read:
4606350Spdudnik@gmail.com  case RubyRequestType_Locked_Write:
4616846Spdudnik@cs.wisc.edu    ctype = CacheRequestType_ATOMIC;
4626285Snate@binkert.org    break;
4636355Spdudnik@gmail.com  case RubyRequestType_RMW_Read:
4646355Spdudnik@gmail.com    ctype = CacheRequestType_ATOMIC;
4656355Spdudnik@gmail.com    break;
4666355Spdudnik@gmail.com  case RubyRequestType_RMW_Write:
4676355Spdudnik@gmail.com    ctype = CacheRequestType_ATOMIC;
4686355Spdudnik@gmail.com    break;
4696285Snate@binkert.org  default:
4706285Snate@binkert.org    assert(0);
4716145Snate@binkert.org  }
4726285Snate@binkert.org  AccessModeType amtype;
4736285Snate@binkert.org  switch(request.access_mode){
4746285Snate@binkert.org  case RubyAccessMode_User:
4756285Snate@binkert.org    amtype = AccessModeType_UserMode;
4766285Snate@binkert.org    break;
4776285Snate@binkert.org  case RubyAccessMode_Supervisor:
4786285Snate@binkert.org    amtype = AccessModeType_SupervisorMode;
4796285Snate@binkert.org    break;
4806285Snate@binkert.org  case RubyAccessMode_Device:
4816285Snate@binkert.org    amtype = AccessModeType_UserMode;
4826285Snate@binkert.org    break;
4836285Snate@binkert.org  default:
4846285Snate@binkert.org    assert(0);
4856285Snate@binkert.org  }
4866285Snate@binkert.org  Address line_addr(request.paddr);
4876285Snate@binkert.org  line_addr.makeLineAddress();
4886505Spdudnik@gmail.com  CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No, request.proc_id);
4896285Snate@binkert.org
4906285Snate@binkert.org  if (Debug::getProtocolTrace()) {
4916285Snate@binkert.org    g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
4926285Snate@binkert.org                                                   "", "Begin", "", RubyRequestType_to_string(request.type));
4936285Snate@binkert.org  }
4946285Snate@binkert.org
4956285Snate@binkert.org  if (g_system_ptr->getTracer()->traceEnabled()) {
4966890SBrad.Beckmann@amd.com    g_system_ptr->getTracer()->traceRequest(this, line_addr, Address(request.pc),
4976285Snate@binkert.org                                            request.type, g_eventQueue_ptr->getTime());
4986285Snate@binkert.org  }
4996285Snate@binkert.org
5006285Snate@binkert.org  Time latency = 0;  // initialzed to an null value
5016285Snate@binkert.org
5026285Snate@binkert.org  if (request.type == RubyRequestType_IFETCH)
5036285Snate@binkert.org    latency = m_instCache_ptr->getLatency();
5046285Snate@binkert.org  else
5056285Snate@binkert.org    latency = m_dataCache_ptr->getLatency();
5066285Snate@binkert.org
5076285Snate@binkert.org  // Send the message to the cache controller
5086285Snate@binkert.org  assert(latency > 0);
5096285Snate@binkert.org
5106876Ssteve.reinhardt@amd.com  assert(m_mandatory_q_ptr != NULL);
5116285Snate@binkert.org  m_mandatory_q_ptr->enqueue(msg, latency);
5126285Snate@binkert.org}
5136285Snate@binkert.org/*
5146285Snate@binkert.orgbool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
5156285Snate@binkert.org                               AccessModeType access_mode,
5166285Snate@binkert.org                               int size, DataBlock*& data_ptr) {
5176285Snate@binkert.org  if (type == CacheRequestType_IFETCH) {
5186285Snate@binkert.org    return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
5196285Snate@binkert.org  } else {
5206285Snate@binkert.org    return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
5216145Snate@binkert.org  }
5226145Snate@binkert.org}
5236285Snate@binkert.org*/
5246145Snate@binkert.org
5256145Snate@binkert.orgvoid Sequencer::print(ostream& out) const {
5266285Snate@binkert.org  out << "[Sequencer: " << m_version
5276145Snate@binkert.org      << ", outstanding requests: " << m_outstanding_count;
5286145Snate@binkert.org
5296285Snate@binkert.org  out << ", read request table: " << m_readRequestTable
5306285Snate@binkert.org      << ", write request table: " << m_writeRequestTable;
5316145Snate@binkert.org  out << "]";
5326145Snate@binkert.org}
5336145Snate@binkert.org
// this can be called from setState whenever coherence permissions are upgraded
// when invoked, coherence violations will be checked for the given block
// (compiles to a no-op unless CHECK_COHERENCE is defined at build time)
void Sequencer::checkCoherence(const Address& addr) {
#ifdef CHECK_COHERENCE
  g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}
5416145Snate@binkert.org
542