// Sequencer.cc revision 6899
12292SN/A
22329SN/A/*
32292SN/A * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
42292SN/A * All rights reserved.
52292SN/A *
62292SN/A * Redistribution and use in source and binary forms, with or without
72292SN/A * modification, are permitted provided that the following conditions are
82292SN/A * met: redistributions of source code must retain the above copyright
92292SN/A * notice, this list of conditions and the following disclaimer;
102292SN/A * redistributions in binary form must reproduce the above copyright
112292SN/A * notice, this list of conditions and the following disclaimer in the
122292SN/A * documentation and/or other materials provided with the distribution;
132292SN/A * neither the name of the copyright holders nor the names of its
142292SN/A * contributors may be used to endorse or promote products derived from
152292SN/A * this software without specific prior written permission.
162292SN/A *
172292SN/A * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
182292SN/A * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
192292SN/A * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
202292SN/A * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
212292SN/A * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
222292SN/A * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
232292SN/A * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
242292SN/A * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
252292SN/A * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
262292SN/A * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
272689Sktlim@umich.edu * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
282689Sktlim@umich.edu */
292689Sktlim@umich.edu
302292SN/A#include "mem/ruby/libruby.hh"
312292SN/A#include "mem/ruby/common/Global.hh"
322292SN/A#include "mem/ruby/system/Sequencer.hh"
332292SN/A#include "mem/ruby/system/System.hh"
342292SN/A#include "mem/protocol/Protocol.hh"
352329SN/A#include "mem/ruby/profiler/Profiler.hh"
364395Ssaidi@eecs.umich.edu#include "mem/ruby/system/CacheMemory.hh"
372292SN/A#include "mem/protocol/CacheMsg.hh"
382292SN/A#include "mem/ruby/recorder/Tracer.hh"
392292SN/A#include "mem/ruby/common/SubBlock.hh"
408591Sgblack@eecs.umich.edu#include "mem/protocol/Protocol.hh"
418506Sgblack@eecs.umich.edu#include "mem/gems_common/Map.hh"
423326Sktlim@umich.edu#include "mem/ruby/buffers/MessageBuffer.hh"
438481Sgblack@eecs.umich.edu#include "mem/ruby/slicc_interface/AbstractController.hh"
448229Snate@binkert.org#include "cpu/rubytest/RubyTester.hh"
456658Snate@binkert.org
462292SN/A#include "params/RubySequencer.hh"
478230Snate@binkert.org
488232Snate@binkert.org//Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q)
493348Sbinkertn@umich.edu
502669Sktlim@umich.edu#define LLSC_FAIL -2
518817Sgblack@eecs.umich.edulong int already = 0;
522292SN/A
538737Skoansin.tan@gmail.comSequencer *
545529Snate@binkert.orgRubySequencerParams::create()
552292SN/A{
562329SN/A    return new Sequencer(this);
572329SN/A}
582329SN/A
592329SN/ASequencer::Sequencer(const Params *p)
602329SN/A    : RubyPort(p), deadlockCheckEvent(this)
612329SN/A{
622329SN/A    m_store_waiting_on_load_cycles = 0;
632329SN/A    m_store_waiting_on_store_cycles = 0;
642329SN/A    m_load_waiting_on_store_cycles = 0;
652329SN/A    m_load_waiting_on_load_cycles = 0;
662292SN/A
672292SN/A    m_outstanding_count = 0;
682292SN/A
692292SN/A    m_max_outstanding_requests = 0;
702733Sktlim@umich.edu    m_deadlock_threshold = 0;
712292SN/A    m_instCache_ptr = NULL;
722292SN/A    m_dataCache_ptr = NULL;
732907Sktlim@umich.edu
742292SN/A    m_instCache_ptr = p->icache;
752292SN/A    m_dataCache_ptr = p->dcache;
762292SN/A    m_max_outstanding_requests = p->max_outstanding_requests;
772292SN/A    m_deadlock_threshold = p->deadlock_threshold;
782292SN/A    m_usingRubyTester = p->using_ruby_tester;
792292SN/A
802292SN/A    assert(m_max_outstanding_requests > 0);
815529Snate@binkert.org    assert(m_deadlock_threshold > 0);
825529Snate@binkert.org    assert(m_instCache_ptr != NULL);
835529Snate@binkert.org    assert(m_dataCache_ptr != NULL);
842292SN/A}
852292SN/A
862292SN/ASequencer::~Sequencer() {
872292SN/A
882727Sktlim@umich.edu}
892727Sktlim@umich.edu
902727Sktlim@umich.eduvoid Sequencer::wakeup() {
912907Sktlim@umich.edu  // Check for deadlock of any of the requests
928922Swilliam.wang@arm.com  Time current_time = g_eventQueue_ptr->getTime();
932907Sktlim@umich.edu
942348SN/A  // Check across all outstanding requests
952307SN/A  int total_outstanding = 0;
962307SN/A
972348SN/A  Vector<Address> keys = m_readRequestTable.keys();
982307SN/A  for (int i=0; i<keys.size(); i++) {
992307SN/A    SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
1002348SN/A    if (current_time - request->issue_time >= m_deadlock_threshold) {
1012307SN/A      WARN_MSG("Possible Deadlock detected");
1022307SN/A      WARN_EXPR(request);
1032292SN/A      WARN_EXPR(m_version);
1042292SN/A      WARN_EXPR(request->ruby_request.paddr);
1052292SN/A      WARN_EXPR(keys.size());
1062292SN/A      WARN_EXPR(current_time);
1072292SN/A      WARN_EXPR(request->issue_time);
1082292SN/A      WARN_EXPR(current_time - request->issue_time);
1092292SN/A      ERROR_MSG("Aborting");
1102292SN/A    }
1112292SN/A  }
1122292SN/A
1132292SN/A  keys = m_writeRequestTable.keys();
1142292SN/A  for (int i=0; i<keys.size(); i++) {
1152292SN/A    SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
1162292SN/A    if (current_time - request->issue_time >= m_deadlock_threshold) {
1178545Ssaidi@eecs.umich.edu      WARN_MSG("Possible Deadlock detected");
1188545Ssaidi@eecs.umich.edu      WARN_EXPR(request);
1198545Ssaidi@eecs.umich.edu      WARN_EXPR(m_version);
1208199SAli.Saidi@ARM.com      WARN_EXPR(current_time);
1218199SAli.Saidi@ARM.com      WARN_EXPR(request->issue_time);
1228199SAli.Saidi@ARM.com      WARN_EXPR(current_time - request->issue_time);
1238199SAli.Saidi@ARM.com      WARN_EXPR(keys.size());
1248199SAli.Saidi@ARM.com      ERROR_MSG("Aborting");
1258545Ssaidi@eecs.umich.edu    }
1268545Ssaidi@eecs.umich.edu  }
1278545Ssaidi@eecs.umich.edu  total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();
1288545Ssaidi@eecs.umich.edu
1298545Ssaidi@eecs.umich.edu  assert(m_outstanding_count == total_outstanding);
1308545Ssaidi@eecs.umich.edu
1312292SN/A  if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
1322292SN/A    schedule(deadlockCheckEvent,
1332292SN/A             (m_deadlock_threshold * g_eventQueue_ptr->getClock()) + curTick);
1342329SN/A  }
1352292SN/A}
1362292SN/A
1372292SN/Avoid Sequencer::printStats(ostream & out) const {
1382292SN/A  out << "Sequencer: " << m_name << endl;
1392292SN/A  out << "  store_waiting_on_load_cycles: " << m_store_waiting_on_load_cycles << endl;
1402292SN/A  out << "  store_waiting_on_store_cycles: " << m_store_waiting_on_store_cycles << endl;
1412292SN/A  out << "  load_waiting_on_load_cycles: " << m_load_waiting_on_load_cycles << endl;
1422292SN/A  out << "  load_waiting_on_store_cycles: " << m_load_waiting_on_store_cycles << endl;
1432292SN/A}
1442292SN/A
1452292SN/Avoid Sequencer::printProgress(ostream& out) const{
1462292SN/A  /*
1472292SN/A  int total_demand = 0;
1482292SN/A  out << "Sequencer Stats Version " << m_version << endl;
1492790Sktlim@umich.edu  out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
1502790Sktlim@umich.edu  out << "---------------" << endl;
1512669Sktlim@umich.edu  out << "outstanding requests" << endl;
1522669Sktlim@umich.edu
1532292SN/A  Vector<Address> rkeys = m_readRequestTable.keys();
1542292SN/A  int read_size = rkeys.size();
1552292SN/A  out << "proc " << m_version << " Read Requests = " << read_size << endl;
1562292SN/A  // print the request table
1572292SN/A  for(int i=0; i < read_size; ++i){
1582292SN/A    SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
1592292SN/A    out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i]  << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
1602292SN/A    total_demand++;
1612292SN/A  }
1622292SN/A
1632292SN/A  Vector<Address> wkeys = m_writeRequestTable.keys();
1642292SN/A  int write_size = wkeys.size();
1652292SN/A  out << "proc " << m_version << " Write Requests = " << write_size << endl;
1662292SN/A  // print the request table
1672292SN/A  for(int i=0; i < write_size; ++i){
1682292SN/A      CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]);
1692292SN/A      out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i]  << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
1702292SN/A      if( request.getPrefetch() == PrefetchBit_No ){
1712292SN/A        total_demand++;
1722292SN/A      }
1732292SN/A  }
1742292SN/A
1752292SN/A  out << endl;
1762329SN/A
1772292SN/A  out << "Total Number Outstanding: " << m_outstanding_count << endl;
1782292SN/A  out << "Total Number Demand     : " << total_demand << endl;
1792292SN/A  out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
1802348SN/A  out << endl;
1812292SN/A  out << endl;
1822292SN/A  */
1832292SN/A}
1842348SN/A
1852292SN/Avoid Sequencer::printConfig(ostream& out) const {
1862292SN/A  out << "Seqeuncer config: " << m_name << endl;
1872292SN/A  out << "  controller: " << m_controller->getName() << endl;
1882348SN/A  out << "  version: " << m_version << endl;
1892292SN/A  out << "  max_outstanding_requests: " << m_max_outstanding_requests << endl;
1902292SN/A  out << "  deadlock_threshold: " << m_deadlock_threshold << endl;
1912292SN/A}
1922292SN/A
1932292SN/A// Insert the request on the correct request table.  Return true if
1942292SN/A// the entry was already present.
1952292SN/Abool Sequencer::insertRequest(SequencerRequest* request) {
1962292SN/A  int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
1972292SN/A
1982292SN/A  assert(m_outstanding_count == total_outstanding);
1992292SN/A
2002292SN/A  // See if we should schedule a deadlock check
2012292SN/A  if (deadlockCheckEvent.scheduled() == false) {
2022292SN/A    schedule(deadlockCheckEvent, m_deadlock_threshold + curTick);
2032292SN/A  }
2042292SN/A
2052292SN/A  Address line_addr(request->ruby_request.paddr);
2062292SN/A  line_addr.makeLineAddress();
2072292SN/A  if ((request->ruby_request.type == RubyRequestType_ST) ||
2082292SN/A      (request->ruby_request.type == RubyRequestType_RMW_Read) ||
2092292SN/A      (request->ruby_request.type == RubyRequestType_RMW_Write) ||
2102292SN/A      (request->ruby_request.type == RubyRequestType_Locked_Read) ||
2112292SN/A      (request->ruby_request.type == RubyRequestType_Locked_Write)) {
2122292SN/A    if (m_writeRequestTable.exist(line_addr)) {
2132292SN/A      m_writeRequestTable.lookup(line_addr) = request;
2142292SN/A      //      return true;
2152292SN/A      assert(0); // drh5: isn't this an error?  do you lose the initial request?
2162292SN/A    }
2172292SN/A    m_writeRequestTable.allocate(line_addr);
2182292SN/A    m_writeRequestTable.lookup(line_addr) = request;
2192292SN/A    m_outstanding_count++;
2202292SN/A  } else {
2212292SN/A    if (m_readRequestTable.exist(line_addr)) {
2222292SN/A      m_readRequestTable.lookup(line_addr) = request;
2232292SN/A      //      return true;
2242678Sktlim@umich.edu      assert(0); // drh5: isn't this an error?  do you lose the initial request?
2252678Sktlim@umich.edu    }
2262292SN/A    m_readRequestTable.allocate(line_addr);
2272907Sktlim@umich.edu    m_readRequestTable.lookup(line_addr) = request;
2282907Sktlim@umich.edu    m_outstanding_count++;
2292907Sktlim@umich.edu  }
2302292SN/A
2312698Sktlim@umich.edu  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);
2322678Sktlim@umich.edu
2332678Sktlim@umich.edu  total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
2346974Stjones1@inf.ed.ac.uk  assert(m_outstanding_count == total_outstanding);
2356974Stjones1@inf.ed.ac.uk
2366974Stjones1@inf.ed.ac.uk  return false;
2372698Sktlim@umich.edu}
2383349Sbinkertn@umich.edu
2392693Sktlim@umich.eduvoid Sequencer::removeRequest(SequencerRequest* srequest) {
2402292SN/A
2412292SN/A  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
2422292SN/A
2436974Stjones1@inf.ed.ac.uk  const RubyRequest & ruby_request = srequest->ruby_request;
2446974Stjones1@inf.ed.ac.uk  Address line_addr(ruby_request.paddr);
2456974Stjones1@inf.ed.ac.uk  line_addr.makeLineAddress();
2462292SN/A  if ((ruby_request.type == RubyRequestType_ST) ||
2472292SN/A      (ruby_request.type == RubyRequestType_RMW_Read) ||
2482292SN/A      (ruby_request.type == RubyRequestType_RMW_Write) ||
2492292SN/A      (ruby_request.type == RubyRequestType_Locked_Read) ||
2502292SN/A      (ruby_request.type == RubyRequestType_Locked_Write)) {
2512292SN/A    m_writeRequestTable.deallocate(line_addr);
2522292SN/A  } else {
2532292SN/A    m_readRequestTable.deallocate(line_addr);
2542292SN/A  }
2552329SN/A  m_outstanding_count--;
2562329SN/A
2572329SN/A  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
2582329SN/A}
2592292SN/A
2602292SN/Avoid Sequencer::writeCallback(const Address& address, DataBlock& data) {
2612733Sktlim@umich.edu
2622292SN/A  assert(address == line_address(address));
2632292SN/A  assert(m_writeRequestTable.exist(line_address(address)));
2642292SN/A
2652292SN/A  SequencerRequest* request = m_writeRequestTable.lookup(address);
2662907Sktlim@umich.edu
2672907Sktlim@umich.edu  removeRequest(request);
2682669Sktlim@umich.edu
2692907Sktlim@umich.edu  assert((request->ruby_request.type == RubyRequestType_ST) ||
2708922Swilliam.wang@arm.com         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
2712292SN/A         (request->ruby_request.type == RubyRequestType_RMW_Write) ||
2722698Sktlim@umich.edu         (request->ruby_request.type == RubyRequestType_Locked_Read) ||
2739044SAli.Saidi@ARM.com         (request->ruby_request.type == RubyRequestType_Locked_Write));
2742678Sktlim@umich.edu
2752678Sktlim@umich.edu  if (request->ruby_request.type == RubyRequestType_Locked_Read) {
2762698Sktlim@umich.edu    m_dataCache_ptr->setLocked(address, m_version);
2772678Sktlim@umich.edu  }
2789046SAli.Saidi@ARM.com  else if (request->ruby_request.type == RubyRequestType_RMW_Read) {
2799046SAli.Saidi@ARM.com    m_controller->blockOnQueue(address, m_mandatory_q_ptr);
2809046SAli.Saidi@ARM.com  }
2812678Sktlim@umich.edu  else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
2822698Sktlim@umich.edu    m_controller->unblock(address);
2832678Sktlim@umich.edu  }
2849046SAli.Saidi@ARM.com
2859046SAli.Saidi@ARM.com  hitCallback(request, data);
2869046SAli.Saidi@ARM.com}
2879046SAli.Saidi@ARM.com
2889046SAli.Saidi@ARM.comvoid Sequencer::readCallback(const Address& address, DataBlock& data) {
2899046SAli.Saidi@ARM.com
2909046SAli.Saidi@ARM.com  assert(address == line_address(address));
2919046SAli.Saidi@ARM.com  assert(m_readRequestTable.exist(line_address(address)));
2922698Sktlim@umich.edu
2932678Sktlim@umich.edu  SequencerRequest* request = m_readRequestTable.lookup(address);
2942698Sktlim@umich.edu  removeRequest(request);
2952678Sktlim@umich.edu
2966974Stjones1@inf.ed.ac.uk  assert((request->ruby_request.type == RubyRequestType_LD) ||
2976974Stjones1@inf.ed.ac.uk	 (request->ruby_request.type == RubyRequestType_RMW_Read) ||
2986974Stjones1@inf.ed.ac.uk         (request->ruby_request.type == RubyRequestType_IFETCH));
2996974Stjones1@inf.ed.ac.uk
3006974Stjones1@inf.ed.ac.uk  hitCallback(request, data);
3016974Stjones1@inf.ed.ac.uk}
3026974Stjones1@inf.ed.ac.uk
3032678Sktlim@umich.eduvoid Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
3042678Sktlim@umich.edu  const RubyRequest & ruby_request = srequest->ruby_request;
3052698Sktlim@umich.edu  Address request_address(ruby_request.paddr);
3062678Sktlim@umich.edu  Address request_line_address(ruby_request.paddr);
3072678Sktlim@umich.edu  request_line_address.makeLineAddress();
3082678Sktlim@umich.edu  RubyRequestType type = ruby_request.type;
3092678Sktlim@umich.edu  Time issued_time = srequest->issue_time;
3102678Sktlim@umich.edu
3112678Sktlim@umich.edu  // Set this cache entry to the most recently used
3122678Sktlim@umich.edu  if (type == RubyRequestType_IFETCH) {
3132678Sktlim@umich.edu    if (m_instCache_ptr->isTagPresent(request_line_address) )
3142678Sktlim@umich.edu      m_instCache_ptr->setMRU(request_line_address);
3155336Shines@cs.fsu.edu  } else {
3162678Sktlim@umich.edu    if (m_dataCache_ptr->isTagPresent(request_line_address) )
3172678Sktlim@umich.edu      m_dataCache_ptr->setMRU(request_line_address);
3182698Sktlim@umich.edu  }
3192678Sktlim@umich.edu
3202678Sktlim@umich.edu  assert(g_eventQueue_ptr->getTime() >= issued_time);
3212698Sktlim@umich.edu  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;
3222678Sktlim@umich.edu
3232678Sktlim@umich.edu  // Profile the miss latency for all non-zero demand misses
3242678Sktlim@umich.edu  if (miss_latency != 0) {
3252678Sktlim@umich.edu    g_system_ptr->getProfiler()->missLatency(miss_latency, type);
3262678Sktlim@umich.edu
3272678Sktlim@umich.edu    if (Debug::getProtocolTrace()) {
3282292SN/A      g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
3292292SN/A                                                     "", "Done", "", int_to_string(miss_latency)+" cycles");
3302292SN/A    }
3312292SN/A  }
3324326Sgblack@eecs.umich.edu  /*
3332292SN/A  if (request.getPrefetch() == PrefetchBit_Yes) {
3344326Sgblack@eecs.umich.edu    return; // Ignore the prefetch
3354395Ssaidi@eecs.umich.edu  }
3364326Sgblack@eecs.umich.edu  */
3372292SN/A
3389152Satgutier@umich.edu  // update the data
3399152Satgutier@umich.edu  if (ruby_request.data != NULL) {
3409152Satgutier@umich.edu    if ((type == RubyRequestType_LD) ||
3419152Satgutier@umich.edu        (type == RubyRequestType_IFETCH) ||
3429152Satgutier@umich.edu        (type == RubyRequestType_RMW_Read)) {
3432292SN/A      memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
3442292SN/A    } else {
3456974Stjones1@inf.ed.ac.uk      data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
3466974Stjones1@inf.ed.ac.uk    }
3474326Sgblack@eecs.umich.edu  }
3484395Ssaidi@eecs.umich.edu
3494326Sgblack@eecs.umich.edu  //
3509046SAli.Saidi@ARM.com  // If using the RubyTester, update the RubyTester sender state's subBlock
3519046SAli.Saidi@ARM.com  // with the recieved data.  The tester will later access this state.
3522292SN/A  // Note: RubyPort will access it's sender state before the RubyTester.
3532292SN/A  //
3542669Sktlim@umich.edu  if (m_usingRubyTester) {
3552669Sktlim@umich.edu      //
3566974Stjones1@inf.ed.ac.uk      // Since the hit callback func only takes a request id, we must iterate
3576974Stjones1@inf.ed.ac.uk      // through the requests and update the packet's subBlock here.
3586974Stjones1@inf.ed.ac.uk      // All this would be fixed if we could attach a M5 pkt pointer to the
3592292SN/A      // ruby request, however that change will break the libruby interface so
3609046SAli.Saidi@ARM.com      // we'll hold off on that for now.
3616974Stjones1@inf.ed.ac.uk      //
3626974Stjones1@inf.ed.ac.uk      RequestMap::iterator i = pending_cpu_requests.find(srequest->id);
3632292SN/A      if (i == pending_cpu_requests.end())
3642292SN/A          panic("could not find pending request %d\n", srequest->id);
3652292SN/A      RequestCookie *cookie = i->second;
3662292SN/A      Packet *pkt = cookie->pkt;
3672292SN/A
3682292SN/A      RubyTester::SenderState* testerSenderState;
3692292SN/A      testerSenderState = safe_cast<RubyTester::SenderState*>(pkt->senderState);
3702329SN/A      testerSenderState->subBlock->mergeFrom(data);
3712292SN/A  }
3722292SN/A
3736221Snate@binkert.org  m_hit_callback(srequest->id);
3742292SN/A  delete srequest;
3752292SN/A}
3762292SN/A
3772292SN/A// Returns true if the sequencer already has a load or store outstanding
3782292SN/Aint Sequencer::isReady(const RubyRequest& request) {
3792292SN/A  bool is_outstanding_store = m_writeRequestTable.exist(line_address(Address(request.paddr)));
3802292SN/A  bool is_outstanding_load = m_readRequestTable.exist(line_address(Address(request.paddr)));
3812329SN/A  if ( is_outstanding_store ) {
3822329SN/A    if ((request.type == RubyRequestType_LD) ||
3832329SN/A        (request.type == RubyRequestType_IFETCH) ||
3842292SN/A        (request.type == RubyRequestType_RMW_Read)) {
3852329SN/A      m_store_waiting_on_load_cycles++;
3862329SN/A    } else {
3872329SN/A      m_store_waiting_on_store_cycles++;
3882292SN/A    }
3892292SN/A    return LIBRUBY_ALIASED_REQUEST;
3908199SAli.Saidi@ARM.com  } else if ( is_outstanding_load ) {
3918199SAli.Saidi@ARM.com    if ((request.type == RubyRequestType_ST) ||
3928199SAli.Saidi@ARM.com        (request.type == RubyRequestType_RMW_Write) ) {
3938199SAli.Saidi@ARM.com      m_load_waiting_on_store_cycles++;
3948199SAli.Saidi@ARM.com    } else {
3958199SAli.Saidi@ARM.com      m_load_waiting_on_load_cycles++;
3968199SAli.Saidi@ARM.com    }
3978199SAli.Saidi@ARM.com    return LIBRUBY_ALIASED_REQUEST;
3982292SN/A  }
3992292SN/A
4002329SN/A  if (m_outstanding_count >= m_max_outstanding_requests) {
4012292SN/A    return LIBRUBY_BUFFER_FULL;
4022292SN/A  }
4032292SN/A
4042292SN/A  return 1;
4052292SN/A}
4062292SN/A
4072292SN/Abool Sequencer::empty() const {
4082292SN/A  return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
4092292SN/A}
4102292SN/A
4112292SN/A
4122329SN/Aint64_t Sequencer::makeRequest(const RubyRequest & request)
4132329SN/A{
4142292SN/A  assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes());
4152292SN/A  int ready = isReady(request);
4162292SN/A  if (ready > 0) {
4172292SN/A    int64_t id = makeUniqueRequestID();
4182292SN/A    SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
4192292SN/A    bool found = insertRequest(srequest);
4202292SN/A    if (!found) {
4212292SN/A      if (request.type == RubyRequestType_Locked_Write) {
4222292SN/A        // NOTE: it is OK to check the locked flag here as the mandatory queue will be checked first
4232292SN/A        // ensuring that nothing comes between checking the flag and servicing the store
4242292SN/A        if (!m_dataCache_ptr->isLocked(line_address(Address(request.paddr)), m_version)) {
4252292SN/A          return LLSC_FAIL;
4262348SN/A        }
4272307SN/A        else {
4282307SN/A          m_dataCache_ptr->clearLocked(line_address(Address(request.paddr)));
4292292SN/A        }
4302292SN/A      }
4318545Ssaidi@eecs.umich.edu      issueRequest(request);
4328545Ssaidi@eecs.umich.edu
4338545Ssaidi@eecs.umich.edu      // TODO: issue hardware prefetches here
4342292SN/A      return id;
4352292SN/A    }
4362292SN/A    else {
4372292SN/A      assert(0);
4382292SN/A      return 0;
4392292SN/A    }
4402292SN/A  } else {
4412292SN/A    return ready;
4422292SN/A  }
4432292SN/A}
4442292SN/A
4452292SN/Avoid Sequencer::issueRequest(const RubyRequest& request) {
4462698Sktlim@umich.edu
4472698Sktlim@umich.edu  // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeTYpe, & have SLICC use RubyRequest and subtypes natively
4482693Sktlim@umich.edu  CacheRequestType ctype;
4492698Sktlim@umich.edu  switch(request.type) {
4502678Sktlim@umich.edu  case RubyRequestType_IFETCH:
4512678Sktlim@umich.edu    ctype = CacheRequestType_IFETCH;
4522329SN/A    break;
4532292SN/A  case RubyRequestType_LD:
4542292SN/A    ctype = CacheRequestType_LD;
4552348SN/A    break;
4562292SN/A  case RubyRequestType_ST:
4572292SN/A    ctype = CacheRequestType_ST;
4588727Snilay@cs.wisc.edu    break;
4598727Snilay@cs.wisc.edu  case RubyRequestType_Locked_Read:
4608727Snilay@cs.wisc.edu  case RubyRequestType_Locked_Write:
4612348SN/A    ctype = CacheRequestType_ATOMIC;
4622292SN/A    break;
4632292SN/A  case RubyRequestType_RMW_Read:
4642292SN/A    ctype = CacheRequestType_ATOMIC;
4652292SN/A    break;
4662292SN/A  case RubyRequestType_RMW_Write:
4676974Stjones1@inf.ed.ac.uk    ctype = CacheRequestType_ATOMIC;
4686974Stjones1@inf.ed.ac.uk    break;
4696974Stjones1@inf.ed.ac.uk  default:
4706974Stjones1@inf.ed.ac.uk    assert(0);
4716974Stjones1@inf.ed.ac.uk  }
4726974Stjones1@inf.ed.ac.uk  AccessModeType amtype;
4736974Stjones1@inf.ed.ac.uk  switch(request.access_mode){
4748727Snilay@cs.wisc.edu  case RubyAccessMode_User:
4758727Snilay@cs.wisc.edu    amtype = AccessModeType_UserMode;
4768727Snilay@cs.wisc.edu    break;
4772292SN/A  case RubyAccessMode_Supervisor:
4782292SN/A    amtype = AccessModeType_SupervisorMode;
4792292SN/A    break;
4802727Sktlim@umich.edu  case RubyAccessMode_Device:
4815999Snate@binkert.org    amtype = AccessModeType_UserMode;
4822307SN/A    break;
4833126Sktlim@umich.edu  default:
4845999Snate@binkert.org    assert(0);
4853126Sktlim@umich.edu  }
4863126Sktlim@umich.edu  Address line_addr(request.paddr);
4875999Snate@binkert.org  line_addr.makeLineAddress();
4883126Sktlim@umich.edu  CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No, request.proc_id);
4893126Sktlim@umich.edu
4903126Sktlim@umich.edu  if (Debug::getProtocolTrace()) {
4915999Snate@binkert.org    g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
4923126Sktlim@umich.edu                                                   "", "Begin", "", RubyRequestType_to_string(request.type));
4933126Sktlim@umich.edu  }
4945999Snate@binkert.org
4953126Sktlim@umich.edu  if (g_system_ptr->getTracer()->traceEnabled()) {
4962727Sktlim@umich.edu    g_system_ptr->getTracer()->traceRequest(this, line_addr, Address(request.pc),
4975999Snate@binkert.org                                            request.type, g_eventQueue_ptr->getTime());
4982727Sktlim@umich.edu  }
4992727Sktlim@umich.edu
5005999Snate@binkert.org  Time latency = 0;  // initialzed to an null value
5012727Sktlim@umich.edu
5022727Sktlim@umich.edu  if (request.type == RubyRequestType_IFETCH)
5035999Snate@binkert.org    latency = m_instCache_ptr->getLatency();
5042727Sktlim@umich.edu  else
5052727Sktlim@umich.edu    latency = m_dataCache_ptr->getLatency();
5065999Snate@binkert.org
5072727Sktlim@umich.edu  // Send the message to the cache controller
5082727Sktlim@umich.edu  assert(latency > 0);
5095999Snate@binkert.org
5102727Sktlim@umich.edu  assert(m_mandatory_q_ptr != NULL);
5112292SN/A  m_mandatory_q_ptr->enqueue(msg, latency);
5122292SN/A}
5137520Sgblack@eecs.umich.edu/*
5147520Sgblack@eecs.umich.edubool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
5152292SN/A                               AccessModeType access_mode,
5162292SN/A                               int size, DataBlock*& data_ptr) {
5177520Sgblack@eecs.umich.edu  if (type == CacheRequestType_IFETCH) {
5187520Sgblack@eecs.umich.edu    return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
5192292SN/A  } else {
5202292SN/A    return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
5212292SN/A  }
5222292SN/A}
5232292SN/A*/
5242292SN/A
5252292SN/Avoid Sequencer::print(ostream& out) const {
5262292SN/A  out << "[Sequencer: " << m_version
5272292SN/A      << ", outstanding requests: " << m_outstanding_count;
5282292SN/A
5292292SN/A  out << ", read request table: " << m_readRequestTable
5302292SN/A      << ", write request table: " << m_writeRequestTable;
5312292SN/A  out << "]";
5322292SN/A}
5332292SN/A
5342292SN/A// this can be called from setState whenever coherence permissions are upgraded
5352292SN/A// when invoked, coherence violations will be checked for the given block
5362292SN/Avoid Sequencer::checkCoherence(const Address& addr) {
5372292SN/A#ifdef CHECK_COHERENCE
5382292SN/A  g_system_ptr->checkGlobalCoherenceInvariant(addr);
5392292SN/A#endif
5402292SN/A}
5412292SN/A
5422292SN/A