// Sequencer.cc, revision 9773
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "base/misc.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#if THE_ISA == X86_ISA
#include "arch/x86/insts/microldstop.hh"
#endif // X86_ISA
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/packet.hh"

using namespace std;

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), deadlockCheckEvent(this)
{
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);

    m_usingNetworkTester = p->using_network_tester;
}

Sequencer::~Sequencer()
{
}

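// Wakeup handler for the deadlock-check event: scan both request tables
// and panic if any request has been outstanding for longer than
// m_deadlock_threshold cycles; otherwise re-arm the check while requests
// remain outstanding.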
void
Sequencer::wakeup()
{
    assert(getDrainState() != Drainable::Draining);

    // Check for deadlock of any of the requests
    Cycles current_time = curCycle();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_readRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_writeRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }
}

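// Reset all sequencer statistics and (re)initialize the latency histograms,
// including the per-request-type and per-machine-type breakdowns for hits
// and misses.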
void
Sequencer::clearStats()
{
    m_outstandReqHist.clear();

    // Initialize the histograms that track latency of all requests
    m_latencyHist.clear(20);
    m_typeLatencyHist.resize(RubyRequestType_NUM);
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist[i].clear(20);
    }

    // Initialize the histograms that track latency of requests that
    // hit in the cache attached to the sequencer.
    m_hitLatencyHist.clear(20);
    m_hitTypeLatencyHist.resize(RubyRequestType_NUM);
    m_hitTypeMachLatencyHist.resize(RubyRequestType_NUM);

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_hitTypeLatencyHist[i].clear(20);
        m_hitTypeMachLatencyHist[i].resize(MachineType_NUM);
        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i][j].clear(20);
        }
    }

    // Initialize the histograms that track the latency of requests that
    // missed in the cache attached to the sequencer.
    m_missLatencyHist.clear(20);
    m_missTypeLatencyHist.resize(RubyRequestType_NUM);
    m_missTypeMachLatencyHist.resize(RubyRequestType_NUM);

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_missTypeLatencyHist[i].clear(20);
        m_missTypeMachLatencyHist[i].resize(MachineType_NUM);
        for (int j = 0; j < MachineType_NUM; j++) {
            m_missTypeMachLatencyHist[i][j].clear(20);
        }
    }

    m_hitMachLatencyHist.resize(MachineType_NUM);
    m_missMachLatencyHist.resize(MachineType_NUM);
    m_IssueToInitialDelayHist.resize(MachineType_NUM);
    m_InitialToForwardDelayHist.resize(MachineType_NUM);
    m_ForwardToFirstResponseDelayHist.resize(MachineType_NUM);
    m_FirstResponseToCompletionDelayHist.resize(MachineType_NUM);
    m_IncompleteTimes.resize(MachineType_NUM);

    for (int i = 0; i < MachineType_NUM; i++) {
        m_missMachLatencyHist[i].clear(20);
        m_hitMachLatencyHist[i].clear(20);

        m_IssueToInitialDelayHist[i].clear(20);
        m_InitialToForwardDelayHist[i].clear(20);
        m_ForwardToFirstResponseDelayHist[i].clear(20);
        m_FirstResponseToCompletionDelayHist[i].clear(20);

        m_IncompleteTimes[i] = 0;
    }
}

void
Sequencer::printStats(ostream & out) const
{
    out << "Sequencer: " << m_name << endl
        << "  store_waiting_on_load_cycles: "
        << m_store_waiting_on_load_cycles << endl
        << "  store_waiting_on_store_cycles: "
        << m_store_waiting_on_store_cycles << endl
        << "  load_waiting_on_load_cycles: "
        << m_load_waiting_on_load_cycles << endl
        << "  load_waiting_on_store_cycles: "
        << m_load_waiting_on_store_cycles << endl;
}

void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << g_system_ptr->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_version
        << " Read Requests = " << m_readRequestTable.size() << endl;

    // print the read request table
    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        out << "\tRequest = " << request->m_type
            << " Address " << read->first
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size() << endl;

    // print the write request table
    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        out << "\tRequest = " << request->m_type
            << " Address " << write->first
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}

// Insert the request into the appropriate read or write request table.
// Returns RequestStatus_Aliased if a request for the same cache line is
// already outstanding, and RequestStatus_Ready otherwise.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (!deadlockCheckEvent.scheduled() &&
        getDrainState() != Drainable::Draining) {
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }

    Address line_addr(pkt->getAddr());
    line_addr.makeLineAddress();
    // Create a default entry, mapping the address to NULL, the cast is
    // there to make gcc 4.4 happy
    RequestTable::value_type default_entry(line_addr,
                                           (SequencerRequest*) NULL);

    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(default_entry);
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(default_entry);

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }
    }

    m_outstandReqHist.add(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}

void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    Address line_addr(srequest->pkt->getAddr());
    line_addr.makeLineAddress();
    if ((srequest->m_type == RubyRequestType_ST) ||
        (srequest->m_type == RubyRequestType_RMW_Read) ||
        (srequest->m_type == RubyRequestType_RMW_Write) ||
        (srequest->m_type == RubyRequestType_Load_Linked) ||
        (srequest->m_type == RubyRequestType_Store_Conditional) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Read) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}

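// Called when the cache controller loses permission on a line that may be
// locked by an outstanding store conditional; clearing the lock here makes
// the SC report failure when its callback runs.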
void
Sequencer::invalidateSC(const Address& address)
{
    RequestTable::iterator i = m_writeRequestTable.find(address);
    if (i != m_writeRequestTable.end()) {
        SequencerRequest* request = i->second;
        // The controller has lost the coherence permissions, hence the lock
        // on the cache line maintained by the cache should be cleared.
        if (request->m_type == RubyRequestType_Store_Conditional) {
            m_dataCache_ptr->clearLocked(address);
        }
    }
}

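// Apply LL/SC bookkeeping to a completed write-class request. Returns false
// only for a store conditional that finds the line no longer locked; load
// linked requests and ordinary writes always return true.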
bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    //
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        m_dataCache_ptr->setLocked(address, m_version);
    } else if ((m_dataCache_ptr->isTagPresent(address)) &&
               (m_dataCache_ptr->isLocked(address, m_version))) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}

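// Record the total latency of a completed request in the hit or miss
// histograms and their per-type / per-machine breakdowns. When the protocol
// supplied monotonically ordered timestamps, also record the per-stage
// delays; otherwise count the sample in m_IncompleteTimes.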
void
Sequencer::recordMissLatency(const Cycles cycles, const RubyRequestType type,
                             const MachineType respondingMach,
                             bool isExternalHit, Cycles issuedTime,
                             Cycles initialRequestTime,
                             Cycles forwardRequestTime,
                             Cycles firstResponseTime, Cycles completionTime)
{
    m_latencyHist.add(cycles);
    m_typeLatencyHist[type].add(cycles);

    if (isExternalHit) {
        m_missLatencyHist.add(cycles);
        m_missTypeLatencyHist[type].add(cycles);

        if (respondingMach != MachineType_NUM) {
            m_missMachLatencyHist[respondingMach].add(cycles);
            m_missTypeMachLatencyHist[type][respondingMach].add(cycles);

            if ((issuedTime <= initialRequestTime) &&
                (initialRequestTime <= forwardRequestTime) &&
                (forwardRequestTime <= firstResponseTime) &&
                (firstResponseTime <= completionTime)) {

                m_IssueToInitialDelayHist[respondingMach].add(
                    initialRequestTime - issuedTime);
                m_InitialToForwardDelayHist[respondingMach].add(
                    forwardRequestTime - initialRequestTime);
                m_ForwardToFirstResponseDelayHist[respondingMach].add(
                    firstResponseTime - forwardRequestTime);
                m_FirstResponseToCompletionDelayHist[respondingMach].add(
                    completionTime - firstResponseTime);
            } else {
                m_IncompleteTimes[respondingMach]++;
            }
        }
    } else {
        m_hitLatencyHist.add(cycles);
        m_hitTypeLatencyHist[type].add(cycles);

        if (respondingMach != MachineType_NUM) {
            m_hitMachLatencyHist[respondingMach].add(cycles);
            m_hitTypeMachLatencyHist[type][respondingMach].add(cycles);
        }
    }
}

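// Protocol callback for a completed write-class request: remove it from the
// write request table, apply LL/SC handling (unless the network tester is in
// use), manage blocking for locked RMW sequences, and hand the data back to
// the CPU side through hitCallback().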
void
Sequencer::writeCallback(const Address& address, DataBlock& data,
                         const bool externalHit, const MachineType mach,
                         const Cycles initialRequestTime,
                         const Cycles forwardRequestTime,
                         const Cycles firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_writeRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Network_test protocol.
    //
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, data, success, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

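// Protocol callback for a completed load or instruction fetch: remove the
// request from the read request table and hand the data back to the CPU
// side through hitCallback().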
void
Sequencer::readCallback(const Address& address, DataBlock& data,
                        bool externalHit, const MachineType mach,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_readRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, data, true, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

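// Common completion path for reads and writes: update the MRU state of the
// relevant cache, profile the request latency, copy data between the packet
// and the data block, service the RubyTester if present, and either recycle
// the packet into the cache recorder (warmup/cooldown) or notify the
// CPU-side port via ruby_hit_callback().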
void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                       bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime)
{
    PacketPtr pkt = srequest->pkt;
    Address request_address(pkt->getAddr());
    Address request_line_address(pkt->getAddr());
    request_line_address.makeLineAddress();
    RubyRequestType type = srequest->m_type;
    Cycles issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        m_instCache_ptr->setMRU(request_line_address);
    } else {
        m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(curCycle() >= issued_time);
    Cycles total_latency = curCycle() - issued_time;

    // Profile the latency for all demand accesses.
    recordMissLatency(total_latency, type, mach, externalHit, issued_time,
                      initialRequestTime, forwardRequestTime,
                      firstResponseTime, curCycle());

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
             curTick(), m_version, "Seq",
             llscSuccess ? "Done" : "SC_Failed", "", "",
             request_address, total_latency);

    // update the data
    if (g_system_ptr->m_warmup_enabled) {
        assert(pkt->getPtr<uint8_t>(false) != NULL);
        data.setData(pkt->getPtr<uint8_t>(false),
                     request_address.getOffset(), pkt->getSize());
    } else if (pkt->getPtr<uint8_t>(true) != NULL) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(pkt->getPtr<uint8_t>(true),
                   data.getData(request_address.getOffset(), pkt->getSize()),
                   pkt->getSize());
        } else {
            data.setData(pkt->getPtr<uint8_t>(true),
                         request_address.getOffset(), pkt->getSize());
        }
    } else {
        DPRINTF(MemoryAccess,
                "WARNING.  Data not transferred from Ruby to M5 for type %s\n",
                RubyRequestType_to_string(type));
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    // Note: RubyPort will access its sender state before the
    // RubyTester.
    if (m_usingRubyTester) {
        RubyPort::SenderState *reqSenderState =
            safe_cast<RubyPort::SenderState*>(pkt->senderState);
        // @todo This is a dangerous assumption on nothing else
        // modifying the senderState
        RubyTester::SenderState* testerSenderState =
            safe_cast<RubyTester::SenderState*>(reqSenderState->predecessor);
        testerSenderState->subBlock.mergeFrom(data);
    }

    delete srequest;

    if (g_system_ptr->m_warmup_enabled) {
        assert(pkt->req);
        delete pkt->req;
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFetchRequest();
    } else if (g_system_ptr->m_cooldown_enabled) {
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
    }
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

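// Entry point from the CPU-side port: translate the packet into a primary
// request type (recorded in the request tables) and a secondary type (issued
// to the coherence protocol), insert it, and issue it unless an aliased
// request for the same line is already outstanding.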
RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLocked()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
#if THE_ISA == X86_ISA
                uint32_t flags = pkt->req->getFlags();
                bool storeCheck = flags &
                        (TheISA::StoreCheck << TheISA::FlagShift);
#else
                bool storeCheck = false;
#endif // X86_ISA
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

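// Build a RubyRequest from the packet and enqueue it on the mandatory queue,
// charging the access latency of the cache that will service the request.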
void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    int proc_id = -1;
    if (pkt->req->hasContextId()) {
        proc_id = pkt->req->contextId();
    }

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    RubyRequest *msg = new RubyRequest(clockEdge(), pkt->getAddr(),
                                       pkt->getPtr<uint8_t>(true),
                                       pkt->getSize(), pc, secondary_type,
                                       RubyAccessMode_Supervisor, pkt,
                                       PrefetchBit_No, proc_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n",
            curTick(), m_version, "Seq", "Begin", "", "",
            msg->getPhysicalAddress(),
            RubyRequestType_to_string(secondary_type));

    Cycles latency(0);  // initialized to a null value

    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}

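// Output operator for the hash_map-based request tables; used by
// Sequencer::print() below.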
template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}

void
Sequencer::recordRequestType(SequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}

void
Sequencer::evictionCallback(const Address& address)
{
    ruby_eviction_callback(address);
}