/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "base/misc.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#if THE_ISA == X86_ISA
#include "arch/x86/insts/microldstop.hh"
#endif // X86_ISA
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/packet.hh"

using namespace std;

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), m_IncompleteTimes(MachineType_NUM), deadlockCheckEvent(this)
{
    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);

    m_usingNetworkTester = p->using_network_tester;
}

Sequencer::~Sequencer()
{
}

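// Event handler for the periodic deadlock check: scan every outstanding
// read and write request and panic if any of them has been pending longer
// than the configured deadlock threshold.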
void
Sequencer::wakeup()
{
    assert(getDrainState() != Drainable::Draining);

    // Check for deadlock of any of the requests
    Cycles current_time = curCycle();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_readRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_writeRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }
}

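// Reset all per-sequencer latency histograms and delay counters.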
void Sequencer::resetStats()
{
    m_latencyHist.reset();
    m_hitLatencyHist.reset();
    m_missLatencyHist.reset();
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist[i]->reset();
        m_hitTypeLatencyHist[i]->reset();
        m_missTypeLatencyHist[i]->reset();
        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i][j]->reset();
            m_missTypeMachLatencyHist[i][j]->reset();
        }
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_missMachLatencyHist[i]->reset();
        m_hitMachLatencyHist[i]->reset();

        m_IssueToInitialDelayHist[i]->reset();
        m_InitialToForwardDelayHist[i]->reset();
        m_ForwardToFirstResponseDelayHist[i]->reset();
        m_FirstResponseToCompletionDelayHist[i]->reset();

        m_IncompleteTimes[i] = 0;
    }
}

void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << g_system_ptr->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_Read
        << " version Requests = " << m_readRequestTable.size() << endl;

    // print the request table
    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        out << "\tRequest[ " << i << " ] = " << request->type
            << " Address " << rkeys[i]
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size << endl;

    // print the request table
    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        out << "\tRequest[ " << i << " ] = " << request.getType()
            << " Address " << wkeys[i]
            << " Posted " << request.getTime()
            << " PF " << request.getPrefetch() << endl;
        if (request.getPrefetch() == PrefetchBit_No) {
            total_demand++;
        }
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}

// Insert the request into the read or write request table.  Return
// RequestStatus_Aliased if a request for the same cache line is already
// outstanding, otherwise RequestStatus_Ready.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (!deadlockCheckEvent.scheduled() &&
        getDrainState() != Drainable::Draining) {
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }

    Address line_addr(pkt->getAddr());
    line_addr.makeLineAddress();
    // Create a default entry, mapping the address to NULL, the cast is
    // there to make gcc 4.4 happy
    RequestTable::value_type default_entry(line_addr,
                                           (SequencerRequest*) NULL);

    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(default_entry);
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(default_entry);

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load++;
            return RequestStatus_Aliased;
        }
    }

    m_outstandReqHist.sample(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}

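// Bookkeeping helper: decrement the outstanding request count once an
// entry has been erased from one of the request tables.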
void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

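// Erase a completed request from whichever table holds it; store-like
// request types live in the write table, everything else in the read table.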
void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    Address line_addr(srequest->pkt->getAddr());
    line_addr.makeLineAddress();
    if ((srequest->m_type == RubyRequestType_ST) ||
        (srequest->m_type == RubyRequestType_RMW_Read) ||
        (srequest->m_type == RubyRequestType_RMW_Write) ||
        (srequest->m_type == RubyRequestType_Load_Linked) ||
        (srequest->m_type == RubyRequestType_Store_Conditional) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Read) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}

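// Clear any lock held for a pending Store_Conditional to this line, so the
// SC will fail once the controller has lost permissions on the block.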
void
Sequencer::invalidateSC(const Address& address)
{
    RequestTable::iterator i = m_writeRequestTable.find(address);
    if (i != m_writeRequestTable.end()) {
        SequencerRequest* request = i->second;
        // The controller has lost the coherence permissions, hence the lock
        // on the cache line maintained by the cache should be cleared.
        if (request->m_type == RubyRequestType_Store_Conditional) {
            m_dataCache_ptr->clearLocked(address);
        }
    }
}

bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    //
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        m_dataCache_ptr->setLocked(address, m_version);
    } else if ((m_dataCache_ptr->isTagPresent(address)) &&
               (m_dataCache_ptr->isLocked(address, m_version))) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}

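// Record latency statistics for a completed request, split by hit/miss,
// request type, and responding machine.  For misses, the intermediate
// timestamps are only profiled when they are monotonically ordered;
// otherwise the sample is counted in m_IncompleteTimes.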
void
Sequencer::recordMissLatency(const Cycles cycles, const RubyRequestType type,
                             const MachineType respondingMach,
                             bool isExternalHit, Cycles issuedTime,
                             Cycles initialRequestTime,
                             Cycles forwardRequestTime,
                             Cycles firstResponseTime, Cycles completionTime)
{
    m_latencyHist.sample(cycles);
    m_typeLatencyHist[type]->sample(cycles);

    if (isExternalHit) {
        m_missLatencyHist.sample(cycles);
        m_missTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_missMachLatencyHist[respondingMach]->sample(cycles);
            m_missTypeMachLatencyHist[type][respondingMach]->sample(cycles);

            if ((issuedTime <= initialRequestTime) &&
                (initialRequestTime <= forwardRequestTime) &&
                (forwardRequestTime <= firstResponseTime) &&
                (firstResponseTime <= completionTime)) {

                m_IssueToInitialDelayHist[respondingMach]->sample(
                    initialRequestTime - issuedTime);
                m_InitialToForwardDelayHist[respondingMach]->sample(
                    forwardRequestTime - initialRequestTime);
                m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
                    firstResponseTime - forwardRequestTime);
                m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
                    completionTime - firstResponseTime);
            } else {
                m_IncompleteTimes[respondingMach]++;
            }
        }
    } else {
        m_hitLatencyHist.sample(cycles);
        m_hitTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_hitMachLatencyHist[respondingMach]->sample(cycles);
            m_hitTypeMachLatencyHist[type][respondingMach]->sample(cycles);
        }
    }
}

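// Completion callback for store-like requests: remove the table entry,
// resolve LL/SC state (unless the network tester is in use), block or
// unblock the mandatory queue around locked RMW sequences, and hand the
// data back through hitCallback().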
void
Sequencer::writeCallback(const Address& address, DataBlock& data,
                         const bool externalHit, const MachineType mach,
                         const Cycles initialRequestTime,
                         const Cycles forwardRequestTime,
                         const Cycles firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_writeRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Network_test protocol
    //
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, data, success, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

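// Completion callback for loads and instruction fetches: remove the table
// entry and hand the data back through hitCallback().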
void
Sequencer::readCallback(const Address& address, DataBlock& data,
                        bool externalHit, const MachineType mach,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_readRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, data, true, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

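// Common completion path: touch the cache replacement state, profile the
// request latency, copy data between the Ruby DataBlock and the packet,
// and finally return the packet to the CPU (or to the cache recorder
// during warmup/cooldown).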
void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                       bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime)
{
    PacketPtr pkt = srequest->pkt;
    Address request_address(pkt->getAddr());
    Address request_line_address(pkt->getAddr());
    request_line_address.makeLineAddress();
    RubyRequestType type = srequest->m_type;
    Cycles issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        m_instCache_ptr->setMRU(request_line_address);
    } else {
        m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(curCycle() >= issued_time);
    Cycles total_latency = curCycle() - issued_time;

    // Profile the latency for all demand accesses.
    recordMissLatency(total_latency, type, mach, externalHit, issued_time,
                      initialRequestTime, forwardRequestTime,
                      firstResponseTime, curCycle());

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
             curTick(), m_version, "Seq",
             llscSuccess ? "Done" : "SC_Failed", "", "",
             request_address, total_latency);

    // update the data
    if (g_system_ptr->m_warmup_enabled) {
        assert(pkt->getPtr<uint8_t>(false) != NULL);
        data.setData(pkt->getPtr<uint8_t>(false),
                     request_address.getOffset(), pkt->getSize());
    } else if (pkt->getPtr<uint8_t>(true) != NULL) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(pkt->getPtr<uint8_t>(true),
                   data.getData(request_address.getOffset(), pkt->getSize()),
                   pkt->getSize());
        } else {
            data.setData(pkt->getPtr<uint8_t>(true),
                         request_address.getOffset(), pkt->getSize());
        }
    } else {
        DPRINTF(MemoryAccess,
                "WARNING.  Data not transferred from Ruby to M5 for type %s\n",
                RubyRequestType_to_string(type));
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    if (m_usingRubyTester) {
        RubyTester::SenderState* testerSenderState =
            pkt->findNextSenderState<RubyTester::SenderState>();
        assert(testerSenderState);
        testerSenderState->subBlock.mergeFrom(data);
    }

    delete srequest;

    if (g_system_ptr->m_warmup_enabled) {
        assert(pkt->req);
        delete pkt->req;
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFetchRequest();
    } else if (g_system_ptr->m_cooldown_enabled) {
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
    }
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

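// Entry point for new memory requests from the RubyPort.  Classify the
// packet into a primary type (used for aliasing checks in the request
// tables) and a secondary type (the request type actually issued to the
// cache controller), then insert and issue the request.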
RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLocked()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
#if THE_ISA == X86_ISA
                uint32_t flags = pkt->req->getFlags();
                bool storeCheck = flags &
                        (TheISA::StoreCheck << TheISA::FlagShift);
#else
                bool storeCheck = false;
#endif // X86_ISA
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

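// Build a RubyRequest for the packet and enqueue it on the mandatory
// queue after the appropriate L1 cache latency.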
void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    int proc_id = -1;
    if (pkt->req->hasContextId()) {
        proc_id = pkt->req->contextId();
    }

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    RubyRequest *msg = new RubyRequest(clockEdge(), pkt->getAddr(),
                                       pkt->getPtr<uint8_t>(true),
                                       pkt->getSize(), pc, secondary_type,
                                       RubyAccessMode_Supervisor, pkt,
                                       PrefetchBit_No, proc_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n",
            curTick(), m_version, "Seq", "Begin", "", "",
            msg->getPhysicalAddress(),
            RubyRequestType_to_string(secondary_type));

    Cycles latency(0);  // initialized to a null value

    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}

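// Stream operator for the request tables, used by Sequencer::print() below.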
template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded.  When invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}

void
Sequencer::recordRequestType(SequencerRequestType requestType) {
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}

void
Sequencer::evictionCallback(const Address& address)
{
    ruby_eviction_callback(address);
}

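// Register statistics and allocate the per-type and per-machine latency
// histograms.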
void
Sequencer::regStats()
{
    m_store_waiting_on_load
        .name(name() + ".store_waiting_on_load")
        .desc("Number of times a store aliased with a pending load")
        .flags(Stats::nozero);
    m_store_waiting_on_store
        .name(name() + ".store_waiting_on_store")
        .desc("Number of times a store aliased with a pending store")
        .flags(Stats::nozero);
    m_load_waiting_on_load
        .name(name() + ".load_waiting_on_load")
        .desc("Number of times a load aliased with a pending load")
        .flags(Stats::nozero);
    m_load_waiting_on_store
        .name(name() + ".load_waiting_on_store")
        .desc("Number of times a load aliased with a pending store")
        .flags(Stats::nozero);

    // These statistical variables are not for display.
    // The profiler will collate these across different
    // sequencers and display those collated statistics.
    m_outstandReqHist.init(10);
    m_latencyHist.init(10);
    m_hitLatencyHist.init(10);
    m_missLatencyHist.init(10);

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist.push_back(new Stats::Histogram());
        m_typeLatencyHist[i]->init(10);

        m_hitTypeLatencyHist.push_back(new Stats::Histogram());
        m_hitTypeLatencyHist[i]->init(10);

        m_missTypeLatencyHist.push_back(new Stats::Histogram());
        m_missTypeLatencyHist[i]->init(10);
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_hitMachLatencyHist.push_back(new Stats::Histogram());
        m_hitMachLatencyHist[i]->init(10);

        m_missMachLatencyHist.push_back(new Stats::Histogram());
        m_missMachLatencyHist[i]->init(10);

        m_IssueToInitialDelayHist.push_back(new Stats::Histogram());
        m_IssueToInitialDelayHist[i]->init(10);

        m_InitialToForwardDelayHist.push_back(new Stats::Histogram());
        m_InitialToForwardDelayHist[i]->init(10);

        m_ForwardToFirstResponseDelayHist.push_back(new Stats::Histogram());
        m_ForwardToFirstResponseDelayHist[i]->init(10);

        m_FirstResponseToCompletionDelayHist.push_back(new Stats::Histogram());
        m_FirstResponseToCompletionDelayHist[i]->init(10);
    }

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_hitTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());
        m_missTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());

        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_hitTypeMachLatencyHist[i][j]->init(10);

            m_missTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_missTypeMachLatencyHist[i][j]->init(10);
        }
    }
}