// Sequencer.cc -- gem5 revision 11019:fc1e41e88fd3
16657Snate@binkert.org/*
26657Snate@binkert.org * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
36657Snate@binkert.org * All rights reserved.
46657Snate@binkert.org *
56657Snate@binkert.org * Redistribution and use in source and binary forms, with or without
66657Snate@binkert.org * modification, are permitted provided that the following conditions are
76657Snate@binkert.org * met: redistributions of source code must retain the above copyright
86657Snate@binkert.org * notice, this list of conditions and the following disclaimer;
96657Snate@binkert.org * redistributions in binary form must reproduce the above copyright
106657Snate@binkert.org * notice, this list of conditions and the following disclaimer in the
116657Snate@binkert.org * documentation and/or other materials provided with the distribution;
126657Snate@binkert.org * neither the name of the copyright holders nor the names of its
136657Snate@binkert.org * contributors may be used to endorse or promote products derived from
146657Snate@binkert.org * this software without specific prior written permission.
156657Snate@binkert.org *
166657Snate@binkert.org * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
176657Snate@binkert.org * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
186657Snate@binkert.org * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
196657Snate@binkert.org * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
206657Snate@binkert.org * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
216657Snate@binkert.org * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
226657Snate@binkert.org * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
236657Snate@binkert.org * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
246657Snate@binkert.org * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
256657Snate@binkert.org * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
266657Snate@binkert.org * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
276657Snate@binkert.org */
286999Snate@binkert.org
296657Snate@binkert.org#include "arch/x86/ldstflags.hh"
306657Snate@binkert.org#include "base/misc.hh"
316657Snate@binkert.org#include "base/str.hh"
326657Snate@binkert.org#include "cpu/testers/rubytest/RubyTester.hh"
336657Snate@binkert.org#include "debug/MemoryAccess.hh"
346657Snate@binkert.org#include "debug/ProtocolTrace.hh"
356657Snate@binkert.org#include "debug/RubySequencer.hh"
366657Snate@binkert.org#include "debug/RubyStats.hh"
376657Snate@binkert.org#include "mem/protocol/PrefetchBit.hh"
386657Snate@binkert.org#include "mem/protocol/RubyAccessMode.hh"
396657Snate@binkert.org#include "mem/ruby/profiler/Profiler.hh"
406657Snate@binkert.org#include "mem/ruby/slicc_interface/RubyRequest.hh"
416657Snate@binkert.org#include "mem/ruby/system/Sequencer.hh"
426657Snate@binkert.org#include "mem/ruby/system/System.hh"
436657Snate@binkert.org#include "mem/packet.hh"
446657Snate@binkert.org#include "sim/system.hh"
456657Snate@binkert.org
466657Snate@binkert.orgusing namespace std;
476657Snate@binkert.org
486657Snate@binkert.orgSequencer *
496657Snate@binkert.orgRubySequencerParams::create()
506657Snate@binkert.org{
516657Snate@binkert.org    return new Sequencer(this);
526657Snate@binkert.org}
536657Snate@binkert.org
546882SBrad.Beckmann@amd.comSequencer::Sequencer(const Params *p)
556657Snate@binkert.org    : RubyPort(p), m_IncompleteTimes(MachineType_NUM), deadlockCheckEvent(this)
566657Snate@binkert.org{
576657Snate@binkert.org    m_outstanding_count = 0;
586657Snate@binkert.org
596657Snate@binkert.org    m_instCache_ptr = p->icache;
606657Snate@binkert.org    m_dataCache_ptr = p->dcache;
616657Snate@binkert.org    m_data_cache_hit_latency = p->dcache_hit_latency;
626657Snate@binkert.org    m_inst_cache_hit_latency = p->icache_hit_latency;
636657Snate@binkert.org    m_max_outstanding_requests = p->max_outstanding_requests;
646657Snate@binkert.org    m_deadlock_threshold = p->deadlock_threshold;
656657Snate@binkert.org
666657Snate@binkert.org    assert(m_max_outstanding_requests > 0);
676657Snate@binkert.org    assert(m_deadlock_threshold > 0);
686657Snate@binkert.org    assert(m_instCache_ptr != NULL);
696657Snate@binkert.org    assert(m_dataCache_ptr != NULL);
706657Snate@binkert.org    assert(m_data_cache_hit_latency > 0);
716657Snate@binkert.org    assert(m_inst_cache_hit_latency > 0);
726657Snate@binkert.org
736657Snate@binkert.org    m_usingNetworkTester = p->using_network_tester;
746657Snate@binkert.org}
756657Snate@binkert.org
// Nothing to release explicitly; all owned state is torn down by member
// destructors and the RubyPort base class.
Sequencer::~Sequencer()
{
}
796657Snate@binkert.org
806657Snate@binkert.orgvoid
816657Snate@binkert.orgSequencer::wakeup()
826657Snate@binkert.org{
836657Snate@binkert.org    assert(drainState() != DrainState::Draining);
846657Snate@binkert.org
856657Snate@binkert.org    // Check for deadlock of any of the requests
866657Snate@binkert.org    Cycles current_time = curCycle();
876657Snate@binkert.org
886657Snate@binkert.org    // Check across all outstanding requests
896657Snate@binkert.org    int total_outstanding = 0;
906657Snate@binkert.org
916657Snate@binkert.org    RequestTable::iterator read = m_readRequestTable.begin();
926657Snate@binkert.org    RequestTable::iterator read_end = m_readRequestTable.end();
936657Snate@binkert.org    for (; read != read_end; ++read) {
946657Snate@binkert.org        SequencerRequest* request = read->second;
956657Snate@binkert.org        if (current_time - request->issue_time < m_deadlock_threshold)
966657Snate@binkert.org            continue;
976657Snate@binkert.org
986657Snate@binkert.org        panic("Possible Deadlock detected. Aborting!\n"
996657Snate@binkert.org             "version: %d request.paddr: 0x%x m_readRequestTable: %d "
1006657Snate@binkert.org             "current time: %u issue_time: %d difference: %d\n", m_version,
1016657Snate@binkert.org             Address(request->pkt->getAddr()), m_readRequestTable.size(),
1026657Snate@binkert.org              current_time * clockPeriod(), request->issue_time * clockPeriod(),
1038086SBrad.Beckmann@amd.com              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
1048086SBrad.Beckmann@amd.com    }
1058086SBrad.Beckmann@amd.com
1066657Snate@binkert.org    RequestTable::iterator write = m_writeRequestTable.begin();
1076657Snate@binkert.org    RequestTable::iterator write_end = m_writeRequestTable.end();
1086657Snate@binkert.org    for (; write != write_end; ++write) {
1096657Snate@binkert.org        SequencerRequest* request = write->second;
1109298Snilay@cs.wisc.edu        if (current_time - request->issue_time < m_deadlock_threshold)
1116657Snate@binkert.org            continue;
1126657Snate@binkert.org
1136657Snate@binkert.org        panic("Possible Deadlock detected. Aborting!\n"
1146657Snate@binkert.org             "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
1156657Snate@binkert.org             "current time: %u issue_time: %d difference: %d\n", m_version,
1166657Snate@binkert.org             Address(request->pkt->getAddr()), m_writeRequestTable.size(),
1176657Snate@binkert.org              current_time * clockPeriod(), request->issue_time * clockPeriod(),
1186657Snate@binkert.org              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
1196657Snate@binkert.org    }
1206657Snate@binkert.org
1216657Snate@binkert.org    total_outstanding += m_writeRequestTable.size();
1226657Snate@binkert.org    total_outstanding += m_readRequestTable.size();
1236657Snate@binkert.org
1246657Snate@binkert.org    assert(m_outstanding_count == total_outstanding);
1256657Snate@binkert.org
1266657Snate@binkert.org    if (m_outstanding_count > 0) {
1276657Snate@binkert.org        // If there are still outstanding requests, keep checking
1286657Snate@binkert.org        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
1296657Snate@binkert.org    }
1306657Snate@binkert.org}
1316657Snate@binkert.org
1326657Snate@binkert.orgvoid Sequencer::resetStats()
1336657Snate@binkert.org{
1346657Snate@binkert.org    m_latencyHist.reset();
1356657Snate@binkert.org    m_hitLatencyHist.reset();
1366657Snate@binkert.org    m_missLatencyHist.reset();
1376657Snate@binkert.org    for (int i = 0; i < RubyRequestType_NUM; i++) {
1386657Snate@binkert.org        m_typeLatencyHist[i]->reset();
1396657Snate@binkert.org        m_hitTypeLatencyHist[i]->reset();
1406657Snate@binkert.org        m_missTypeLatencyHist[i]->reset();
1416657Snate@binkert.org        for (int j = 0; j < MachineType_NUM; j++) {
1426657Snate@binkert.org            m_hitTypeMachLatencyHist[i][j]->reset();
1436657Snate@binkert.org            m_missTypeMachLatencyHist[i][j]->reset();
1446657Snate@binkert.org        }
1456657Snate@binkert.org    }
1466657Snate@binkert.org
1479298Snilay@cs.wisc.edu    for (int i = 0; i < MachineType_NUM; i++) {
1486657Snate@binkert.org        m_missMachLatencyHist[i]->reset();
1496657Snate@binkert.org        m_hitMachLatencyHist[i]->reset();
1506657Snate@binkert.org
1516657Snate@binkert.org        m_IssueToInitialDelayHist[i]->reset();
1526657Snate@binkert.org        m_InitialToForwardDelayHist[i]->reset();
1536657Snate@binkert.org        m_ForwardToFirstResponseDelayHist[i]->reset();
1546657Snate@binkert.org        m_FirstResponseToCompletionDelayHist[i]->reset();
1556657Snate@binkert.org
1566657Snate@binkert.org        m_IncompleteTimes[i] = 0;
1576657Snate@binkert.org    }
1586657Snate@binkert.org}
1596657Snate@binkert.org
// Dump the outstanding read/write request tables for debugging.
// NOTE(review): the entire body has been disabled with #if 0 and has
// bit-rotted -- it references names that no longer exist (`i`, `rkeys`,
// `wkeys`, `m_Read`, `request.getType()`), so it would not compile if
// re-enabled.  The function is currently a no-op.
void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << m_ruby_system->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_Read
        << " version Requests = " << m_readRequestTable.size() << endl;

    // print the request table
    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        out << "\tRequest[ " << i << " ] = " << request->type
            << " Address " << rkeys[i]
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size << endl;

    // print the request table
    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        out << "\tRequest[ " << i << " ] = " << request.getType()
            << " Address " << wkeys[i]
            << " Posted " << request.getTime()
            << " PF " << request.getPrefetch() << endl;
        if (request.getPrefetch() == PrefetchBit_No) {
            total_demand++;
        }
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}
2106657Snate@binkert.org
2116657Snate@binkert.org// Insert the request on the correct request table.  Return true if
2126999Snate@binkert.org// the entry was already present.
2136657Snate@binkert.orgRequestStatus
2146657Snate@binkert.orgSequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
2156657Snate@binkert.org{
2166657Snate@binkert.org    assert(m_outstanding_count ==
2176657Snate@binkert.org        (m_writeRequestTable.size() + m_readRequestTable.size()));
2186657Snate@binkert.org
2196657Snate@binkert.org    // See if we should schedule a deadlock check
2207007Snate@binkert.org    if (!deadlockCheckEvent.scheduled() &&
2217007Snate@binkert.org        drainState() != DrainState::Draining) {
2226657Snate@binkert.org        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
2237002Snate@binkert.org    }
2247002Snate@binkert.org
2256657Snate@binkert.org    Address line_addr(pkt->getAddr());
2266657Snate@binkert.org    line_addr.makeLineAddress();
2276657Snate@binkert.org    // Create a default entry, mapping the address to NULL, the cast is
2286657Snate@binkert.org    // there to make gcc 4.4 happy
2296657Snate@binkert.org    RequestTable::value_type default_entry(line_addr,
2306657Snate@binkert.org                                           (SequencerRequest*) NULL);
2316657Snate@binkert.org
2326657Snate@binkert.org    if ((request_type == RubyRequestType_ST) ||
2336657Snate@binkert.org        (request_type == RubyRequestType_RMW_Read) ||
2346657Snate@binkert.org        (request_type == RubyRequestType_RMW_Write) ||
2356657Snate@binkert.org        (request_type == RubyRequestType_Load_Linked) ||
2366657Snate@binkert.org        (request_type == RubyRequestType_Store_Conditional) ||
2376657Snate@binkert.org        (request_type == RubyRequestType_Locked_RMW_Read) ||
2387007Snate@binkert.org        (request_type == RubyRequestType_Locked_RMW_Write) ||
2397007Snate@binkert.org        (request_type == RubyRequestType_FLUSH)) {
2406657Snate@binkert.org
2416657Snate@binkert.org        // Check if there is any outstanding read request for the same
2427007Snate@binkert.org        // cache line.
2436657Snate@binkert.org        if (m_readRequestTable.count(line_addr) > 0) {
2446657Snate@binkert.org            m_store_waiting_on_load++;
2456657Snate@binkert.org            return RequestStatus_Aliased;
2466657Snate@binkert.org        }
2476657Snate@binkert.org
2486657Snate@binkert.org        pair<RequestTable::iterator, bool> r =
2496657Snate@binkert.org            m_writeRequestTable.insert(default_entry);
2506657Snate@binkert.org        if (r.second) {
2516657Snate@binkert.org            RequestTable::iterator i = r.first;
2526657Snate@binkert.org            i->second = new SequencerRequest(pkt, request_type, curCycle());
2536657Snate@binkert.org            m_outstanding_count++;
2546657Snate@binkert.org        } else {
2556657Snate@binkert.org          // There is an outstanding write request for the cache line
2566657Snate@binkert.org          m_store_waiting_on_store++;
2576657Snate@binkert.org          return RequestStatus_Aliased;
2586657Snate@binkert.org        }
2596657Snate@binkert.org    } else {
2606657Snate@binkert.org        // Check if there is any outstanding write request for the same
2616657Snate@binkert.org        // cache line.
2627453Snate@binkert.org        if (m_writeRequestTable.count(line_addr) > 0) {
2637453Snate@binkert.org            m_load_waiting_on_store++;
2647453Snate@binkert.org            return RequestStatus_Aliased;
2657453Snate@binkert.org        }
2667453Snate@binkert.org
2677453Snate@binkert.org        pair<RequestTable::iterator, bool> r =
2687453Snate@binkert.org            m_readRequestTable.insert(default_entry);
2697453Snate@binkert.org
2707453Snate@binkert.org        if (r.second) {
2717453Snate@binkert.org            RequestTable::iterator i = r.first;
2727453Snate@binkert.org            i->second = new SequencerRequest(pkt, request_type, curCycle());
2737453Snate@binkert.org            m_outstanding_count++;
2747453Snate@binkert.org        } else {
2757453Snate@binkert.org            // There is an outstanding read request for the cache line
2767453Snate@binkert.org            m_load_waiting_on_load++;
2777453Snate@binkert.org            return RequestStatus_Aliased;
2787453Snate@binkert.org        }
2796657Snate@binkert.org    }
2806657Snate@binkert.org
2816657Snate@binkert.org    m_outstandReqHist.sample(m_outstanding_count);
2826657Snate@binkert.org    assert(m_outstanding_count ==
2836657Snate@binkert.org        (m_writeRequestTable.size() + m_readRequestTable.size()));
2846657Snate@binkert.org
2856657Snate@binkert.org    return RequestStatus_Ready;
2866657Snate@binkert.org}
2876657Snate@binkert.org
2886657Snate@binkert.orgvoid
2896657Snate@binkert.orgSequencer::markRemoved()
2906657Snate@binkert.org{
2916657Snate@binkert.org    m_outstanding_count--;
2926657Snate@binkert.org    assert(m_outstanding_count ==
2936657Snate@binkert.org           m_writeRequestTable.size() + m_readRequestTable.size());
2946657Snate@binkert.org}
2956657Snate@binkert.org
2966657Snate@binkert.orgvoid
2976657Snate@binkert.orgSequencer::removeRequest(SequencerRequest* srequest)
2986657Snate@binkert.org{
2996657Snate@binkert.org    assert(m_outstanding_count ==
3006657Snate@binkert.org           m_writeRequestTable.size() + m_readRequestTable.size());
3017453Snate@binkert.org
3027453Snate@binkert.org    Address line_addr(srequest->pkt->getAddr());
3037453Snate@binkert.org    line_addr.makeLineAddress();
3047007Snate@binkert.org    if ((srequest->m_type == RubyRequestType_ST) ||
3057007Snate@binkert.org        (srequest->m_type == RubyRequestType_RMW_Read) ||
3066657Snate@binkert.org        (srequest->m_type == RubyRequestType_RMW_Write) ||
3076657Snate@binkert.org        (srequest->m_type == RubyRequestType_Load_Linked) ||
3086657Snate@binkert.org        (srequest->m_type == RubyRequestType_Store_Conditional) ||
3097453Snate@binkert.org        (srequest->m_type == RubyRequestType_Locked_RMW_Read) ||
3107007Snate@binkert.org        (srequest->m_type == RubyRequestType_Locked_RMW_Write)) {
3117007Snate@binkert.org        m_writeRequestTable.erase(line_addr);
3127453Snate@binkert.org    } else {
3137007Snate@binkert.org        m_readRequestTable.erase(line_addr);
3146657Snate@binkert.org    }
3156657Snate@binkert.org
3166657Snate@binkert.org    markRemoved();
3176657Snate@binkert.org}
3186657Snate@binkert.org
3196657Snate@binkert.orgvoid
3206657Snate@binkert.orgSequencer::invalidateSC(const Address& address)
3216657Snate@binkert.org{
3226657Snate@binkert.org    RequestTable::iterator i = m_writeRequestTable.find(address);
3236657Snate@binkert.org    if (i != m_writeRequestTable.end()) {
3247007Snate@binkert.org        SequencerRequest* request = i->second;
3257007Snate@binkert.org        // The controller has lost the coherence permissions, hence the lock
3267007Snate@binkert.org        // on the cache line maintained by the cache should be cleared.
3277007Snate@binkert.org        if (request->m_type == RubyRequestType_Store_Conditional) {
3287007Snate@binkert.org            m_dataCache_ptr->clearLocked(address);
3296657Snate@binkert.org        }
3306657Snate@binkert.org    }
3316657Snate@binkert.org}
3326657Snate@binkert.org
3336657Snate@binkert.orgbool
3346657Snate@binkert.orgSequencer::handleLlsc(const Address& address, SequencerRequest* request)
3356657Snate@binkert.org{
3366657Snate@binkert.org    //
3376657Snate@binkert.org    // The success flag indicates whether the LLSC operation was successful.
3387007Snate@binkert.org    // LL ops will always succeed, but SC may fail if the cache line is no
3397007Snate@binkert.org    // longer locked.
3407007Snate@binkert.org    //
3417007Snate@binkert.org    bool success = true;
3427007Snate@binkert.org    if (request->m_type == RubyRequestType_Store_Conditional) {
3436657Snate@binkert.org        if (!m_dataCache_ptr->isLocked(address, m_version)) {
3446657Snate@binkert.org            //
3456657Snate@binkert.org            // For failed SC requests, indicate the failure to the cpu by
3466657Snate@binkert.org            // setting the extra data to zero.
3476657Snate@binkert.org            //
3486657Snate@binkert.org            request->pkt->req->setExtraData(0);
3496657Snate@binkert.org            success = false;
3507007Snate@binkert.org        } else {
3517007Snate@binkert.org            //
3527007Snate@binkert.org            // For successful SC requests, indicate the success to the cpu by
3537007Snate@binkert.org            // setting the extra data to one.
3547007Snate@binkert.org            //
3556657Snate@binkert.org            request->pkt->req->setExtraData(1);
3566657Snate@binkert.org        }
3577002Snate@binkert.org        //
3586657Snate@binkert.org        // Independent of success, all SC operations must clear the lock
3596657Snate@binkert.org        //
3606657Snate@binkert.org        m_dataCache_ptr->clearLocked(address);
3616657Snate@binkert.org    } else if (request->m_type == RubyRequestType_Load_Linked) {
3626657Snate@binkert.org        //
3636657Snate@binkert.org        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
3646657Snate@binkert.org        // previously locked cache lines?
3656657Snate@binkert.org        //
3666657Snate@binkert.org        m_dataCache_ptr->setLocked(address, m_version);
3676657Snate@binkert.org    } else if ((m_dataCache_ptr->isTagPresent(address)) &&
3686657Snate@binkert.org               (m_dataCache_ptr->isLocked(address, m_version))) {
3696657Snate@binkert.org        //
3706657Snate@binkert.org        // Normal writes should clear the locked address
3716657Snate@binkert.org        //
3726657Snate@binkert.org        m_dataCache_ptr->clearLocked(address);
3736657Snate@binkert.org    }
3746657Snate@binkert.org    return success;
3756657Snate@binkert.org}
3766657Snate@binkert.org
3776657Snate@binkert.orgvoid
3786657Snate@binkert.orgSequencer::recordMissLatency(const Cycles cycles, const RubyRequestType type,
3797007Snate@binkert.org                             const MachineType respondingMach,
3806657Snate@binkert.org                             bool isExternalHit, Cycles issuedTime,
3817007Snate@binkert.org                             Cycles initialRequestTime,
3826657Snate@binkert.org                             Cycles forwardRequestTime,
3839298Snilay@cs.wisc.edu                             Cycles firstResponseTime, Cycles completionTime)
3849298Snilay@cs.wisc.edu{
3859298Snilay@cs.wisc.edu    m_latencyHist.sample(cycles);
3869298Snilay@cs.wisc.edu    m_typeLatencyHist[type]->sample(cycles);
3879298Snilay@cs.wisc.edu
3889298Snilay@cs.wisc.edu    if (isExternalHit) {
3896657Snate@binkert.org        m_missLatencyHist.sample(cycles);
3906657Snate@binkert.org        m_missTypeLatencyHist[type]->sample(cycles);
3916657Snate@binkert.org
3926657Snate@binkert.org        if (respondingMach != MachineType_NUM) {
3937055Snate@binkert.org            m_missMachLatencyHist[respondingMach]->sample(cycles);
3947007Snate@binkert.org            m_missTypeMachLatencyHist[type][respondingMach]->sample(cycles);
3956657Snate@binkert.org
3966657Snate@binkert.org            if ((issuedTime <= initialRequestTime) &&
3977002Snate@binkert.org                (initialRequestTime <= forwardRequestTime) &&
3986657Snate@binkert.org                (forwardRequestTime <= firstResponseTime) &&
3996657Snate@binkert.org                (firstResponseTime <= completionTime)) {
4006657Snate@binkert.org
4017007Snate@binkert.org                m_IssueToInitialDelayHist[respondingMach]->sample(
4026657Snate@binkert.org                    initialRequestTime - issuedTime);
4036657Snate@binkert.org                m_InitialToForwardDelayHist[respondingMach]->sample(
4046657Snate@binkert.org                    forwardRequestTime - initialRequestTime);
4056657Snate@binkert.org                m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
4066657Snate@binkert.org                    firstResponseTime - forwardRequestTime);
4076999Snate@binkert.org                m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
4086657Snate@binkert.org                    completionTime - firstResponseTime);
4096657Snate@binkert.org            } else {
4106657Snate@binkert.org                m_IncompleteTimes[respondingMach]++;
4116657Snate@binkert.org            }
4126657Snate@binkert.org        }
4136657Snate@binkert.org    } else {
4146657Snate@binkert.org        m_hitLatencyHist.sample(cycles);
4157002Snate@binkert.org        m_hitTypeLatencyHist[type]->sample(cycles);
4167002Snate@binkert.org
4176657Snate@binkert.org        if (respondingMach != MachineType_NUM) {
4187002Snate@binkert.org            m_hitMachLatencyHist[respondingMach]->sample(cycles);
4197002Snate@binkert.org            m_hitTypeMachLatencyHist[type][respondingMach]->sample(cycles);
4206657Snate@binkert.org        }
4216657Snate@binkert.org    }
4226657Snate@binkert.org}
4236657Snate@binkert.org
4247007Snate@binkert.orgvoid
4257007Snate@binkert.orgSequencer::writeCallback(const Address& address, DataBlock& data,
4266657Snate@binkert.org                         const bool externalHit, const MachineType mach,
4276657Snate@binkert.org                         const Cycles initialRequestTime,
4286657Snate@binkert.org                         const Cycles forwardRequestTime,
4296657Snate@binkert.org                         const Cycles firstResponseTime)
4306657Snate@binkert.org{
4316657Snate@binkert.org    assert(address == line_address(address));
4326657Snate@binkert.org    assert(m_writeRequestTable.count(line_address(address)));
4336657Snate@binkert.org
4346657Snate@binkert.org    RequestTable::iterator i = m_writeRequestTable.find(address);
4356657Snate@binkert.org    assert(i != m_writeRequestTable.end());
4369206Snilay@cs.wisc.edu    SequencerRequest* request = i->second;
4376657Snate@binkert.org
4386657Snate@binkert.org    m_writeRequestTable.erase(i);
4396657Snate@binkert.org    markRemoved();
4406657Snate@binkert.org
4416657Snate@binkert.org    assert((request->m_type == RubyRequestType_ST) ||
4426657Snate@binkert.org           (request->m_type == RubyRequestType_ATOMIC) ||
4436657Snate@binkert.org           (request->m_type == RubyRequestType_RMW_Read) ||
4449298Snilay@cs.wisc.edu           (request->m_type == RubyRequestType_RMW_Write) ||
4459298Snilay@cs.wisc.edu           (request->m_type == RubyRequestType_Load_Linked) ||
4469298Snilay@cs.wisc.edu           (request->m_type == RubyRequestType_Store_Conditional) ||
4479298Snilay@cs.wisc.edu           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
4486657Snate@binkert.org           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
4496657Snate@binkert.org           (request->m_type == RubyRequestType_FLUSH));
4506657Snate@binkert.org
4516999Snate@binkert.org    //
4526657Snate@binkert.org    // For Alpha, properly handle LL, SC, and write requests with respect to
4536657Snate@binkert.org    // locked cache blocks.
4546657Snate@binkert.org    //
4556657Snate@binkert.org    // Not valid for Network_test protocl
4566657Snate@binkert.org    //
4577007Snate@binkert.org    bool success = true;
4587007Snate@binkert.org    if(!m_usingNetworkTester)
4597007Snate@binkert.org        success = handleLlsc(address, request);
4606657Snate@binkert.org
4617002Snate@binkert.org    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
4627002Snate@binkert.org        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
4637002Snate@binkert.org    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
4648086SBrad.Beckmann@amd.com        m_controller->unblock(address);
4658086SBrad.Beckmann@amd.com    }
4668086SBrad.Beckmann@amd.com
4678086SBrad.Beckmann@amd.com    hitCallback(request, data, success, mach, externalHit,
4688602Snilay@cs.wisc.edu                initialRequestTime, forwardRequestTime, firstResponseTime);
4698602Snilay@cs.wisc.edu}
4708602Snilay@cs.wisc.edu
4718602Snilay@cs.wisc.eduvoid
4728602Snilay@cs.wisc.eduSequencer::readCallback(const Address& address, DataBlock& data,
4738602Snilay@cs.wisc.edu                        bool externalHit, const MachineType mach,
4748086SBrad.Beckmann@amd.com                        Cycles initialRequestTime,
4756657Snate@binkert.org                        Cycles forwardRequestTime,
4767007Snate@binkert.org                        Cycles firstResponseTime)
4776657Snate@binkert.org{
4786657Snate@binkert.org    assert(address == line_address(address));
4796657Snate@binkert.org    assert(m_readRequestTable.count(line_address(address)));
4806657Snate@binkert.org
4816657Snate@binkert.org    RequestTable::iterator i = m_readRequestTable.find(address);
4826657Snate@binkert.org    assert(i != m_readRequestTable.end());
4836657Snate@binkert.org    SequencerRequest* request = i->second;
4846657Snate@binkert.org
4856657Snate@binkert.org    m_readRequestTable.erase(i);
4866657Snate@binkert.org    markRemoved();
4876657Snate@binkert.org
4886862Sdrh5@cs.wisc.edu    assert((request->m_type == RubyRequestType_LD) ||
4896862Sdrh5@cs.wisc.edu           (request->m_type == RubyRequestType_IFETCH));
4906862Sdrh5@cs.wisc.edu
4916862Sdrh5@cs.wisc.edu    hitCallback(request, data, true, mach, externalHit,
4926657Snate@binkert.org                initialRequestTime, forwardRequestTime, firstResponseTime);
4936657Snate@binkert.org}
4946657Snate@binkert.org
// Retire an outstanding request whose data has arrived: touch the cache's
// LRU state, profile latency, move data between the packet and the cache
// block, and then either return the packet to the CPU or, during cache
// warmup/cooldown, hand control back to the cache recorder.
//
// srequest    - the outstanding request being completed; freed here.
// data        - the cache data block associated with the request.
// llscSuccess - false only when a store-conditional failed.
// mach        - machine type that satisfied the request (for profiling).
// externalHit - true when the request was satisfied beyond this cache.
// initialRequestTime / forwardRequestTime / firstResponseTime
//             - protocol timestamps forwarded to the latency profiler.
void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                       bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime)
{
    PacketPtr pkt = srequest->pkt;
    Address request_address(pkt->getAddr());
    Address request_line_address(pkt->getAddr());
    request_line_address.makeLineAddress();
    RubyRequestType type = srequest->m_type;
    Cycles issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        m_instCache_ptr->setMRU(request_line_address);
    } else {
        m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(curCycle() >= issued_time);
    Cycles total_latency = curCycle() - issued_time;

    // Profile the latency for all demand accesses.
    recordMissLatency(total_latency, type, mach, externalHit, issued_time,
                      initialRequestTime, forwardRequestTime,
                      firstResponseTime, curCycle());

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
             curTick(), m_version, "Seq",
             llscSuccess ? "Done" : "SC_Failed", "", "",
             request_address, total_latency);

    // update the data unless it is a non-data-carrying flush
    if (RubySystem::getWarmupEnabled()) {
        // Warmup replays recorded traffic: the packet carries the recorded
        // bytes, which are written straight into the cache block.
        data.setData(pkt->getConstPtr<uint8_t>(),
                     request_address.getOffset(), pkt->getSize());
    } else if (!pkt->isFlush()) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            // Load-like requests: copy the cache block's bytes into the
            // packet so the CPU sees the data.
            memcpy(pkt->getPtr<uint8_t>(),
                   data.getData(request_address.getOffset(), pkt->getSize()),
                   pkt->getSize());
            DPRINTF(RubySequencer, "read data %s\n", data);
        } else {
            // Store-like requests: deposit the packet's bytes into the
            // cache block.
            data.setData(pkt->getConstPtr<uint8_t>(),
                         request_address.getOffset(), pkt->getSize());
            DPRINTF(RubySequencer, "set data %s\n", data);
        }
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    if (m_usingRubyTester) {
        DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
                pkt->cmdString(), pkt->getAddr());
        RubyTester::SenderState* testerSenderState =
            pkt->findNextSenderState<RubyTester::SenderState>();
        assert(testerSenderState);
        testerSenderState->subBlock.mergeFrom(data);
    }

    // The request bookkeeping object is no longer needed.
    delete srequest;

    RubySystem *rs = m_ruby_system;
    if (RubySystem::getWarmupEnabled()) {
        // During warmup the sequencer owns the synthetic packet (and its
        // request), so both are freed here before asking for the next one.
        assert(pkt->req);
        delete pkt->req;
        delete pkt;
        rs->m_cache_recorder->enqueueNextFetchRequest();
    } else if (RubySystem::getCooldownEnabled()) {
        // Cooldown flush packets are likewise sequencer-owned.
        delete pkt;
        rs->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        // Normal operation: return the packet to the CPU-side port.
        ruby_hit_callback(pkt);
    }
}
5788086SBrad.Beckmann@amd.com
5798086SBrad.Beckmann@amd.combool
5808086SBrad.Beckmann@amd.comSequencer::empty() const
5818086SBrad.Beckmann@amd.com{
5828086SBrad.Beckmann@amd.com    return m_writeRequestTable.empty() && m_readRequestTable.empty();
5838086SBrad.Beckmann@amd.com}
5848086SBrad.Beckmann@amd.com
5858086SBrad.Beckmann@amd.comRequestStatus
5868086SBrad.Beckmann@amd.comSequencer::makeRequest(PacketPtr pkt)
5878086SBrad.Beckmann@amd.com{
5888086SBrad.Beckmann@amd.com    if (m_outstanding_count >= m_max_outstanding_requests) {
5898086SBrad.Beckmann@amd.com        return RequestStatus_BufferFull;
5908086SBrad.Beckmann@amd.com    }
5918086SBrad.Beckmann@amd.com
5928086SBrad.Beckmann@amd.com    RubyRequestType primary_type = RubyRequestType_NULL;
5938086SBrad.Beckmann@amd.com    RubyRequestType secondary_type = RubyRequestType_NULL;
5948086SBrad.Beckmann@amd.com
5958086SBrad.Beckmann@amd.com    if (pkt->isLLSC()) {
5968086SBrad.Beckmann@amd.com        //
5978086SBrad.Beckmann@amd.com        // Alpha LL/SC instructions need to be handled carefully by the cache
5988086SBrad.Beckmann@amd.com        // coherence protocol to ensure they follow the proper semantics. In
5996657Snate@binkert.org        // particular, by identifying the operations as atomic, the protocol
6006657Snate@binkert.org        // should understand that migratory sharing optimizations should not
6016657Snate@binkert.org        // be performed (i.e. a load between the LL and SC should not steal
6028602Snilay@cs.wisc.edu        // away exclusive permission).
6036657Snate@binkert.org        //
6046657Snate@binkert.org        if (pkt->isWrite()) {
6057007Snate@binkert.org            DPRINTF(RubySequencer, "Issuing SC\n");
6067007Snate@binkert.org            primary_type = RubyRequestType_Store_Conditional;
6077007Snate@binkert.org        } else {
6086657Snate@binkert.org            DPRINTF(RubySequencer, "Issuing LL\n");
6096657Snate@binkert.org            assert(pkt->isRead());
6106657Snate@binkert.org            primary_type = RubyRequestType_Load_Linked;
6116657Snate@binkert.org        }
6126657Snate@binkert.org        secondary_type = RubyRequestType_ATOMIC;
6136657Snate@binkert.org    } else if (pkt->req->isLockedRMW()) {
6147007Snate@binkert.org        //
6157007Snate@binkert.org        // x86 locked instructions are translated to store cache coherence
6167007Snate@binkert.org        // requests because these requests should always be treated as read
6176657Snate@binkert.org        // exclusive operations and should leverage any migratory sharing
6186657Snate@binkert.org        // optimization built into the protocol.
6196657Snate@binkert.org        //
6206657Snate@binkert.org        if (pkt->isWrite()) {
6216657Snate@binkert.org            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
6226657Snate@binkert.org            primary_type = RubyRequestType_Locked_RMW_Write;
6236657Snate@binkert.org        } else {
6246657Snate@binkert.org            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
6256657Snate@binkert.org            assert(pkt->isRead());
6266657Snate@binkert.org            primary_type = RubyRequestType_Locked_RMW_Read;
6276657Snate@binkert.org        }
6286657Snate@binkert.org        secondary_type = RubyRequestType_ST;
6296657Snate@binkert.org    } else {
6306657Snate@binkert.org        if (pkt->isRead()) {
6317805Snilay@cs.wisc.edu            if (pkt->req->isInstFetch()) {
6326657Snate@binkert.org                primary_type = secondary_type = RubyRequestType_IFETCH;
6336657Snate@binkert.org            } else {
6346657Snate@binkert.org                bool storeCheck = false;
6357007Snate@binkert.org                // only X86 need the store check
6367007Snate@binkert.org                if (system->getArch() == Arch::X86ISA) {
6377007Snate@binkert.org                    uint32_t flags = pkt->req->getFlags();
6386657Snate@binkert.org                    storeCheck = flags &
6396657Snate@binkert.org                        (X86ISA::StoreCheck << X86ISA::FlagShift);
6406657Snate@binkert.org                }
6416657Snate@binkert.org                if (storeCheck) {
6427007Snate@binkert.org                    primary_type = RubyRequestType_RMW_Read;
6436657Snate@binkert.org                    secondary_type = RubyRequestType_ST;
6446657Snate@binkert.org                } else {
6456657Snate@binkert.org                    primary_type = secondary_type = RubyRequestType_LD;
6466657Snate@binkert.org                }
6477007Snate@binkert.org            }
6486657Snate@binkert.org        } else if (pkt->isWrite()) {
6496657Snate@binkert.org            //
6506657Snate@binkert.org            // Note: M5 packets do not differentiate ST from RMW_Write
6516657Snate@binkert.org            //
6527805Snilay@cs.wisc.edu            primary_type = secondary_type = RubyRequestType_ST;
6536657Snate@binkert.org        } else if (pkt->isFlush()) {
6546657Snate@binkert.org          primary_type = secondary_type = RubyRequestType_FLUSH;
6556657Snate@binkert.org        } else {
6567007Snate@binkert.org            panic("Unsupported ruby packet type\n");
6577007Snate@binkert.org        }
6587007Snate@binkert.org    }
6597007Snate@binkert.org
6606657Snate@binkert.org    RequestStatus status = insertRequest(pkt, primary_type);
6616657Snate@binkert.org    if (status != RequestStatus_Ready)
6626657Snate@binkert.org        return status;
6636657Snate@binkert.org
6646657Snate@binkert.org    issueRequest(pkt, secondary_type);
6656657Snate@binkert.org
6666657Snate@binkert.org    // TODO: issue hardware prefetches here
6676657Snate@binkert.org    return RequestStatus_Issued;
6686657Snate@binkert.org}
6697007Snate@binkert.org
6707007Snate@binkert.orgvoid
6716657Snate@binkert.orgSequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
6726657Snate@binkert.org{
6736657Snate@binkert.org    assert(pkt != NULL);
6746657Snate@binkert.org    ContextID proc_id = pkt->req->hasContextId() ?
6757007Snate@binkert.org        pkt->req->contextId() : InvalidContextID;
6767007Snate@binkert.org
6776657Snate@binkert.org    // If valid, copy the pc to the ruby request
6786657Snate@binkert.org    Addr pc = 0;
6796657Snate@binkert.org    if (pkt->req->hasPC()) {
6806657Snate@binkert.org        pc = pkt->req->getPC();
6816657Snate@binkert.org    }
6826657Snate@binkert.org
6836657Snate@binkert.org    // check if the packet has data as for example prefetch and flush
6846657Snate@binkert.org    // requests do not
6856657Snate@binkert.org    std::shared_ptr<RubyRequest> msg =
6866657Snate@binkert.org        std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
6876657Snate@binkert.org                                      pkt->isFlush() ?
6886657Snate@binkert.org                                      nullptr : pkt->getPtr<uint8_t>(),
6896657Snate@binkert.org                                      pkt->getSize(), pc, secondary_type,
6906657Snate@binkert.org                                      RubyAccessMode_Supervisor, pkt,
6916657Snate@binkert.org                                      PrefetchBit_No, proc_id);
6926657Snate@binkert.org
6936657Snate@binkert.org    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n",
6947805Snilay@cs.wisc.edu            curTick(), m_version, "Seq", "Begin", "", "",
6956657Snate@binkert.org            msg->getPhysicalAddress(),
6966657Snate@binkert.org            RubyRequestType_to_string(secondary_type));
6976657Snate@binkert.org
6986657Snate@binkert.org    // The Sequencer currently assesses instruction and data cache hit latency
6996657Snate@binkert.org    // for the top-level caches at the beginning of a memory access.
7007007Snate@binkert.org    // TODO: Eventually, this latency should be moved to represent the actual
7016657Snate@binkert.org    // cache access latency portion of the memory access. This will require
7027007Snate@binkert.org    // changing cache controller protocol files to assess the latency on the
7037007Snate@binkert.org    // access response path.
7046657Snate@binkert.org    Cycles latency(0);  // Initialize to zero to catch misconfigured latency
7056657Snate@binkert.org    if (secondary_type == RubyRequestType_IFETCH)
7066657Snate@binkert.org        latency = m_inst_cache_hit_latency;
7076657Snate@binkert.org    else
7086657Snate@binkert.org        latency = m_data_cache_hit_latency;
7096657Snate@binkert.org
7106657Snate@binkert.org    // Send the message to the cache controller
7116657Snate@binkert.org    assert(latency > 0);
7126657Snate@binkert.org
7136657Snate@binkert.org    assert(m_mandatory_q_ptr != NULL);
7146657Snate@binkert.org    m_mandatory_q_ptr->enqueue(msg, latency);
7156657Snate@binkert.org}
7166657Snate@binkert.org
7176657Snate@binkert.orgtemplate <class KEY, class VALUE>
7187805Snilay@cs.wisc.edustd::ostream &
7196657Snate@binkert.orgoperator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
7206657Snate@binkert.org{
7216657Snate@binkert.org    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
7226657Snate@binkert.org    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();
7236657Snate@binkert.org
7246657Snate@binkert.org    out << "[";
7256657Snate@binkert.org    for (; i != end; ++i)
7266657Snate@binkert.org        out << " " << i->first << "=" << i->second;
7277007Snate@binkert.org    out << " ]";
7287007Snate@binkert.org
7296657Snate@binkert.org    return out;
7306657Snate@binkert.org}
7316657Snate@binkert.org
7326657Snate@binkert.orgvoid
7336657Snate@binkert.orgSequencer::print(ostream& out) const
7346657Snate@binkert.org{
7356657Snate@binkert.org    out << "[Sequencer: " << m_version
7366657Snate@binkert.org        << ", outstanding requests: " << m_outstanding_count
7376657Snate@binkert.org        << ", read request table: " << m_readRequestTable
7386657Snate@binkert.org        << ", write request table: " << m_writeRequestTable
7396657Snate@binkert.org        << "]";
7406657Snate@binkert.org}
7416657Snate@binkert.org
// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.  Compiles to a no-op unless the build defines
// CHECK_COHERENCE.
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    m_ruby_system->checkGlobalCoherenceInvariant(addr);
#endif
}
7526657Snate@binkert.org
// Trace (under the RubyStats debug flag) that a sequencer-level request of
// the given type was observed.
void
Sequencer::recordRequestType(SequencerRequestType requestType) {
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}
7586657Snate@binkert.org
7596657Snate@binkert.org
// Invoked by the cache controller when a block is evicted; forwards the
// address to the base-class hook (presumably so the CPU side can react to
// the eviction — semantics live in ruby_eviction_callback).
void
Sequencer::evictionCallback(const Address& address)
{
    ruby_eviction_callback(address);
}
7656657Snate@binkert.org
// Register this sequencer's statistics: four named aliasing counters, plus
// a family of unnamed histograms (overall, per-request-type, per-machine-
// type, and per-type-per-machine) that the profiler later collates across
// sequencers.  NOTE(review): Stats objects register globally in
// construction order, so the statement/loop order here is kept as-is.
void
Sequencer::regStats()
{
    m_store_waiting_on_load
        .name(name() + ".store_waiting_on_load")
        .desc("Number of times a store aliased with a pending load")
        .flags(Stats::nozero);
    m_store_waiting_on_store
        .name(name() + ".store_waiting_on_store")
        .desc("Number of times a store aliased with a pending store")
        .flags(Stats::nozero);
    m_load_waiting_on_load
        .name(name() + ".load_waiting_on_load")
        .desc("Number of times a load aliased with a pending load")
        .flags(Stats::nozero);
    m_load_waiting_on_store
        .name(name() + ".load_waiting_on_store")
        .desc("Number of times a load aliased with a pending store")
        .flags(Stats::nozero);

    // These statistical variables are not for display.
    // The profiler will collate these across different
    // sequencers and display those collated statistics.
    m_outstandReqHist.init(10);
    m_latencyHist.init(10);
    m_hitLatencyHist.init(10);
    m_missLatencyHist.init(10);

    // One latency histogram per Ruby request type, split into overall,
    // hit, and miss variants.
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist.push_back(new Stats::Histogram());
        m_typeLatencyHist[i]->init(10);

        m_hitTypeLatencyHist.push_back(new Stats::Histogram());
        m_hitTypeLatencyHist[i]->init(10);

        m_missTypeLatencyHist.push_back(new Stats::Histogram());
        m_missTypeLatencyHist[i]->init(10);
    }

    // Per-machine-type latency histograms, plus breakdowns of the miss
    // path into its issue/forward/response/completion segments.
    for (int i = 0; i < MachineType_NUM; i++) {
        m_hitMachLatencyHist.push_back(new Stats::Histogram());
        m_hitMachLatencyHist[i]->init(10);

        m_missMachLatencyHist.push_back(new Stats::Histogram());
        m_missMachLatencyHist[i]->init(10);

        m_IssueToInitialDelayHist.push_back(new Stats::Histogram());
        m_IssueToInitialDelayHist[i]->init(10);

        m_InitialToForwardDelayHist.push_back(new Stats::Histogram());
        m_InitialToForwardDelayHist[i]->init(10);

        m_ForwardToFirstResponseDelayHist.push_back(new Stats::Histogram());
        m_ForwardToFirstResponseDelayHist[i]->init(10);

        m_FirstResponseToCompletionDelayHist.push_back(new Stats::Histogram());
        m_FirstResponseToCompletionDelayHist[i]->init(10);
    }

    // Two-dimensional (request type x machine type) hit/miss histograms.
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_hitTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());
        m_missTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());

        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_hitTypeMachLatencyHist[i][j]->init(10);

            m_missTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_missTypeMachLatencyHist[i][j]->init(10);
        }
    }
}
838