// Sequencer.cc, revision 12051:4cc27e53748d
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/system/Sequencer.hh"

#include "arch/x86/ldstflags.hh"
#include "base/misc.hh"
#include "base/str.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/packet.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "sim/system.hh"

using namespace std;

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), m_IncompleteTimes(MachineType_NUM), deadlockCheckEvent(this)
{
    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_data_cache_hit_latency = p->dcache_hit_latency;
    m_inst_cache_hit_latency = p->icache_hit_latency;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    m_coreId = p->coreid; // for tracking the two CorePair sequencers
    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);
    assert(m_data_cache_hit_latency > 0);
    assert(m_inst_cache_hit_latency > 0);

    m_runningGarnetStandalone = p->garnet_standalone;
}

Sequencer::~Sequencer()
{
}

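// Wakeup handler for deadlockCheckEvent: scans both request tables and
// panics if any outstanding request has been waiting for at least
// m_deadlock_threshold cycles; otherwise the check is rescheduled while
// requests remain outstanding. (For example, with a 1 GHz sequencer clock
// and a threshold of 500000 cycles, a request stuck for more than 500 us
// of simulated time would trip the panic.)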
void
Sequencer::wakeup()
{
    assert(drainState() != DrainState::Draining);

    // Check for deadlock of any of the requests
    Cycles current_time = curCycle();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->pkt->getAddr(), m_readRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->pkt->getAddr(), m_writeRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }
}

void Sequencer::resetStats()
{
    m_latencyHist.reset();
    m_hitLatencyHist.reset();
    m_missLatencyHist.reset();
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist[i]->reset();
        m_hitTypeLatencyHist[i]->reset();
        m_missTypeLatencyHist[i]->reset();
        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i][j]->reset();
            m_missTypeMachLatencyHist[i][j]->reset();
        }
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_missMachLatencyHist[i]->reset();
        m_hitMachLatencyHist[i]->reset();

        m_IssueToInitialDelayHist[i]->reset();
        m_InitialToForwardDelayHist[i]->reset();
        m_ForwardToFirstResponseDelayHist[i]->reset();
        m_FirstResponseToCompletionDelayHist[i]->reset();

        m_IncompleteTimes[i] = 0;
    }
}

// Insert the request into the correct request table. Return
// RequestStatus_Aliased if a conflicting request for the same cache line is
// already outstanding, RequestStatus_Ready otherwise.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (!deadlockCheckEvent.scheduled() &&
        drainState() != DrainState::Draining) {
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }

    Addr line_addr = makeLineAddress(pkt->getAddr());

    // Check if the line is blocked for a Locked_RMW
    if (m_controller->isBlocked(line_addr) &&
        (request_type != RubyRequestType_Locked_RMW_Write)) {
        // Return that this request's cache line address aliases with
        // a prior request that locked the cache line. The request cannot
        // proceed until the cache line is unlocked by a Locked_RMW_Write
        return RequestStatus_Aliased;
    }

    // Create a default entry mapping the address to NULL; the cast is
    // there to make gcc 4.4 happy.
    RequestTable::value_type default_entry(line_addr,
                                           (SequencerRequest*) NULL);

    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(default_entry);
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(default_entry);

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load++;
            return RequestStatus_Aliased;
        }
    }

    m_outstandReqHist.sample(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}

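// Bookkeeping hook called after an entry has been erased from either request
// table; keeps m_outstanding_count consistent with the table sizes.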
void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

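// Clear this sequencer's LL/SC lock on the line, if it holds one. Called
// when coherence permission on the block is lost, so that a subsequent
// Store_Conditional to the line will fail.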
void
Sequencer::invalidateSC(Addr address)
{
    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
    // The controller has lost the coherence permissions, hence the lock
    // on the cache line maintained by the cache should be cleared.
    if (e && e->isLocked(m_version)) {
        e->clearLocked();
    }
}

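// Apply LL/SC semantics using the per-line lock bit in the cache entry:
// LL locks the line, SC succeeds only if the line is still locked, and any
// other write clears an existing lock. Returns false only for a failed SC.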
bool
Sequencer::handleLlsc(Addr address, SequencerRequest* request)
{
    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
    if (!e)
        return true;

    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!e->isLocked(m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        e->clearLocked();
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        e->setLocked(m_version);
    } else if (e->isLocked(m_version)) {
        //
        // Normal writes should clear the locked address
        //
        e->clearLocked();
    }
    return success;
}

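// Sample latency statistics for a completed request. Requests serviced
// beyond the first-level cache (isExternalHit) feed the miss histograms,
// local hits feed the hit histograms. The per-machine delay components are
// sampled only when the four timestamps are monotonically ordered;
// otherwise the sample is counted in m_IncompleteTimes.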
void
Sequencer::recordMissLatency(const Cycles cycles, const RubyRequestType type,
                             const MachineType respondingMach,
                             bool isExternalHit, Cycles issuedTime,
                             Cycles initialRequestTime,
                             Cycles forwardRequestTime,
                             Cycles firstResponseTime, Cycles completionTime)
{
    m_latencyHist.sample(cycles);
    m_typeLatencyHist[type]->sample(cycles);

    if (isExternalHit) {
        m_missLatencyHist.sample(cycles);
        m_missTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_missMachLatencyHist[respondingMach]->sample(cycles);
            m_missTypeMachLatencyHist[type][respondingMach]->sample(cycles);

            if ((issuedTime <= initialRequestTime) &&
                (initialRequestTime <= forwardRequestTime) &&
                (forwardRequestTime <= firstResponseTime) &&
                (firstResponseTime <= completionTime)) {

                m_IssueToInitialDelayHist[respondingMach]->sample(
                    initialRequestTime - issuedTime);
                m_InitialToForwardDelayHist[respondingMach]->sample(
                    forwardRequestTime - initialRequestTime);
                m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
                    firstResponseTime - forwardRequestTime);
                m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
                    completionTime - firstResponseTime);
            } else {
                m_IncompleteTimes[respondingMach]++;
            }
        }
    } else {
        m_hitLatencyHist.sample(cycles);
        m_hitTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_hitMachLatencyHist[respondingMach]->sample(cycles);
            m_hitTypeMachLatencyHist[type][respondingMach]->sample(cycles);
        }
    }
}

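// Completion callback for store-class requests. Removes the table entry,
// applies LL/SC handling (except in Garnet standalone mode), manages the
// SLICC block_on state for Locked_RMW sequences, and forwards the result
// to hitCallback().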
void
Sequencer::writeCallback(Addr address, DataBlock& data,
                         const bool externalHit, const MachineType mach,
                         const Cycles initialRequestTime,
                         const Cycles forwardRequestTime,
                         const Cycles firstResponseTime)
{
    assert(address == makeLineAddress(address));
    assert(m_writeRequestTable.count(makeLineAddress(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Garnet_standalone protocol.
    //
    bool success = true;
    if (!m_runningGarnetStandalone)
        success = handleLlsc(address, request);

    // Handle SLICC block_on behavior for Locked_RMW accesses. NOTE: the
    // address variable here is assumed to be a line address, so when
    // blocking buffers, must check line addresses.
    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        // blockOnQueue blocks all first-level cache controller queues
        // waiting on memory accesses for the specified address that go to
        // the specified queue. In this case, a Locked_RMW_Write must go to
        // the mandatory_q before unblocking the first-level controller.
        // This will block standard loads, stores, ifetches, etc.
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, data, success, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

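// Completion callback for loads and instruction fetches. Removes the table
// entry and forwards the filled data block to hitCallback().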
void
Sequencer::readCallback(Addr address, DataBlock& data,
                        bool externalHit, const MachineType mach,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    assert(address == makeLineAddress(address));
    assert(m_readRequestTable.count(makeLineAddress(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, data, true, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

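// Common completion path for reads and writes: records latency statistics,
// copies data between the packet and the line's DataBlock according to the
// request type, updates RubyTester state if present, and either feeds the
// cache recorder (during warmup/cooldown) or responds to the CPU via
// ruby_hit_callback().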
void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                       bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime)
{
    warn_once("Replacement policy updates recently became the responsibility "
              "of SLICC state machines. Make sure to setMRU() near callbacks "
              "in .sm files!");

    PacketPtr pkt = srequest->pkt;
    Addr request_address(pkt->getAddr());
    RubyRequestType type = srequest->m_type;
    Cycles issued_time = srequest->issue_time;

    assert(curCycle() >= issued_time);
    Cycles total_latency = curCycle() - issued_time;

    // Profile the latency for all demand accesses.
    recordMissLatency(total_latency, type, mach, externalHit, issued_time,
                      initialRequestTime, forwardRequestTime,
                      firstResponseTime, curCycle());

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %d cycles\n",
             curTick(), m_version, "Seq",
             llscSuccess ? "Done" : "SC_Failed", "", "",
             printAddress(request_address), total_latency);

    // update the data unless it is a non-data-carrying flush
    if (RubySystem::getWarmupEnabled()) {
        data.setData(pkt->getConstPtr<uint8_t>(),
                     getOffset(request_address), pkt->getSize());
    } else if (!pkt->isFlush()) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(pkt->getPtr<uint8_t>(),
                   data.getData(getOffset(request_address), pkt->getSize()),
                   pkt->getSize());
            DPRINTF(RubySequencer, "read data %s\n", data);
        } else if (pkt->req->isSwap()) {
            std::vector<uint8_t> overwrite_val(pkt->getSize());
            memcpy(&overwrite_val[0], pkt->getConstPtr<uint8_t>(),
                   pkt->getSize());
            memcpy(pkt->getPtr<uint8_t>(),
                   data.getData(getOffset(request_address), pkt->getSize()),
                   pkt->getSize());
            data.setData(&overwrite_val[0],
                         getOffset(request_address), pkt->getSize());
            DPRINTF(RubySequencer, "swap data %s\n", data);
        } else if (type != RubyRequestType_Store_Conditional || llscSuccess) {
            // Types of stores set the actual data here, apart from
            // failed Store Conditional requests
            data.setData(pkt->getConstPtr<uint8_t>(),
                         getOffset(request_address), pkt->getSize());
            DPRINTF(RubySequencer, "set data %s\n", data);
        }
    }

5017816Ssteve.reinhardt@amd.com    // If using the RubyTester, update the RubyTester sender state's
5027756SAli.Saidi@ARM.com    // subBlock with the recieved data.  The tester will later access
5037756SAli.Saidi@ARM.com    // this state.
    if (m_usingRubyTester) {
        DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
                pkt->cmdString(), pkt->getAddr());
        RubyTester::SenderState* testerSenderState =
            pkt->findNextSenderState<RubyTester::SenderState>();
        assert(testerSenderState);
        testerSenderState->subBlock.mergeFrom(data);
    }

    delete srequest;

    RubySystem *rs = m_ruby_system;
    if (RubySystem::getWarmupEnabled()) {
        assert(pkt->req);
        delete pkt->req;
        delete pkt;
        rs->m_cache_recorder->enqueueNextFetchRequest();
    } else if (RubySystem::getCooldownEnabled()) {
        delete pkt;
        rs->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
        testDrainComplete();
    }
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

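// Entry point from the RubyPort slave ports. Classifies the packet into
// primary and secondary Ruby request types (e.g., an LL maps to
// Load_Linked/ATOMIC, an x86 locked read to Locked_RMW_Read/ST), inserts it
// into the appropriate request table, and issues it to the cache controller
// if no alias or resource conflict was found.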
RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLockedRMW()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        //
        // To support SwapReq, we need to check isWrite() first: a SwapReq
        // should always be treated like a write, but since a SwapReq implies
        // both isWrite() and isRead() are true, check isWrite() first here.
        //
        if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
                bool storeCheck = false;
                // only X86 needs the store check
                if (system->getArch() == Arch::X86ISA) {
                    uint32_t flags = pkt->req->getFlags();
                    storeCheck = flags &
                        (X86ISA::StoreCheck << X86ISA::FlagShift);
                }
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

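// Build the RubyRequest message for the packet and enqueue it on the
// mandatory queue after the configured L1 hit latency (instruction or data
// cache latency, chosen by the secondary request type).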
void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    ContextID proc_id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;

    ContextID core_id = coreId();

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    // check if the packet has data as for example prefetch and flush
    // requests do not
    std::shared_ptr<RubyRequest> msg =
        std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
                                      pkt->isFlush() ?
                                      nullptr : pkt->getPtr<uint8_t>(),
                                      pkt->getSize(), pc, secondary_type,
                                      RubyAccessMode_Supervisor, pkt,
                                      PrefetchBit_No, proc_id, core_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
            curTick(), m_version, "Seq", "Begin", "", "",
            printAddress(msg->getPhysicalAddress()),
            RubyRequestType_to_string(secondary_type));

    // The Sequencer currently assesses instruction and data cache hit latency
    // for the top-level caches at the beginning of a memory access.
    // TODO: Eventually, this latency should be moved to represent the actual
    // cache access latency portion of the memory access. This will require
    // changing cache controller protocol files to assess the latency on the
    // access response path.
    Cycles latency(0);  // Initialize to zero to catch misconfigured latency
    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_inst_cache_hit_latency;
    else
        latency = m_data_cache_hit_latency;

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(latency));
}

template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const std::unordered_map<KEY, VALUE> &map)
{
    auto i = map.begin();
    auto end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(Addr addr)
{
#ifdef CHECK_COHERENCE
    m_ruby_system->checkGlobalCoherenceInvariant(addr);
#endif
}

void
Sequencer::recordRequestType(SequencerRequestType requestType) {
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}

void
Sequencer::evictionCallback(Addr address)
{
    ruby_eviction_callback(address);
}

void
Sequencer::regStats()
{
    RubyPort::regStats();

    m_store_waiting_on_load
        .name(name() + ".store_waiting_on_load")
        .desc("Number of times a store aliased with a pending load")
        .flags(Stats::nozero);
    m_store_waiting_on_store
        .name(name() + ".store_waiting_on_store")
        .desc("Number of times a store aliased with a pending store")
        .flags(Stats::nozero);
    m_load_waiting_on_load
        .name(name() + ".load_waiting_on_load")
        .desc("Number of times a load aliased with a pending load")
        .flags(Stats::nozero);
    m_load_waiting_on_store
        .name(name() + ".load_waiting_on_store")
        .desc("Number of times a load aliased with a pending store")
        .flags(Stats::nozero);

    // These statistical variables are not for display.
    // The profiler will collate these across different
    // sequencers and display those collated statistics.
    m_outstandReqHist.init(10);
    m_latencyHist.init(10);
    m_hitLatencyHist.init(10);
    m_missLatencyHist.init(10);

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist.push_back(new Stats::Histogram());
        m_typeLatencyHist[i]->init(10);

        m_hitTypeLatencyHist.push_back(new Stats::Histogram());
        m_hitTypeLatencyHist[i]->init(10);

        m_missTypeLatencyHist.push_back(new Stats::Histogram());
        m_missTypeLatencyHist[i]->init(10);
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_hitMachLatencyHist.push_back(new Stats::Histogram());
        m_hitMachLatencyHist[i]->init(10);

        m_missMachLatencyHist.push_back(new Stats::Histogram());
        m_missMachLatencyHist[i]->init(10);

        m_IssueToInitialDelayHist.push_back(new Stats::Histogram());
        m_IssueToInitialDelayHist[i]->init(10);

        m_InitialToForwardDelayHist.push_back(new Stats::Histogram());
        m_InitialToForwardDelayHist[i]->init(10);

        m_ForwardToFirstResponseDelayHist.push_back(new Stats::Histogram());
        m_ForwardToFirstResponseDelayHist[i]->init(10);

        m_FirstResponseToCompletionDelayHist.push_back(new Stats::Histogram());
        m_FirstResponseToCompletionDelayHist[i]->init(10);
    }

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_hitTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());
        m_missTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());

        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_hitTypeMachLatencyHist[i][j]->init(10);

            m_missTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_missTypeMachLatencyHist[i][j]->init(10);
        }
    }
}
7988655Sandreas.hansson@arm.com