/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/system/Sequencer.hh"

#include "arch/x86/ldstflags.hh"
#include "base/misc.hh"
#include "base/str.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/packet.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "sim/system.hh"

using namespace std;

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), m_IncompleteTimes(MachineType_NUM), deadlockCheckEvent(this)
{
    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_data_cache_hit_latency = p->dcache_hit_latency;
    m_inst_cache_hit_latency = p->icache_hit_latency;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    m_coreId = p->coreid; // for tracking the two CorePair sequencers
    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);
    assert(m_data_cache_hit_latency > 0);
    assert(m_inst_cache_hit_latency > 0);

    m_runningGarnetStandalone = p->garnet_standalone;
}

Sequencer::~Sequencer()
{
}

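// Deadlock-check event handler: scan both outstanding request tables and
// panic if any request has been waiting longer than m_deadlock_threshold;
// otherwise re-schedule the check.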
void
Sequencer::wakeup()
{
    assert(drainState() != DrainState::Draining);

    // Check for deadlock of any of the requests
    Cycles current_time = curCycle();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->pkt->getAddr(), m_readRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->pkt->getAddr(), m_writeRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }
}

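// Reset all latency statistics, including the per-request-type and
// per-machine-type histograms and the incomplete-time counters.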
void
Sequencer::resetStats()
{
    m_latencyHist.reset();
    m_hitLatencyHist.reset();
    m_missLatencyHist.reset();
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist[i]->reset();
        m_hitTypeLatencyHist[i]->reset();
        m_missTypeLatencyHist[i]->reset();
        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i][j]->reset();
            m_missTypeMachLatencyHist[i][j]->reset();
        }
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_missMachLatencyHist[i]->reset();
        m_hitMachLatencyHist[i]->reset();

        m_IssueToInitialDelayHist[i]->reset();
        m_InitialToForwardDelayHist[i]->reset();
        m_ForwardToFirstResponseDelayHist[i]->reset();
        m_FirstResponseToCompletionDelayHist[i]->reset();

        m_IncompleteTimes[i] = 0;
    }
}

// Insert the request into the correct request table. Return
// RequestStatus_Aliased if a request for the same cache line is already
// outstanding, RequestStatus_Ready otherwise.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (!deadlockCheckEvent.scheduled() &&
        drainState() != DrainState::Draining) {
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }

    Addr line_addr = makeLineAddress(pkt->getAddr());

    // Check if the line is blocked for a Locked_RMW
    if (m_controller->isBlocked(line_addr) &&
        (request_type != RubyRequestType_Locked_RMW_Write)) {
        // Return that this request's cache line address aliases with
        // a prior request that locked the cache line. The request cannot
        // proceed until the cache line is unlocked by a Locked_RMW_Write
        return RequestStatus_Aliased;
    }

    // Create a default entry, mapping the address to NULL, the cast is
    // there to make gcc 4.4 happy
    RequestTable::value_type default_entry(line_addr,
                                           (SequencerRequest*) NULL);

    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(default_entry);
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(default_entry);

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load++;
            return RequestStatus_Aliased;
        }
    }

    m_outstandReqHist.sample(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}

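// Bookkeeping helper called after an entry is erased from a request table:
// decrement the outstanding count and re-check the table-size invariant.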
void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

void
Sequencer::invalidateSC(Addr address)
{
    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
    // The controller has lost the coherence permissions, hence the lock
    // on the cache line maintained by the cache should be cleared.
    if (e && e->isLocked(m_version)) {
        e->clearLocked();
    }
}

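// Apply load-linked/store-conditional bookkeeping to the line's cache entry.
// Returns false only when a Store_Conditional finds the line present but no
// longer locked; every other case returns true.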
bool
Sequencer::handleLlsc(Addr address, SequencerRequest* request)
{
    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
    if (!e)
        return true;

    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!e->isLocked(m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        e->clearLocked();
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        e->setLocked(m_version);
    } else if (e->isLocked(m_version)) {
        //
        // Normal writes should clear the locked address
        //
        e->clearLocked();
    }
    return success;
}

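// Record latency statistics for a completed request, split into hit and
// external-miss histograms and, when the timestamps are monotonic, into the
// per-machine delay components of the miss path.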
void
Sequencer::recordMissLatency(const Cycles cycles, const RubyRequestType type,
                             const MachineType respondingMach,
                             bool isExternalHit, Cycles issuedTime,
                             Cycles initialRequestTime,
                             Cycles forwardRequestTime,
                             Cycles firstResponseTime, Cycles completionTime)
{
    m_latencyHist.sample(cycles);
    m_typeLatencyHist[type]->sample(cycles);

    if (isExternalHit) {
        m_missLatencyHist.sample(cycles);
        m_missTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_missMachLatencyHist[respondingMach]->sample(cycles);
            m_missTypeMachLatencyHist[type][respondingMach]->sample(cycles);

            if ((issuedTime <= initialRequestTime) &&
                (initialRequestTime <= forwardRequestTime) &&
                (forwardRequestTime <= firstResponseTime) &&
                (firstResponseTime <= completionTime)) {

                m_IssueToInitialDelayHist[respondingMach]->sample(
                    initialRequestTime - issuedTime);
                m_InitialToForwardDelayHist[respondingMach]->sample(
                    forwardRequestTime - initialRequestTime);
                m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
                    firstResponseTime - forwardRequestTime);
                m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
                    completionTime - firstResponseTime);
            } else {
                m_IncompleteTimes[respondingMach]++;
            }
        }
    } else {
        m_hitLatencyHist.sample(cycles);
        m_hitTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_hitMachLatencyHist[respondingMach]->sample(cycles);
            m_hitTypeMachLatencyHist[type][respondingMach]->sample(cycles);
        }
    }
}

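// Protocol callback for a completed write-class request: remove the entry
// from the write request table, apply LL/SC and Locked_RMW handling, and
// forward the result to hitCallback().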
void
Sequencer::writeCallback(Addr address, DataBlock& data,
                         const bool externalHit, const MachineType mach,
                         const Cycles initialRequestTime,
                         const Cycles forwardRequestTime,
                         const Cycles firstResponseTime)
{
    assert(address == makeLineAddress(address));
    assert(m_writeRequestTable.count(makeLineAddress(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Garnet_standalone protocol.
    //
    bool success = true;
    if (!m_runningGarnetStandalone)
        success = handleLlsc(address, request);

    // Handle SLICC block_on behavior for Locked_RMW accesses. NOTE: the
    // address variable here is assumed to be a line address, so when
    // blocking buffers, must check line addresses.
    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        // blockOnQueue blocks all first-level cache controller queues
        // waiting on memory accesses for the specified address that go to
        // the specified queue. In this case, a Locked_RMW_Write must go to
        // the mandatory_q before unblocking the first-level controller.
        // This will block standard loads, stores, ifetches, etc.
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, data, success, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

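// Protocol callback for a completed read-class request (LD or IFETCH):
// remove the entry from the read request table and forward the result to
// hitCallback().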
void
Sequencer::readCallback(Addr address, DataBlock& data,
                        bool externalHit, const MachineType mach,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    assert(address == makeLineAddress(address));
    assert(m_readRequestTable.count(makeLineAddress(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, data, true, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

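// Common completion path for reads and writes: record latency statistics,
// move data between the packet and the cache DataBlock, update the
// RubyTester state if present, and either return the packet to the CPU-side
// port or feed the cache recorder during warmup/cooldown.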
void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                       bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime)
{
    warn_once("Replacement policy updates recently became the responsibility "
              "of SLICC state machines. Make sure to setMRU() near callbacks "
              "in .sm files!");

    PacketPtr pkt = srequest->pkt;
    Addr request_address(pkt->getAddr());
    RubyRequestType type = srequest->m_type;
    Cycles issued_time = srequest->issue_time;

    assert(curCycle() >= issued_time);
    Cycles total_latency = curCycle() - issued_time;

    // Profile the latency for all demand accesses.
    recordMissLatency(total_latency, type, mach, externalHit, issued_time,
                      initialRequestTime, forwardRequestTime,
                      firstResponseTime, curCycle());

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %d cycles\n",
             curTick(), m_version, "Seq",
             llscSuccess ? "Done" : "SC_Failed", "", "",
             printAddress(request_address), total_latency);

    // update the data unless it is a non-data-carrying flush
    if (RubySystem::getWarmupEnabled()) {
        data.setData(pkt->getConstPtr<uint8_t>(),
                     getOffset(request_address), pkt->getSize());
    } else if (!pkt->isFlush()) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(pkt->getPtr<uint8_t>(),
                   data.getData(getOffset(request_address), pkt->getSize()),
                   pkt->getSize());
            DPRINTF(RubySequencer, "read data %s\n", data);
        } else if (pkt->req->isSwap()) {
            std::vector<uint8_t> overwrite_val(pkt->getSize());
            memcpy(&overwrite_val[0], pkt->getConstPtr<uint8_t>(),
                   pkt->getSize());
            memcpy(pkt->getPtr<uint8_t>(),
                   data.getData(getOffset(request_address), pkt->getSize()),
                   pkt->getSize());
            data.setData(&overwrite_val[0],
                         getOffset(request_address), pkt->getSize());
            DPRINTF(RubySequencer, "swap data %s\n", data);
        } else {
            data.setData(pkt->getConstPtr<uint8_t>(),
                         getOffset(request_address), pkt->getSize());
            DPRINTF(RubySequencer, "set data %s\n", data);
        }
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    if (m_usingRubyTester) {
        DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
                pkt->cmdString(), pkt->getAddr());
        RubyTester::SenderState* testerSenderState =
            pkt->findNextSenderState<RubyTester::SenderState>();
        assert(testerSenderState);
        testerSenderState->subBlock.mergeFrom(data);
    }

    delete srequest;

    RubySystem *rs = m_ruby_system;
    if (RubySystem::getWarmupEnabled()) {
        assert(pkt->req);
        delete pkt->req;
        delete pkt;
        rs->m_cache_recorder->enqueueNextFetchRequest();
    } else if (RubySystem::getCooldownEnabled()) {
        delete pkt;
        rs->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
        testDrainComplete();
    }
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

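// Entry point from RubyPort for a new memory request: classify the packet
// into primary and secondary RubyRequestTypes, insert it into the proper
// request table, and issue it to the cache controller.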
RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLockedRMW()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        //
        // To support SwapReq, we need to check isWrite() first: a SwapReq
        // should always be treated like a write, but since a SwapReq implies
        // both isWrite() and isRead() are true, check isWrite() first here.
        //
        if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
                bool storeCheck = false;
                // only X86 needs the store check
                if (system->getArch() == Arch::X86ISA) {
                    uint32_t flags = pkt->req->getFlags();
                    storeCheck = flags &
                        (X86ISA::StoreCheck << X86ISA::FlagShift);
                }
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

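// Build a RubyRequest message for the packet and enqueue it on the
// mandatory queue, charging the configured L1 instruction- or data-cache
// hit latency.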
void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    ContextID proc_id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;

    ContextID core_id = coreId();

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    // Check if the packet has data; prefetch and flush requests, for
    // example, do not.
    std::shared_ptr<RubyRequest> msg =
        std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
                                      pkt->isFlush() ?
                                      nullptr : pkt->getPtr<uint8_t>(),
                                      pkt->getSize(), pc, secondary_type,
                                      RubyAccessMode_Supervisor, pkt,
                                      PrefetchBit_No, proc_id, core_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
            curTick(), m_version, "Seq", "Begin", "", "",
            printAddress(msg->getPhysicalAddress()),
            RubyRequestType_to_string(secondary_type));

    // The Sequencer currently assesses instruction and data cache hit latency
    // for the top-level caches at the beginning of a memory access.
    // TODO: Eventually, this latency should be moved to represent the actual
    // cache access latency portion of the memory access. This will require
    // changing cache controller protocol files to assess the latency on the
    // access response path.
    Cycles latency(0);  // Initialize to zero to catch misconfigured latency
    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_inst_cache_hit_latency;
    else
        latency = m_data_cache_hit_latency;

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(latency));
}

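// Stream an unordered_map as "[ key=value ... ]"; used when printing the
// request tables.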
template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const std::unordered_map<KEY, VALUE> &map)
{
    auto i = map.begin();
    auto end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(Addr addr)
{
#ifdef CHECK_COHERENCE
    m_ruby_system->checkGlobalCoherenceInvariant(addr);
#endif
}

void
Sequencer::recordRequestType(SequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}

void
Sequencer::evictionCallback(Addr address)
{
    ruby_eviction_callback(address);
}

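// Register statistics: the aliasing counters shown per sequencer and the
// latency histograms that the profiler later collates across sequencers.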
void
Sequencer::regStats()
{
    RubyPort::regStats();

    m_store_waiting_on_load
        .name(name() + ".store_waiting_on_load")
        .desc("Number of times a store aliased with a pending load")
        .flags(Stats::nozero);
    m_store_waiting_on_store
        .name(name() + ".store_waiting_on_store")
        .desc("Number of times a store aliased with a pending store")
        .flags(Stats::nozero);
    m_load_waiting_on_load
        .name(name() + ".load_waiting_on_load")
        .desc("Number of times a load aliased with a pending load")
        .flags(Stats::nozero);
    m_load_waiting_on_store
        .name(name() + ".load_waiting_on_store")
        .desc("Number of times a load aliased with a pending store")
        .flags(Stats::nozero);

    // These statistical variables are not for display.
    // The profiler will collate these across different
    // sequencers and display those collated statistics.
    m_outstandReqHist.init(10);
    m_latencyHist.init(10);
    m_hitLatencyHist.init(10);
    m_missLatencyHist.init(10);

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist.push_back(new Stats::Histogram());
        m_typeLatencyHist[i]->init(10);

        m_hitTypeLatencyHist.push_back(new Stats::Histogram());
        m_hitTypeLatencyHist[i]->init(10);

        m_missTypeLatencyHist.push_back(new Stats::Histogram());
        m_missTypeLatencyHist[i]->init(10);
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_hitMachLatencyHist.push_back(new Stats::Histogram());
        m_hitMachLatencyHist[i]->init(10);

        m_missMachLatencyHist.push_back(new Stats::Histogram());
        m_missMachLatencyHist[i]->init(10);

        m_IssueToInitialDelayHist.push_back(new Stats::Histogram());
        m_IssueToInitialDelayHist[i]->init(10);

        m_InitialToForwardDelayHist.push_back(new Stats::Histogram());
        m_InitialToForwardDelayHist[i]->init(10);

        m_ForwardToFirstResponseDelayHist.push_back(new Stats::Histogram());
        m_ForwardToFirstResponseDelayHist[i]->init(10);

        m_FirstResponseToCompletionDelayHist.push_back(new Stats::Histogram());
        m_FirstResponseToCompletionDelayHist[i]->init(10);
    }

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_hitTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());
        m_missTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());

        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_hitTypeMachLatencyHist[i][j]->init(10);

            m_missTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_missTypeMachLatencyHist[i][j]->init(10);
        }
    }
}