/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * Copyright (c) 2011 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/Ruby.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/RubyPort.hh"
#include "mem/simple_mem.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

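// RubyPort bridges the gem5 port interface and the Ruby memory system: the
// CPU-side slave ports inject requests into the attached controller's
// mandatory queue, while addresses that are not backed by physical memory
// (pio space) are forwarded out through the dedicated master ports.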
RubyPort::RubyPort(const Params *p)
    : MemObject(p), m_ruby_system(p->ruby_system), m_version(p->version),
      m_controller(NULL), m_mandatory_q_ptr(NULL),
      m_usingRubyTester(p->using_ruby_tester), system(p->system),
      pioMasterPort(csprintf("%s.pio-master-port", name()), this),
      pioSlavePort(csprintf("%s.pio-slave-port", name()), this),
      memMasterPort(csprintf("%s.mem-master-port", name()), this),
      memSlavePort(csprintf("%s-mem-slave-port", name()), this,
                   p->ruby_system->getAccessBackingStore(), -1),
      gotAddrRanges(p->port_master_connection_count)
{
    assert(m_version != -1);

    // create the slave ports based on the number of connected ports
    for (size_t i = 0; i < p->port_slave_connection_count; ++i) {
        slave_ports.push_back(new MemSlavePort(csprintf("%s.slave%d", name(),
            i), this, p->ruby_system->getAccessBackingStore(), i));
    }

    // create the master ports based on the number of connected ports
    for (size_t i = 0; i < p->port_master_connection_count; ++i) {
        master_ports.push_back(new PioMasterPort(csprintf("%s.master%d",
            name(), i), this));
    }
}

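// init() runs once the Python configuration has attached the controller, so
// the controller's mandatory queue (the entry point for new requests into
// the protocol) can safely be cached here.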
void
RubyPort::init()
{
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    m_mandatory_q_ptr->setSender(this);
}

BaseMasterPort &
RubyPort::getMasterPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_master_port") {
        return memMasterPort;
    }

    if (if_name == "pio_master_port") {
        return pioMasterPort;
    }

    // used by the x86 CPUs to connect the interrupt PIO and interrupt slave
    // port
    if (if_name != "master") {
        // pass it along to our super class
        return MemObject::getMasterPort(if_name, idx);
    } else {
        if (idx >= static_cast<PortID>(master_ports.size())) {
            panic("RubyPort::getMasterPort: unknown index %d\n", idx);
        }

        return *master_ports[idx];
    }
}

BaseSlavePort &
RubyPort::getSlavePort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_slave_port") {
        return memSlavePort;
    }

    if (if_name == "pio_slave_port")
        return pioSlavePort;

    // used by the CPUs to connect the caches to the interconnect, and
    // for the x86 case also the interrupt master
    if (if_name != "slave") {
        // pass it along to our super class
        return MemObject::getSlavePort(if_name, idx);
    } else {
        if (idx >= static_cast<PortID>(slave_ports.size())) {
            panic("RubyPort::getSlavePort: unknown index %d\n", idx);
        }

        return *slave_ports[idx];
    }
}

RubyPort::PioMasterPort::PioMasterPort(const std::string &_name,
                           RubyPort *_port)
    : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
      reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
{
    DPRINTF(RubyPort, "Created master pioport on sequencer %s\n", _name);
}

RubyPort::PioSlavePort::PioSlavePort(const std::string &_name,
                           RubyPort *_port)
    : QueuedSlavePort(_name, _port, queue), queue(*_port, *this)
{
    DPRINTF(RubyPort, "Created slave pioport on sequencer %s\n", _name);
}

RubyPort::MemMasterPort::MemMasterPort(const std::string &_name,
                           RubyPort *_port)
    : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
      reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
{
    DPRINTF(RubyPort, "Created master memport on ruby sequencer %s\n", _name);
}

RubyPort::MemSlavePort::MemSlavePort(const std::string &_name, RubyPort *_port,
                                     bool _access_backing_store, PortID id)
    : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
      access_backing_store(_access_backing_store)
{
    DPRINTF(RubyPort, "Created slave memport on ruby sequencer %s\n", _name);
}

bool
RubyPort::PioMasterPort::recvTimingResp(PacketPtr pkt)
{
    RubyPort *rp = static_cast<RubyPort *>(&owner);
    DPRINTF(RubyPort, "Response for address: %#x\n", pkt->getAddr());

    // send next cycle
    rp->pioSlavePort.schedTimingResp(
            pkt, curTick() + rp->m_ruby_system->clockPeriod());
    return true;
}

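// A response arriving on the mem master port belongs to a pio access that a
// CPU originally issued through one of the MemSlavePorts; the SenderState
// pushed on the way out tells us which port to send it back to.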
bool
RubyPort::MemMasterPort::recvTimingResp(PacketPtr pkt)
{
    // got a response from a device
    assert(pkt->isResponse());

    // First we must retrieve the request port from the sender State
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
    MemSlavePort *port = senderState->port;
    assert(port != NULL);
    delete senderState;

    // In FS mode, ruby memory will receive pio responses from devices
    // and it must forward these responses back to the particular CPU.
    DPRINTF(RubyPort, "Pio response for address %#x, going to %s\n",
            pkt->getAddr(), port->name());

    // attempt to send the response in the next cycle
    RubyPort *rp = static_cast<RubyPort *>(&owner);
    port->schedTimingResp(pkt, curTick() + rp->m_ruby_system->clockPeriod());

    return true;
}

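// Requests arriving on the pio slave port are routed to whichever master
// port advertises an address range containing the target address.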
bool
RubyPort::PioSlavePort::recvTimingReq(PacketPtr pkt)
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
        AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
        for (auto it = l.begin(); it != l.end(); ++it) {
            if (it->contains(pkt->getAddr())) {
                // generally it is not safe to assume success here as
                // the port could be blocked
                bool M5_VAR_USED success =
                    ruby_port->master_ports[i]->sendTimingReq(pkt);
                assert(success);
                return true;
            }
        }
    }
    panic("Should never reach here!\n");
}

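// Timing requests from the CPU side: pio addresses are forwarded through the
// mem master port, memory addresses are handed to the sequencer via
// makeRequest(); if the sequencer cannot accept the request, the port is
// recorded for a retry once resources free up.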
bool
RubyPort::MemSlavePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Timing request for address %#x on port %d\n",
            pkt->getAddr(), id);
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    if (pkt->memInhibitAsserted())
        panic("RubyPort should never see an inhibited request\n");

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(ruby_port->memMasterPort.isConnected());
        DPRINTF(RubyPort, "Request address %#x assumed to be a pio address\n",
                pkt->getAddr());

        // Save the port in the sender state object to be used later to
        // route the response
        pkt->pushSenderState(new SenderState(this));

        // send next cycle
        RubySystem *rs = ruby_port->m_ruby_system;
        ruby_port->memMasterPort.schedTimingReq(pkt,
            curTick() + rs->clockPeriod());
        return true;
    }

    assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <=
           RubySystem::getBlockSizeBytes());

    // Submit the ruby request
    RequestStatus requestStatus = ruby_port->makeRequest(pkt);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to tell the port to retry at a later point
    // and return false.
    if (requestStatus == RequestStatus_Issued) {
        // Save the port in the sender state object to be used later to
        // route the response
        pkt->pushSenderState(new SenderState(this));

        DPRINTF(RubyPort, "Request %s 0x%x issued\n", pkt->cmdString(),
                pkt->getAddr());
        return true;
    }

    //
    // Unless one is using the ruby tester, record the stalled M5 port for
    // later retry when the sequencer becomes free.
    //
    if (!ruby_port->m_usingRubyTester) {
        ruby_port->addToRetryList(this);
    }

    DPRINTF(RubyPort, "Request for address %#x did not issue because %s\n",
            pkt->getAddr(), RequestStatus_to_string(requestStatus));

    return false;
}

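// Functional accesses are serviced either by the backing store (when the
// RubySystem maintains one) or by reading/writing Ruby's own cache and
// memory state; functional pio accesses are not supported.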
void
RubyPort::MemSlavePort::recvFunctional(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Functional access for address: %#x\n", pkt->getAddr());

    RubyPort *rp M5_VAR_USED = static_cast<RubyPort *>(&owner);
    RubySystem *rs = rp->m_ruby_system;

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(rp->memMasterPort.isConnected());
        DPRINTF(RubyPort, "Pio Request for address: %#x\n", pkt->getAddr());
        panic("RubyPort::PioMasterPort::recvFunctional() not implemented!\n");
    }

    assert(pkt->getAddr() + pkt->getSize() <=
                line_address(Address(pkt->getAddr())).getAddress() +
                RubySystem::getBlockSizeBytes());

    if (access_backing_store) {
        // The attached physmem contains the official version of data.
        // The following command performs the real functional access.
        // This line should be removed once Ruby supplies the official version
        // of data.
        rs->getPhysMem()->functionalAccess(pkt);
    } else {
        bool accessSucceeded = false;
        bool needsResponse = pkt->needsResponse();

        // Do the functional access on ruby memory
        if (pkt->isRead()) {
            accessSucceeded = rs->functionalRead(pkt);
        } else if (pkt->isWrite()) {
            accessSucceeded = rs->functionalWrite(pkt);
        } else {
            panic("Unsupported functional command %s\n", pkt->cmdString());
        }

        // Unless the requester explicitly said otherwise, generate an error if
        // the functional request failed
        if (!accessSucceeded && !pkt->suppressFuncError()) {
            fatal("Ruby functional %s failed for address %#x\n",
                  pkt->isWrite() ? "write" : "read", pkt->getAddr());
        }

        // turn packet around to go back to requester if response expected
        if (needsResponse) {
            pkt->setFunctionalResponseStatus(accessSucceeded);
        }

        DPRINTF(RubyPort, "Functional access %s!\n",
                accessSucceeded ? "successful":"failed");
    }
}

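// Called when a memory request completes inside Ruby. The originating slave
// port is recovered from the SenderState, the packet is completed via
// hitCallback(), and any ports stalled waiting on sequencer resources are
// given a retry.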
void
RubyPort::ruby_hit_callback(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Hit callback for %s 0x%x\n", pkt->cmdString(),
            pkt->getAddr());

    // The packet was destined for memory and has not yet been turned
    // into a response
    assert(system->isMemAddr(pkt->getAddr()));
    assert(pkt->isRequest());

    // First we must retrieve the request port from the sender State
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
    MemSlavePort *port = senderState->port;
    assert(port != NULL);
    delete senderState;

    port->hitCallback(pkt);

    //
    // If we had to stall the MemSlavePorts, wake them up because the sequencer
    // likely has free resources now.
    //
    if (!retryList.empty()) {
        //
        // Record the current list of ports to retry on a temporary list before
        // calling sendRetry on those ports.  sendRetry will cause an
        // immediate retry, which may result in the ports being put back on the
        // list. Therefore we want to clear the retryList before calling
        // sendRetry.
        //
        std::vector<MemSlavePort *> curRetryList(retryList);

        retryList.clear();

        for (auto i = curRetryList.begin(); i != curRetryList.end(); ++i) {
            DPRINTF(RubyPort,
                    "Sequencer may now be free.  SendRetry to port %s\n",
                    (*i)->name());
            (*i)->sendRetryReq();
        }
    }

    testDrainComplete();
}

void
RubyPort::testDrainComplete()
{
    // If we weren't able to drain before, we might be able to now.
    if (drainState() == DrainState::Draining) {
        unsigned int drainCount = outstandingCount();
        DPRINTF(Drain, "Drain count: %u\n", drainCount);
        if (drainCount == 0) {
            DPRINTF(Drain, "RubyPort done draining, signaling drain done\n");
            signalDrainDone();
        }
    }
}

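// Draining: the deadlock-check event is descheduled so it cannot fire while
// the simulator is paused, and the port stays in the Draining state until
// all outstanding requests have completed (testDrainComplete() above then
// signals completion from ruby_hit_callback()).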
DrainState
RubyPort::drain()
{
    if (isDeadlockEventScheduled()) {
        descheduleDeadlockEvent();
    }

    //
    // If the RubyPort is not empty, then it needs to clear all outstanding
    // requests before it should call signalDrainDone()
    //
    DPRINTF(Config, "outstanding count %d\n", outstandingCount());
    if (outstandingCount() > 0) {
        DPRINTF(Drain, "RubyPort not drained\n");
        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

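// Complete a request on behalf of Ruby: successful SCs and LLs are converted
// to plain writes/reads before touching the backing store, failed SCs and
// Flushes skip physical memory entirely, and the response (if one is
// expected) is sent back in the same cycle.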
void
RubyPort::MemSlavePort::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();

    // Unless specified at configuration, all responses except failed SC
    // and Flush operations access M5 physical memory.
    bool accessPhysMem = access_backing_store;

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            if (pkt->req->getExtraData() != 0) {
                //
                // Successful SC packets convert to normal writes
                //
                pkt->convertScToWrite();
            } else {
                //
                // Failed SC packets don't access physical memory and thus
                // the RubyPort itself must convert it to a response.
                //
                accessPhysMem = false;
            }
        } else {
            //
            // All LL packets convert to normal loads so that M5 PhysMem does
            // not lock the blocks.
            //
            pkt->convertLlToRead();
        }
    }

    // Flush requests don't access physical memory
    if (pkt->isFlush()) {
        accessPhysMem = false;
    }

    DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse);

    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
    RubySystem *rs = ruby_port->m_ruby_system;
    if (accessPhysMem) {
        rs->getPhysMem()->access(pkt);
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        DPRINTF(RubyPort, "Sending packet back over port\n");
        // Send a response in the same cycle. There is no need to delay the
        // response because the response latency is already incurred in the
        // Ruby protocol.
        schedTimingResp(pkt, curTick());
    } else {
        delete pkt;
    }

    DPRINTF(RubyPort, "Hit callback done!\n");
}

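// The pio slave port advertises the union of the address ranges reported by
// the devices behind the connected pio master ports.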
AddrRangeList
RubyPort::PioSlavePort::getAddrRanges() const
{
    // at the moment the assumption is that the master does not care
    AddrRangeList ranges;
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
        ranges.splice(ranges.begin(),
                ruby_port->master_ports[i]->getAddrRanges());
    }
    for (const auto M5_VAR_USED &r : ranges)
        DPRINTF(RubyPort, "%s\n", r.to_string());
    return ranges;
}

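// An address counts as physical memory if the System object claims it;
// everything else is treated as pio space by the request handlers above.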
bool
RubyPort::MemSlavePort::isPhysMemAddress(Addr addr) const
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
    return ruby_port->system->isMemAddr(addr);
}

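// When Ruby evicts a cache block, broadcast the eviction as an invalidation
// snoop to every snooping CPU-side port so the cores above observe the loss
// of the block.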
void
RubyPort::ruby_eviction_callback(const Address& address)
{
    DPRINTF(RubyPort, "Sending invalidations.\n");
    // This request is deleted in the stack-allocated packet destructor
    // when this function exits
    // TODO: should this really be using funcMasterId?
    RequestPtr req =
            new Request(address.getAddress(), 0, 0, Request::funcMasterId);
    // Use a single packet to signal all snooping ports of the invalidation.
    // This assumes that snooping ports do NOT modify the packet/request
    Packet pkt(req, MemCmd::InvalidateReq);
    for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) {
        // check if the connected master port is snooping
        if ((*p)->isSnooping()) {
            // send as a snoop request
            (*p)->sendTimingSnoopReq(&pkt);
        }
    }
}

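// Each connected pio master port reports its device's address ranges once;
// after the last report arrives, the aggregated range change is propagated
// to the CPU-facing pio slave port (full-system only).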
void
RubyPort::PioMasterPort::recvRangeChange()
{
    RubyPort &r = static_cast<RubyPort &>(owner);
    r.gotAddrRanges--;
    if (r.gotAddrRanges == 0 && FullSystem) {
        r.pioSlavePort.sendRangeChange();
    }
}