// RubyPort.cc (gem5, revision 10467)
/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * Copyright (c) 2011 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/Ruby.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/RubyPort.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

RubyPort::RubyPort(const Params *p)
    : MemObject(p), m_version(p->version), m_controller(NULL),
      m_mandatory_q_ptr(NULL), m_usingRubyTester(p->using_ruby_tester),
      system(p->system),
      pioMasterPort(csprintf("%s.pio-master-port", name()), this),
      pioSlavePort(csprintf("%s.pio-slave-port", name()), this),
      memMasterPort(csprintf("%s.mem-master-port", name()), this),
      memSlavePort(csprintf("%s-mem-slave-port", name()), this,
          p->ruby_system, p->access_phys_mem, -1),
      gotAddrRanges(p->port_master_connection_count), drainManager(NULL),
      access_phys_mem(p->access_phys_mem)
{
    assert(m_version != -1);

    // create the slave ports based on the number of connected ports
    for (size_t i = 0; i < p->port_slave_connection_count; ++i) {
        slave_ports.push_back(new MemSlavePort(csprintf("%s.slave%d", name(),
            i), this, p->ruby_system, access_phys_mem, i));
    }

    // create the master ports based on the number of connected ports
    for (size_t i = 0; i < p->port_master_connection_count; ++i) {
        master_ports.push_back(new PioMasterPort(csprintf("%s.master%d",
            name(), i), this));
    }
}
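
// The vector of MemSlavePorts ("slave") created above carries cache
// requests from the connected CPUs into the sequencer, while the vector
// of PioMasterPorts ("master") lets PIO requests arriving on pioSlavePort
// be routed back out to devices (see PioSlavePort::recvTimingReq). The
// number of entries in each vector is determined by how many peers the
// configuration script connects to the corresponding vector port.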

void
RubyPort::init()
{
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    m_mandatory_q_ptr->setSender(this);
}

BaseMasterPort &
RubyPort::getMasterPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_master_port") {
        return memMasterPort;
    }

    if (if_name == "pio_master_port") {
        return pioMasterPort;
    }

    // used by the x86 CPUs to connect the interrupt PIO and interrupt slave
    // port
    if (if_name != "master") {
        // pass it along to our super class
        return MemObject::getMasterPort(if_name, idx);
    } else {
        if (idx >= static_cast<PortID>(master_ports.size())) {
            panic("RubyPort::getMasterPort: unknown index %d\n", idx);
        }

        return *master_ports[idx];
    }
}

BaseSlavePort &
RubyPort::getSlavePort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_slave_port") {
        return memSlavePort;
    }

    if (if_name == "pio_slave_port")
        return pioSlavePort;

    // used by the CPUs to connect the caches to the interconnect, and
    // for the x86 case also the interrupt master
    if (if_name != "slave") {
        // pass it along to our super class
        return MemObject::getSlavePort(if_name, idx);
    } else {
        if (idx >= static_cast<PortID>(slave_ports.size())) {
            panic("RubyPort::getSlavePort: unknown index %d\n", idx);
        }

        return *slave_ports[idx];
    }
}
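
// The if_name strings above ("mem_master_port", "pio_master_port",
// "mem_slave_port", "pio_slave_port", "master", "slave") correspond to the
// port attributes exposed on the Python side, so a configuration script
// wires them up by name. As a rough sketch (the attribute paths and object
// names are illustrative and depend on the actual script):
//
//     cpu.icache_port = ruby_port.slave
//     cpu.dcache_port = ruby_port.slave
//     ruby_port.mem_master_port = iobus.slave   # full-system PIO path
//
// Any other name falls through to MemObject::get{Master,Slave}Port().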

RubyPort::PioMasterPort::PioMasterPort(const std::string &_name,
                           RubyPort *_port)
    : QueuedMasterPort(_name, _port, queue), queue(*_port, *this)
{
    DPRINTF(RubyPort, "Created master pioport on sequencer %s\n", _name);
}

RubyPort::PioSlavePort::PioSlavePort(const std::string &_name,
                           RubyPort *_port)
    : QueuedSlavePort(_name, _port, queue), queue(*_port, *this)
{
    DPRINTF(RubyPort, "Created slave pioport on sequencer %s\n", _name);
}

RubyPort::MemMasterPort::MemMasterPort(const std::string &_name,
                           RubyPort *_port)
    : QueuedMasterPort(_name, _port, queue), queue(*_port, *this)
{
    DPRINTF(RubyPort, "Created master memport on ruby sequencer %s\n", _name);
}

RubyPort::MemSlavePort::MemSlavePort(const std::string &_name, RubyPort *_port,
                         RubySystem *_system, bool _access_phys_mem, PortID id)
    : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
      ruby_system(_system), access_phys_mem(_access_phys_mem)
{
    DPRINTF(RubyPort, "Created slave memport on ruby sequencer %s\n", _name);
}

bool
RubyPort::PioMasterPort::recvTimingResp(PacketPtr pkt)
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
    DPRINTF(RubyPort, "Response for address: %#x\n", pkt->getAddr());

    // send next cycle
    ruby_port->pioSlavePort.schedTimingResp(
            pkt, curTick() + g_system_ptr->clockPeriod());
    return true;
}

bool
RubyPort::MemMasterPort::recvTimingResp(PacketPtr pkt)
{
    // got a response from a device
    assert(pkt->isResponse());

    // In FS mode, ruby memory will receive pio responses from devices
    // and it must forward these responses back to the particular CPU.
    DPRINTF(RubyPort, "Pio response for address %#x, going to %d\n",
            pkt->getAddr(), pkt->getDest());

    // First we must retrieve the request port from the sender state
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
    MemSlavePort *port = senderState->port;
    assert(port != NULL);
    delete senderState;

    // attempt to send the response in the next cycle
    port->schedTimingResp(pkt, curTick() + g_system_ptr->clockPeriod());

    return true;
}

bool
RubyPort::PioSlavePort::recvTimingReq(PacketPtr pkt)
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
        AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
        for (auto it = l.begin(); it != l.end(); ++it) {
            if (it->contains(pkt->getAddr())) {
                // generally it is not safe to assume success here as
                // the port could be blocked
                bool M5_VAR_USED success =
                    ruby_port->master_ports[i]->sendTimingReq(pkt);
                assert(success);
                return true;
            }
        }
    }
    panic("PioSlavePort::recvTimingReq: no master port found for address "
          "%#x\n", pkt->getAddr());
}

bool
RubyPort::MemSlavePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Timing request for address %#x on port %d\n",
            pkt->getAddr(), id);
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    if (pkt->memInhibitAsserted())
        panic("RubyPort should never see an inhibited request\n");

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(ruby_port->memMasterPort.isConnected());
        DPRINTF(RubyPort, "Request address %#x assumed to be a pio address\n",
                pkt->getAddr());

        // Save the port in the sender state object to be used later to
        // route the response
        pkt->pushSenderState(new SenderState(this));

        // send next cycle
        ruby_port->memMasterPort.schedTimingReq(pkt,
            curTick() + g_system_ptr->clockPeriod());
        return true;
    }

    // Save the port id to be used later to route the response
    pkt->setSrc(id);

    assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <=
           RubySystem::getBlockSizeBytes());

    // Submit the ruby request
    RequestStatus requestStatus = ruby_port->makeRequest(pkt);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to tell the port to retry at a later point
    // and return false.
    if (requestStatus == RequestStatus_Issued) {
        DPRINTF(RubyPort, "Request %s 0x%x issued\n", pkt->cmdString(),
                pkt->getAddr());
        return true;
    }

    //
    // Unless one is using the ruby tester, record the stalled M5 port for
    // later retry when the sequencer becomes free.
    //
    if (!ruby_port->m_usingRubyTester) {
        ruby_port->addToRetryList(this);
    }
2737910SBrad.Beckmann@amd.com
27410090Snilay@cs.wisc.edu    DPRINTF(RubyPort, "Request for address %#x did not issued because %s\n",
2757039Snate@binkert.org            pkt->getAddr(), RequestStatus_to_string(requestStatus));
2767039Snate@binkert.org
2776922SBrad.Beckmann@amd.com    return false;
2786882SBrad.Beckmann@amd.com}
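
// Note on the retry path: returning false above tells the connected master
// to hold on to the packet. The stalled port was recorded via
// addToRetryList(), and once the sequencer completes a request,
// ruby_hit_callback() walks the retry list and calls sendRetry() on each
// stalled port so the master can re-issue its request.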

void
RubyPort::MemSlavePort::recvFunctional(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Functional access for address: %#x\n", pkt->getAddr());
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    // Check for pio requests; functional pio accesses are not currently
    // supported.
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(ruby_port->memMasterPort.isConnected());
        DPRINTF(RubyPort, "Pio request for address: %#x\n", pkt->getAddr());
        panic("RubyPort::MemSlavePort::recvFunctional() does not support "
              "pio requests\n");
    }

    assert(pkt->getAddr() + pkt->getSize() <=
                line_address(Address(pkt->getAddr())).getAddress() +
                RubySystem::getBlockSizeBytes());

    bool accessSucceeded = false;
    bool needsResponse = pkt->needsResponse();

    // Do the functional access on ruby memory
    if (pkt->isRead()) {
        accessSucceeded = ruby_system->functionalRead(pkt);
    } else if (pkt->isWrite()) {
        accessSucceeded = ruby_system->functionalWrite(pkt);
    } else {
        panic("Unsupported functional command %s\n", pkt->cmdString());
    }

    // Unless the requester explicitly said otherwise, generate an error if
    // the functional request failed
    if (!accessSucceeded && !pkt->suppressFuncError()) {
        fatal("Ruby functional %s failed for address %#x\n",
              pkt->isWrite() ? "write" : "read", pkt->getAddr());
    }

    if (access_phys_mem) {
        // The attached physmem contains the official version of data.
        // The following command performs the real functional access.
        // This line should be removed once Ruby supplies the official version
        // of data.
        ruby_port->system->getPhysMem().functionalAccess(pkt);
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        pkt->setFunctionalResponseStatus(accessSucceeded);

        // @todo There should not be a reverse call since the response is
        // communicated through the packet pointer
        // DPRINTF(RubyPort, "Sending packet back over port\n");
        // sendFunctional(pkt);
    }
    DPRINTF(RubyPort, "Functional access %s!\n",
            accessSucceeded ? "successful" : "failed");
}

void
RubyPort::ruby_hit_callback(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Hit callback for %s 0x%x\n", pkt->cmdString(),
            pkt->getAddr());

    // The packet was destined for memory and has not yet been turned
    // into a response
    assert(system->isMemAddr(pkt->getAddr()));
    assert(pkt->isRequest());

    // As it has not yet been turned around, the source field tells us
    // which port it came from.
    assert(pkt->getSrc() < slave_ports.size());

    slave_ports[pkt->getSrc()]->hitCallback(pkt);

    //
    // If we had to stall the MemSlavePorts, wake them up because the sequencer
    // likely has free resources now.
    //
    if (!retryList.empty()) {
        //
        // Record the current list of ports to retry on a temporary list before
        // calling sendRetry on those ports.  sendRetry will cause an
        // immediate retry, which may result in the ports being put back on the
        // list. Therefore we want to clear the retryList before calling
        // sendRetry.
        //
        std::vector<MemSlavePort *> curRetryList(retryList);

        retryList.clear();

        for (auto i = curRetryList.begin(); i != curRetryList.end(); ++i) {
            DPRINTF(RubyPort,
                    "Sequencer may now be free.  SendRetry to port %s\n",
                    (*i)->name());
            (*i)->sendRetry();
        }
    }

    testDrainComplete();
}

void
RubyPort::testDrainComplete()
{
    // If we weren't able to drain before, we might be able to now.
    if (drainManager != NULL) {
        unsigned int drainCount = outstandingCount();
        DPRINTF(Drain, "Drain count: %u\n", drainCount);
        if (drainCount == 0) {
            DPRINTF(Drain, "RubyPort done draining, signaling drain done\n");
            drainManager->signalDrainDone();
            // Clear the drain manager once we're done with it.
            drainManager = NULL;
        }
    }
}

unsigned int
RubyPort::getChildDrainCount(DrainManager *dm)
{
    int count = 0;

    if (memMasterPort.isConnected()) {
        count += memMasterPort.drain(dm);
        DPRINTF(Config, "count after mem master port check %d\n", count);
    }

    for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) {
        count += (*p)->drain(dm);
        DPRINTF(Config, "count after slave port check %d\n", count);
    }

    for (std::vector<PioMasterPort *>::iterator p = master_ports.begin();
         p != master_ports.end(); ++p) {
        count += (*p)->drain(dm);
        DPRINTF(Config, "count after master port check %d\n", count);
    }

    DPRINTF(Config, "final count %d\n", count);
    return count;
}

unsigned int
RubyPort::drain(DrainManager *dm)
{
    if (isDeadlockEventScheduled()) {
        descheduleDeadlockEvent();
    }

    //
    // If the RubyPort is not empty, then it needs to clear all outstanding
    // requests before it should call drainManager->signalDrainDone()
    //
    DPRINTF(Config, "outstanding count %d\n", outstandingCount());
    bool need_drain = outstandingCount() > 0;

    //
    // Also, get the number of child ports that need to clear their buffered
    // requests before they call drainManager->signalDrainDone()
    //
    unsigned int child_drain_count = getChildDrainCount(dm);

    // Set status
    if (need_drain) {
        drainManager = dm;

        DPRINTF(Drain, "RubyPort not drained\n");
        setDrainState(Drainable::Draining);
        return child_drain_count + 1;
    }

    drainManager = NULL;
    setDrainState(Drainable::Drained);
    return child_drain_count;
}
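
// The value returned from drain() is the number of objects (this RubyPort
// plus any child ports with buffered requests) that still have to call
// drainManager->signalDrainDone(); the RubyPort's own contribution is
// signalled from testDrainComplete() once outstandingCount() drops to zero.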

void
RubyPort::MemSlavePort::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();

    //
    // Unless specified at configuration, all responses except failed SC
    // and Flush operations access M5 physical memory.
    //
    bool accessPhysMem = access_phys_mem;

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            if (pkt->req->getExtraData() != 0) {
                //
                // Successful SC packets convert to normal writes
                //
                pkt->convertScToWrite();
            } else {
                //
                // Failed SC packets don't access physical memory and thus
                // the RubyPort itself must convert them to responses.
                //
                accessPhysMem = false;
            }
        } else {
            //
            // All LL packets convert to normal loads so that M5 PhysMem does
            // not lock the blocks.
            //
            pkt->convertLlToRead();
        }
    }

    //
    // Flush requests don't access physical memory
    //
    if (pkt->isFlush()) {
        accessPhysMem = false;
    }

    DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse);

    if (accessPhysMem) {
        RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
        ruby_port->system->getPhysMem().access(pkt);
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        DPRINTF(RubyPort, "Sending packet back over port\n");
        // send next cycle
        schedTimingResp(pkt, curTick() + g_system_ptr->clockPeriod());
    } else {
        delete pkt;
    }
    DPRINTF(RubyPort, "Hit callback done!\n");
}

AddrRangeList
RubyPort::PioSlavePort::getAddrRanges() const
{
    // at the moment the assumption is that the master does not care
    AddrRangeList ranges;
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
        ranges.splice(ranges.begin(),
                ruby_port->master_ports[i]->getAddrRanges());
    }
    for (AddrRangeConstIter r = ranges.begin(); r != ranges.end(); ++r)
        DPRINTF(RubyPort, "%s\n", r->to_string());
    return ranges;
}

bool
RubyPort::MemSlavePort::isPhysMemAddress(Addr addr) const
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
    return ruby_port->system->isMemAddr(addr);
}

void
RubyPort::ruby_eviction_callback(const Address& address)
{
    DPRINTF(RubyPort, "Sending invalidations.\n");
    // This request is deleted by the stack-allocated packet's destructor
    // when this function exits
    // TODO: should this really be using funcMasterId?
    RequestPtr req =
            new Request(address.getAddress(), 0, 0, Request::funcMasterId);
    // Use a single packet to signal all snooping ports of the invalidation.
    // This assumes that snooping ports do NOT modify the packet/request
    Packet pkt(req, MemCmd::InvalidationReq);
    for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) {
        // check if the connected master port is snooping
        if ((*p)->isSnooping()) {
            // send as a snoop request
            (*p)->sendTimingSnoopReq(&pkt);
        }
    }
}
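
// ruby_eviction_callback() is invoked when Ruby gives up a cache block; the
// InvalidationReq snoop above lets any snooping CPU-side ports observe the
// eviction (typically so structures such as load-store queues or LL/SC
// monitors can react). No response is expected, which is why a
// stack-allocated packet is sufficient.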

void
RubyPort::PioMasterPort::recvRangeChange()
{
    RubyPort &r = static_cast<RubyPort &>(owner);
    r.gotAddrRanges--;
    if (r.gotAddrRanges == 0 && FullSystem) {
        r.pioSlavePort.sendRangeChange();
    }
}