/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * Copyright (c) 2011 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/Ruby.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/RubyPort.hh"
#include "mem/simple_mem.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

RubyPort::RubyPort(const Params *p)
    : MemObject(p), m_version(p->version), m_controller(NULL),
      m_mandatory_q_ptr(NULL), m_usingRubyTester(p->using_ruby_tester),
      system(p->system),
      pioMasterPort(csprintf("%s.pio-master-port", name()), this),
      pioSlavePort(csprintf("%s.pio-slave-port", name()), this),
      memMasterPort(csprintf("%s.mem-master-port", name()), this),
      memSlavePort(csprintf("%s-mem-slave-port", name()), this,
          p->ruby_system, p->ruby_system->getAccessBackingStore(), -1),
      gotAddrRanges(p->port_master_connection_count), drainManager(NULL)
{
    assert(m_version != -1);

    // create the slave ports based on the number of connected ports
    for (size_t i = 0; i < p->port_slave_connection_count; ++i) {
        slave_ports.push_back(new MemSlavePort(csprintf("%s.slave%d", name(),
            i), this, p->ruby_system,
            p->ruby_system->getAccessBackingStore(), i));
    }

    // create the master ports based on the number of connected ports
    for (size_t i = 0; i < p->port_master_connection_count; ++i) {
        master_ports.push_back(new PioMasterPort(csprintf("%s.master%d",
            name(), i), this));
    }
}

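// m_controller is hooked up after construction (it is NULL in the
// constructor), so the controller's mandatory queue can only be grabbed
// here in init().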
void
RubyPort::init()
{
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    m_mandatory_q_ptr->setSender(this);
}

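// Resolve a master port by name: the dedicated memory and pio master
// ports are returned directly, while "master" is indexed into the
// vector of per-connection pio master ports.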
BaseMasterPort &
RubyPort::getMasterPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_master_port") {
        return memMasterPort;
    }

    if (if_name == "pio_master_port") {
        return pioMasterPort;
    }

    // used by the x86 CPUs to connect the interrupt PIO and interrupt slave
    // port
    if (if_name != "master") {
        // pass it along to our super class
        return MemObject::getMasterPort(if_name, idx);
    } else {
        if (idx >= static_cast<PortID>(master_ports.size())) {
            panic("RubyPort::getMasterPort: unknown index %d\n", idx);
        }

        return *master_ports[idx];
    }
}

BaseSlavePort &
RubyPort::getSlavePort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_slave_port") {
        return memSlavePort;
    }

    if (if_name == "pio_slave_port")
        return pioSlavePort;

    // used by the CPUs to connect the caches to the interconnect, and
    // for the x86 case also the interrupt master
    if (if_name != "slave") {
        // pass it along to our super class
        return MemObject::getSlavePort(if_name, idx);
    } else {
        if (idx >= static_cast<PortID>(slave_ports.size())) {
            panic("RubyPort::getSlavePort: unknown index %d\n", idx);
        }

        return *slave_ports[idx];
    }
}

RubyPort::PioMasterPort::PioMasterPort(const std::string &_name,
                           RubyPort *_port)
    : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
      reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
{
    DPRINTF(RubyPort, "Created master pioport on sequencer %s\n", _name);
}

RubyPort::PioSlavePort::PioSlavePort(const std::string &_name,
                           RubyPort *_port)
    : QueuedSlavePort(_name, _port, queue), queue(*_port, *this)
{
    DPRINTF(RubyPort, "Created slave pioport on sequencer %s\n", _name);
}

RubyPort::MemMasterPort::MemMasterPort(const std::string &_name,
                           RubyPort *_port)
    : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
      reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
{
    DPRINTF(RubyPort, "Created master memport on ruby sequencer %s\n", _name);
}

RubyPort::MemSlavePort::MemSlavePort(const std::string &_name, RubyPort *_port,
                                     RubySystem *_system,
                                     bool _access_backing_store, PortID id)
    : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
      ruby_system(_system), access_backing_store(_access_backing_store)
{
    DPRINTF(RubyPort, "Created slave memport on ruby sequencer %s\n", _name);
}

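// Responses arriving on the pio master port are handed to the pio slave
// port to be sent back toward the original requester on the next cycle.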
bool
RubyPort::PioMasterPort::recvTimingResp(PacketPtr pkt)
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
    DPRINTF(RubyPort, "Response for address: %#x\n", pkt->getAddr());

    // send next cycle
    ruby_port->pioSlavePort.schedTimingResp(
            pkt, curTick() + g_system_ptr->clockPeriod());
    return true;
}

bool
RubyPort::MemMasterPort::recvTimingResp(PacketPtr pkt)
{
    // got a response from a device
    assert(pkt->isResponse());

    // First we must retrieve the request port from the sender State
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
    MemSlavePort *port = senderState->port;
    assert(port != NULL);
    delete senderState;

    // In FS mode, ruby memory will receive pio responses from devices
    // and it must forward these responses back to the particular CPU.
    DPRINTF(RubyPort, "Pio response for address %#x, going to %s\n",
            pkt->getAddr(), port->name());

    // attempt to send the response in the next cycle
    port->schedTimingResp(pkt, curTick() + g_system_ptr->clockPeriod());

    return true;
}

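// A timing pio request is forwarded to whichever master port advertises
// an address range containing the request address.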
bool
RubyPort::PioSlavePort::recvTimingReq(PacketPtr pkt)
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
        AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
        for (auto it = l.begin(); it != l.end(); ++it) {
            if (it->contains(pkt->getAddr())) {
                // generally it is not safe to assume success here as
                // the port could be blocked
                bool M5_VAR_USED success =
                    ruby_port->master_ports[i]->sendTimingReq(pkt);
                assert(success);
                return true;
            }
        }
    }
    panic("Should never reach here!\n");
}

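// Timing requests from the CPU side: pio addresses bypass Ruby and go
// straight out the memory master port, everything else is handed to the
// sequencer through makeRequest().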
bool
RubyPort::MemSlavePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Timing request for address %#x on port %d\n",
            pkt->getAddr(), id);
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    if (pkt->memInhibitAsserted())
        panic("RubyPort should never see an inhibited request\n");

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(ruby_port->memMasterPort.isConnected());
        DPRINTF(RubyPort, "Request address %#x assumed to be a pio address\n",
                pkt->getAddr());

        // Save the port in the sender state object to be used later to
        // route the response
        pkt->pushSenderState(new SenderState(this));

        // send next cycle
        ruby_port->memMasterPort.schedTimingReq(pkt,
            curTick() + g_system_ptr->clockPeriod());
        return true;
    }

    assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <=
           RubySystem::getBlockSizeBytes());

    // Submit the ruby request
    RequestStatus requestStatus = ruby_port->makeRequest(pkt);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to tell the port to retry at a later point
    // and return false.
    if (requestStatus == RequestStatus_Issued) {
        // Save the port in the sender state object to be used later to
        // route the response
        pkt->pushSenderState(new SenderState(this));

        DPRINTF(RubyPort, "Request %s 0x%x issued\n", pkt->cmdString(),
                pkt->getAddr());
        return true;
    }

    //
    // Unless one is using the ruby tester, record the stalled M5 port for
    // later retry when the sequencer becomes free.
    //
    if (!ruby_port->m_usingRubyTester) {
        ruby_port->addToRetryList(this);
    }

    DPRINTF(RubyPort, "Request for address %#x did not issue because %s\n",
            pkt->getAddr(), RequestStatus_to_string(requestStatus));

    return false;
}

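// Functional accesses are satisfied either by the backing store (when it
// holds the official copy of the data) or by walking Ruby's cache and
// memory state via functionalRead/functionalWrite.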
void
RubyPort::MemSlavePort::recvFunctional(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Functional access for address: %#x\n", pkt->getAddr());

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt->getAddr())) {
        RubyPort *ruby_port M5_VAR_USED = static_cast<RubyPort *>(&owner);
        assert(ruby_port->memMasterPort.isConnected());
        DPRINTF(RubyPort, "Pio Request for address: %#x\n", pkt->getAddr());
        panic("RubyPort::MemSlavePort::recvFunctional() not implemented!\n");
    }

    assert(pkt->getAddr() + pkt->getSize() <=
                line_address(Address(pkt->getAddr())).getAddress() +
                RubySystem::getBlockSizeBytes());

    if (access_backing_store) {
        // The attached physmem contains the official version of data.
        // The following command performs the real functional access.
        // This line should be removed once Ruby supplies the official version
        // of data.
        ruby_system->getPhysMem()->functionalAccess(pkt);
    } else {
        bool accessSucceeded = false;
        bool needsResponse = pkt->needsResponse();

        // Do the functional access on ruby memory
        if (pkt->isRead()) {
            accessSucceeded = ruby_system->functionalRead(pkt);
        } else if (pkt->isWrite()) {
            accessSucceeded = ruby_system->functionalWrite(pkt);
        } else {
            panic("Unsupported functional command %s\n", pkt->cmdString());
        }

        // Unless the requester explicitly said otherwise, generate an error if
        // the functional request failed
        if (!accessSucceeded && !pkt->suppressFuncError()) {
            fatal("Ruby functional %s failed for address %#x\n",
                  pkt->isWrite() ? "write" : "read", pkt->getAddr());
        }

        // turn packet around to go back to requester if response expected
        if (needsResponse) {
            pkt->setFunctionalResponseStatus(accessSucceeded);
        }

        DPRINTF(RubyPort, "Functional access %s!\n",
                accessSucceeded ? "successful" : "failed");
    }
}

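// Called by the sequencer when a memory request completes.  The response
// is routed back to the slave port recorded in the packet's SenderState,
// and any ports stalled on a busy sequencer are asked to retry.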
void
RubyPort::ruby_hit_callback(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Hit callback for %s 0x%x\n", pkt->cmdString(),
            pkt->getAddr());

    // The packet was destined for memory and has not yet been turned
    // into a response
    assert(system->isMemAddr(pkt->getAddr()));
    assert(pkt->isRequest());

    // First we must retrieve the request port from the sender State
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
    MemSlavePort *port = senderState->port;
    assert(port != NULL);
    delete senderState;

    port->hitCallback(pkt);

    //
    // If we had to stall the MemSlavePorts, wake them up because the sequencer
    // likely has free resources now.
    //
    if (!retryList.empty()) {
        //
        // Record the current list of ports to retry on a temporary list before
        // calling sendRetry on those ports.  sendRetry will cause an
        // immediate retry, which may result in the ports being put back on the
        // list. Therefore we want to clear the retryList before calling
        // sendRetry.
        //
        std::vector<MemSlavePort *> curRetryList(retryList);

        retryList.clear();

        for (auto i = curRetryList.begin(); i != curRetryList.end(); ++i) {
            DPRINTF(RubyPort,
                    "Sequencer may now be free.  SendRetry to port %s\n",
                    (*i)->name());
            (*i)->sendRetryReq();
        }
    }

    testDrainComplete();
}

void
RubyPort::testDrainComplete()
{
    // If we weren't able to drain before, we might be able to now.
    if (drainManager != NULL) {
        unsigned int drainCount = outstandingCount();
        DPRINTF(Drain, "Drain count: %u\n", drainCount);
        if (drainCount == 0) {
            DPRINTF(Drain, "RubyPort done draining, signaling drain done\n");
            drainManager->signalDrainDone();
            // Clear the drain manager once we're done with it.
            drainManager = NULL;
        }
    }
}

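// Sum the drain counts of all child ports so that drain() can report how
// many objects still have to signal the DrainManager when they finish.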
unsigned int
RubyPort::getChildDrainCount(DrainManager *dm)
{
    int count = 0;

    if (memMasterPort.isConnected()) {
        count += memMasterPort.drain(dm);
        DPRINTF(Config, "count after pio check %d\n", count);
    }

    for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) {
        count += (*p)->drain(dm);
        DPRINTF(Config, "count after slave port check %d\n", count);
    }

    for (std::vector<PioMasterPort *>::iterator p = master_ports.begin();
         p != master_ports.end(); ++p) {
        count += (*p)->drain(dm);
        DPRINTF(Config, "count after master port check %d\n", count);
    }

    DPRINTF(Config, "final count %d\n", count);
    return count;
}

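// drain() returns the number of objects (this RubyPort plus any of its
// child ports) that still have outstanding work and will notify the
// DrainManager once they have finished.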
4278688Snilay@cs.wisc.eduunsigned int
4289342SAndreas.Sandberg@arm.comRubyPort::drain(DrainManager *dm)
4298688Snilay@cs.wisc.edu{
4308688Snilay@cs.wisc.edu    if (isDeadlockEventScheduled()) {
4318688Snilay@cs.wisc.edu        descheduleDeadlockEvent();
4328688Snilay@cs.wisc.edu    }
4338688Snilay@cs.wisc.edu
4349245Shestness@cs.wisc.edu    //
4359245Shestness@cs.wisc.edu    // If the RubyPort is not empty, then it needs to clear all outstanding
4369342SAndreas.Sandberg@arm.com    // requests before it should call drainManager->signalDrainDone()
4379245Shestness@cs.wisc.edu    //
4389245Shestness@cs.wisc.edu    DPRINTF(Config, "outstanding count %d\n", outstandingCount());
4399245Shestness@cs.wisc.edu    bool need_drain = outstandingCount() > 0;
4409245Shestness@cs.wisc.edu
4419245Shestness@cs.wisc.edu    //
4429245Shestness@cs.wisc.edu    // Also, get the number of child ports that will also need to clear
4439342SAndreas.Sandberg@arm.com    // their buffered requests before they call drainManager->signalDrainDone()
4449245Shestness@cs.wisc.edu    //
4459342SAndreas.Sandberg@arm.com    unsigned int child_drain_count = getChildDrainCount(dm);
4468688Snilay@cs.wisc.edu
4478688Snilay@cs.wisc.edu    // Set status
4489245Shestness@cs.wisc.edu    if (need_drain) {
4499342SAndreas.Sandberg@arm.com        drainManager = dm;
4508688Snilay@cs.wisc.edu
4519152Satgutier@umich.edu        DPRINTF(Drain, "RubyPort not drained\n");
4529342SAndreas.Sandberg@arm.com        setDrainState(Drainable::Draining);
4539245Shestness@cs.wisc.edu        return child_drain_count + 1;
4548688Snilay@cs.wisc.edu    }
4558688Snilay@cs.wisc.edu
4569342SAndreas.Sandberg@arm.com    drainManager = NULL;
4579342SAndreas.Sandberg@arm.com    setDrainState(Drainable::Drained);
4589245Shestness@cs.wisc.edu    return child_drain_count;
4596882SBrad.Beckmann@amd.com}
4606882SBrad.Beckmann@amd.com
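// Complete a request that Ruby has finished with: apply the LL/SC and
// Flush special cases, perform the actual data access against the
// backing store when it is enabled, and send the response back out this
// port if one is expected.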
void
RubyPort::MemSlavePort::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();

    // Unless specified at configuration, all responses except failed SC
    // and Flush operations access M5 physical memory.
    bool accessPhysMem = access_backing_store;

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            if (pkt->req->getExtraData() != 0) {
                //
                // Successful SC packets convert to normal writes
                //
                pkt->convertScToWrite();
            } else {
                //
                // Failed SC packets don't access physical memory and thus
                // the RubyPort itself must convert it to a response.
                //
                accessPhysMem = false;
            }
        } else {
            //
            // All LL packets convert to normal loads so that M5 PhysMem does
            // not lock the blocks.
            //
            pkt->convertLlToRead();
        }
    }

    // Flush requests don't access physical memory
    if (pkt->isFlush()) {
        accessPhysMem = false;
    }

    DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse);

    if (accessPhysMem) {
        ruby_system->getPhysMem()->access(pkt);
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        DPRINTF(RubyPort, "Sending packet back over port\n");
        // send next cycle
        schedTimingResp(pkt, curTick() + g_system_ptr->clockPeriod());
    } else {
        delete pkt;
    }

    DPRINTF(RubyPort, "Hit callback done!\n");
}

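// The pio slave port advertises the union of the address ranges of all
// connected pio master ports.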
AddrRangeList
RubyPort::PioSlavePort::getAddrRanges() const
{
    // at the moment the assumption is that the master does not care
    AddrRangeList ranges;
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
        ranges.splice(ranges.begin(),
                ruby_port->master_ports[i]->getAddrRanges());
    }
    for (const auto M5_VAR_USED &r : ranges)
        DPRINTF(RubyPort, "%s\n", r.to_string());
    return ranges;
}

bool
RubyPort::MemSlavePort::isPhysMemAddress(Addr addr) const
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
    return ruby_port->system->isMemAddr(addr);
}

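// When Ruby evicts a cache block, an invalidation snoop is sent to every
// connected CPU-side port that is snooping so the CPU can drop any state
// it keeps for that address.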
void
RubyPort::ruby_eviction_callback(const Address& address)
{
    DPRINTF(RubyPort, "Sending invalidations.\n");
    // This request is deleted in the stack-allocated packet destructor
    // when this function exits
    // TODO: should this really be using funcMasterId?
    RequestPtr req =
            new Request(address.getAddress(), 0, 0, Request::funcMasterId);
    // Use a single packet to signal all snooping ports of the invalidation.
    // This assumes that snooping ports do NOT modify the packet/request
    Packet pkt(req, MemCmd::InvalidateReq);
    for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) {
        // check if the connected master port is snooping
        if ((*p)->isSnooping()) {
            // send as a snoop request
            (*p)->sendTimingSnoopReq(&pkt);
        }
    }
}

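// Count down the range changes reported by the connected pio master
// ports; once all of them have reported, and only in full-system mode,
// propagate a single range change out the pio slave port.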
void
RubyPort::PioMasterPort::recvRangeChange()
{
    RubyPort &r = static_cast<RubyPort &>(owner);
    r.gotAddrRanges--;
    if (r.gotAddrRanges == 0 && FullSystem) {
        r.pioSlavePort.sendRangeChange();
    }
}