/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2009-2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2011 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/system/RubyPort.hh"

#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/Ruby.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/simple_mem.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"
546876Ssteve.reinhardt@amd.comRubyPort::RubyPort(const Params *p)
5510919Sbrandon.potter@amd.com    : MemObject(p), m_ruby_system(p->ruby_system), m_version(p->version),
5610919Sbrandon.potter@amd.com      m_controller(NULL), m_mandatory_q_ptr(NULL),
5710919Sbrandon.potter@amd.com      m_usingRubyTester(p->using_ruby_tester), system(p->system),
5810090Snilay@cs.wisc.edu      pioMasterPort(csprintf("%s.pio-master-port", name()), this),
5910090Snilay@cs.wisc.edu      pioSlavePort(csprintf("%s.pio-slave-port", name()), this),
6010090Snilay@cs.wisc.edu      memMasterPort(csprintf("%s.mem-master-port", name()), this),
6110090Snilay@cs.wisc.edu      memSlavePort(csprintf("%s-mem-slave-port", name()), this,
6211266SBrad.Beckmann@amd.com                   p->ruby_system->getAccessBackingStore(), -1,
6311266SBrad.Beckmann@amd.com                   p->no_retry_on_stall),
6411308Santhony.gutierrez@amd.com      gotAddrRanges(p->port_master_connection_count),
6511308Santhony.gutierrez@amd.com      m_isCPUSequencer(p->is_cpu_sequencer)
666876Ssteve.reinhardt@amd.com{
676876Ssteve.reinhardt@amd.com    assert(m_version != -1);
686876Ssteve.reinhardt@amd.com
698922Swilliam.wang@arm.com    // create the slave ports based on the number of connected ports
708922Swilliam.wang@arm.com    for (size_t i = 0; i < p->port_slave_connection_count; ++i) {
7110090Snilay@cs.wisc.edu        slave_ports.push_back(new MemSlavePort(csprintf("%s.slave%d", name(),
7211266SBrad.Beckmann@amd.com            i), this, p->ruby_system->getAccessBackingStore(),
7311266SBrad.Beckmann@amd.com            i, p->no_retry_on_stall));
748922Swilliam.wang@arm.com    }
757039Snate@binkert.org
768922Swilliam.wang@arm.com    // create the master ports based on the number of connected ports
778922Swilliam.wang@arm.com    for (size_t i = 0; i < p->port_master_connection_count; ++i) {
7810090Snilay@cs.wisc.edu        master_ports.push_back(new PioMasterPort(csprintf("%s.master%d",
7910090Snilay@cs.wisc.edu            name(), i), this));
808922Swilliam.wang@arm.com    }
816876Ssteve.reinhardt@amd.com}
826876Ssteve.reinhardt@amd.com
837039Snate@binkert.orgvoid
847039Snate@binkert.orgRubyPort::init()
856882SBrad.Beckmann@amd.com{
866882SBrad.Beckmann@amd.com    assert(m_controller != NULL);
876882SBrad.Beckmann@amd.com    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
886882SBrad.Beckmann@amd.com}
896882SBrad.Beckmann@amd.com
909294Sandreas.hansson@arm.comBaseMasterPort &
919294Sandreas.hansson@arm.comRubyPort::getMasterPort(const std::string &if_name, PortID idx)
926876Ssteve.reinhardt@amd.com{
9310090Snilay@cs.wisc.edu    if (if_name == "mem_master_port") {
9410090Snilay@cs.wisc.edu        return memMasterPort;
9510090Snilay@cs.wisc.edu    }
9610090Snilay@cs.wisc.edu
9710090Snilay@cs.wisc.edu    if (if_name == "pio_master_port") {
9810090Snilay@cs.wisc.edu        return pioMasterPort;
998922Swilliam.wang@arm.com    }
1008922Swilliam.wang@arm.com
1018839Sandreas.hansson@arm.com    // used by the x86 CPUs to connect the interrupt PIO and interrupt slave
1028839Sandreas.hansson@arm.com    // port
1038922Swilliam.wang@arm.com    if (if_name != "master") {
1048922Swilliam.wang@arm.com        // pass it along to our super class
1058922Swilliam.wang@arm.com        return MemObject::getMasterPort(if_name, idx);
1068922Swilliam.wang@arm.com    } else {
1079294Sandreas.hansson@arm.com        if (idx >= static_cast<PortID>(master_ports.size())) {
1088922Swilliam.wang@arm.com            panic("RubyPort::getMasterPort: unknown index %d\n", idx);
1098922Swilliam.wang@arm.com        }
1108839Sandreas.hansson@arm.com
1118922Swilliam.wang@arm.com        return *master_ports[idx];
1128839Sandreas.hansson@arm.com    }
1138922Swilliam.wang@arm.com}
1148839Sandreas.hansson@arm.com
1159294Sandreas.hansson@arm.comBaseSlavePort &
1169294Sandreas.hansson@arm.comRubyPort::getSlavePort(const std::string &if_name, PortID idx)
1178922Swilliam.wang@arm.com{
11810090Snilay@cs.wisc.edu    if (if_name == "mem_slave_port") {
11910090Snilay@cs.wisc.edu        return memSlavePort;
12010090Snilay@cs.wisc.edu    }
12110090Snilay@cs.wisc.edu
12210090Snilay@cs.wisc.edu    if (if_name == "pio_slave_port")
12310090Snilay@cs.wisc.edu        return pioSlavePort;
12410090Snilay@cs.wisc.edu
1258922Swilliam.wang@arm.com    // used by the CPUs to connect the caches to the interconnect, and
1268922Swilliam.wang@arm.com    // for the x86 case also the interrupt master
1278922Swilliam.wang@arm.com    if (if_name != "slave") {
1288922Swilliam.wang@arm.com        // pass it along to our super class
1298922Swilliam.wang@arm.com        return MemObject::getSlavePort(if_name, idx);
1308922Swilliam.wang@arm.com    } else {
1319294Sandreas.hansson@arm.com        if (idx >= static_cast<PortID>(slave_ports.size())) {
1328922Swilliam.wang@arm.com            panic("RubyPort::getSlavePort: unknown index %d\n", idx);
1338922Swilliam.wang@arm.com        }
1348922Swilliam.wang@arm.com
1358922Swilliam.wang@arm.com        return *slave_ports[idx];
1367039Snate@binkert.org    }
1376876Ssteve.reinhardt@amd.com}
1386882SBrad.Beckmann@amd.com
13910090Snilay@cs.wisc.eduRubyPort::PioMasterPort::PioMasterPort(const std::string &_name,
1406882SBrad.Beckmann@amd.com                           RubyPort *_port)
14110713Sandreas.hansson@arm.com    : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
14210713Sandreas.hansson@arm.com      reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
1436882SBrad.Beckmann@amd.com{
14410090Snilay@cs.wisc.edu    DPRINTF(RubyPort, "Created master pioport on sequencer %s\n", _name);
1456882SBrad.Beckmann@amd.com}
1466882SBrad.Beckmann@amd.com
14710090Snilay@cs.wisc.eduRubyPort::PioSlavePort::PioSlavePort(const std::string &_name,
14810090Snilay@cs.wisc.edu                           RubyPort *_port)
14910090Snilay@cs.wisc.edu    : QueuedSlavePort(_name, _port, queue), queue(*_port, *this)
15010090Snilay@cs.wisc.edu{
15110090Snilay@cs.wisc.edu    DPRINTF(RubyPort, "Created slave pioport on sequencer %s\n", _name);
15210090Snilay@cs.wisc.edu}
15310090Snilay@cs.wisc.edu
15410090Snilay@cs.wisc.eduRubyPort::MemMasterPort::MemMasterPort(const std::string &_name,
15510090Snilay@cs.wisc.edu                           RubyPort *_port)
15610713Sandreas.hansson@arm.com    : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
15710713Sandreas.hansson@arm.com      reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
15810090Snilay@cs.wisc.edu{
15910090Snilay@cs.wisc.edu    DPRINTF(RubyPort, "Created master memport on ruby sequencer %s\n", _name);
16010090Snilay@cs.wisc.edu}
16110090Snilay@cs.wisc.edu
16210090Snilay@cs.wisc.eduRubyPort::MemSlavePort::MemSlavePort(const std::string &_name, RubyPort *_port,
16311266SBrad.Beckmann@amd.com                                     bool _access_backing_store, PortID id,
16411266SBrad.Beckmann@amd.com                                     bool _no_retry_on_stall)
16510089Sandreas.hansson@arm.com    : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
16611266SBrad.Beckmann@amd.com      access_backing_store(_access_backing_store),
16711266SBrad.Beckmann@amd.com      no_retry_on_stall(_no_retry_on_stall)
1686882SBrad.Beckmann@amd.com{
16910090Snilay@cs.wisc.edu    DPRINTF(RubyPort, "Created slave memport on ruby sequencer %s\n", _name);
1706882SBrad.Beckmann@amd.com}
1716882SBrad.Beckmann@amd.com
17210089Sandreas.hansson@arm.combool
17310090Snilay@cs.wisc.eduRubyPort::PioMasterPort::recvTimingResp(PacketPtr pkt)
17410090Snilay@cs.wisc.edu{
17510919Sbrandon.potter@amd.com    RubyPort *rp = static_cast<RubyPort *>(&owner);
17610090Snilay@cs.wisc.edu    DPRINTF(RubyPort, "Response for address: 0x%#x\n", pkt->getAddr());
17710090Snilay@cs.wisc.edu
17810090Snilay@cs.wisc.edu    // send next cycle
17910919Sbrandon.potter@amd.com    rp->pioSlavePort.schedTimingResp(
18010919Sbrandon.potter@amd.com            pkt, curTick() + rp->m_ruby_system->clockPeriod());
18110090Snilay@cs.wisc.edu    return true;
18210090Snilay@cs.wisc.edu}
18310090Snilay@cs.wisc.edu
18410090Snilay@cs.wisc.edubool RubyPort::MemMasterPort::recvTimingResp(PacketPtr pkt)
18510089Sandreas.hansson@arm.com{
18610089Sandreas.hansson@arm.com    // got a response from a device
18710089Sandreas.hansson@arm.com    assert(pkt->isResponse());
1886882SBrad.Beckmann@amd.com
18910090Snilay@cs.wisc.edu    // First we must retrieve the request port from the sender State
19010090Snilay@cs.wisc.edu    RubyPort::SenderState *senderState =
19110090Snilay@cs.wisc.edu        safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
19210090Snilay@cs.wisc.edu    MemSlavePort *port = senderState->port;
19310090Snilay@cs.wisc.edu    assert(port != NULL);
19410090Snilay@cs.wisc.edu    delete senderState;
1957039Snate@binkert.org
19610657Sandreas.hansson@arm.com    // In FS mode, ruby memory will receive pio responses from devices
19710657Sandreas.hansson@arm.com    // and it must forward these responses back to the particular CPU.
19810657Sandreas.hansson@arm.com    DPRINTF(RubyPort,  "Pio response for address %#x, going to %s\n",
19910657Sandreas.hansson@arm.com            pkt->getAddr(), port->name());
20010657Sandreas.hansson@arm.com
20110089Sandreas.hansson@arm.com    // attempt to send the response in the next cycle
20210919Sbrandon.potter@amd.com    RubyPort *rp = static_cast<RubyPort *>(&owner);
20310919Sbrandon.potter@amd.com    port->schedTimingResp(pkt, curTick() + rp->m_ruby_system->clockPeriod());
2047039Snate@binkert.org
2056882SBrad.Beckmann@amd.com    return true;
2066882SBrad.Beckmann@amd.com}
2076882SBrad.Beckmann@amd.com
2086882SBrad.Beckmann@amd.combool
20910090Snilay@cs.wisc.eduRubyPort::PioSlavePort::recvTimingReq(PacketPtr pkt)
2106882SBrad.Beckmann@amd.com{
21110090Snilay@cs.wisc.edu    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
21210090Snilay@cs.wisc.edu
21310090Snilay@cs.wisc.edu    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
21410090Snilay@cs.wisc.edu        AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
21510090Snilay@cs.wisc.edu        for (auto it = l.begin(); it != l.end(); ++it) {
21610090Snilay@cs.wisc.edu            if (it->contains(pkt->getAddr())) {
21710412Sandreas.hansson@arm.com                // generally it is not safe to assume success here as
21810412Sandreas.hansson@arm.com                // the port could be blocked
21910412Sandreas.hansson@arm.com                bool M5_VAR_USED success =
22010412Sandreas.hansson@arm.com                    ruby_port->master_ports[i]->sendTimingReq(pkt);
22110412Sandreas.hansson@arm.com                assert(success);
22210090Snilay@cs.wisc.edu                return true;
22310090Snilay@cs.wisc.edu            }
22410090Snilay@cs.wisc.edu        }
22510090Snilay@cs.wisc.edu    }
22610090Snilay@cs.wisc.edu    panic("Should never reach here!\n");
22710090Snilay@cs.wisc.edu}
22810090Snilay@cs.wisc.edu
22912395Sswapnilster@gmail.comTick
23012395Sswapnilster@gmail.comRubyPort::PioSlavePort::recvAtomic(PacketPtr pkt)
23112395Sswapnilster@gmail.com{
23212395Sswapnilster@gmail.com    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
23312395Sswapnilster@gmail.com    // Only atomic_noncaching mode supported!
23412395Sswapnilster@gmail.com    if (!ruby_port->system->bypassCaches()) {
23512395Sswapnilster@gmail.com        panic("Ruby supports atomic accesses only in noncaching mode\n");
23612395Sswapnilster@gmail.com    }
23712395Sswapnilster@gmail.com
23812395Sswapnilster@gmail.com    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
23912395Sswapnilster@gmail.com        AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
24012395Sswapnilster@gmail.com        for (auto it = l.begin(); it != l.end(); ++it) {
24112395Sswapnilster@gmail.com            if (it->contains(pkt->getAddr())) {
24212395Sswapnilster@gmail.com                return ruby_port->master_ports[i]->sendAtomic(pkt);
24312395Sswapnilster@gmail.com            }
24412395Sswapnilster@gmail.com        }
24512395Sswapnilster@gmail.com    }
24612395Sswapnilster@gmail.com    panic("Could not find address in Ruby PIO address ranges!\n");
24712395Sswapnilster@gmail.com}
24812395Sswapnilster@gmail.com
24910090Snilay@cs.wisc.edubool
25010090Snilay@cs.wisc.eduRubyPort::MemSlavePort::recvTimingReq(PacketPtr pkt)
25110090Snilay@cs.wisc.edu{
25210090Snilay@cs.wisc.edu    DPRINTF(RubyPort, "Timing request for address %#x on port %d\n",
25310090Snilay@cs.wisc.edu            pkt->getAddr(), id);
25410090Snilay@cs.wisc.edu    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
2556882SBrad.Beckmann@amd.com
25611284Sandreas.hansson@arm.com    if (pkt->cacheResponding())
25711284Sandreas.hansson@arm.com        panic("RubyPort should never see request with the "
25811284Sandreas.hansson@arm.com              "cacheResponding flag set\n");
2596882SBrad.Beckmann@amd.com
26012357Snikos.nikoleris@arm.com    // ruby doesn't support cache maintenance operations at the
26112357Snikos.nikoleris@arm.com    // moment, as a workaround, we respond right away
26212357Snikos.nikoleris@arm.com    if (pkt->req->isCacheMaintenance()) {
26312357Snikos.nikoleris@arm.com        warn_once("Cache maintenance operations are not supported in Ruby.\n");
26412357Snikos.nikoleris@arm.com        pkt->makeResponse();
26512357Snikos.nikoleris@arm.com        schedTimingResp(pkt, curTick());
26612357Snikos.nikoleris@arm.com        return true;
26712357Snikos.nikoleris@arm.com    }
2686882SBrad.Beckmann@amd.com    // Check for pio requests and directly send them to the dedicated
2696882SBrad.Beckmann@amd.com    // pio port.
27011305Sblake.hechtman@amd.com    if (pkt->cmd != MemCmd::MemFenceReq) {
27111305Sblake.hechtman@amd.com        if (!isPhysMemAddress(pkt->getAddr())) {
27211305Sblake.hechtman@amd.com            assert(ruby_port->memMasterPort.isConnected());
27311305Sblake.hechtman@amd.com            DPRINTF(RubyPort, "Request address %#x assumed to be a "
27411305Sblake.hechtman@amd.com                    "pio address\n", pkt->getAddr());
2756882SBrad.Beckmann@amd.com
27611305Sblake.hechtman@amd.com            // Save the port in the sender state object to be used later to
27711305Sblake.hechtman@amd.com            // route the response
27811305Sblake.hechtman@amd.com            pkt->pushSenderState(new SenderState(this));
27910090Snilay@cs.wisc.edu
28011305Sblake.hechtman@amd.com            // send next cycle
28111305Sblake.hechtman@amd.com            RubySystem *rs = ruby_port->m_ruby_system;
28211305Sblake.hechtman@amd.com            ruby_port->memMasterPort.schedTimingReq(pkt,
28311305Sblake.hechtman@amd.com                curTick() + rs->clockPeriod());
28411305Sblake.hechtman@amd.com            return true;
28511305Sblake.hechtman@amd.com        }
28611305Sblake.hechtman@amd.com
28711305Sblake.hechtman@amd.com        assert(getOffset(pkt->getAddr()) + pkt->getSize() <=
28811305Sblake.hechtman@amd.com               RubySystem::getBlockSizeBytes());
2896882SBrad.Beckmann@amd.com    }
2906882SBrad.Beckmann@amd.com
2916882SBrad.Beckmann@amd.com    // Submit the ruby request
2928615Snilay@cs.wisc.edu    RequestStatus requestStatus = ruby_port->makeRequest(pkt);
2937023SBrad.Beckmann@amd.com
2947550SBrad.Beckmann@amd.com    // If the request successfully issued then we should return true.
29510089Sandreas.hansson@arm.com    // Otherwise, we need to tell the port to retry at a later point
29610089Sandreas.hansson@arm.com    // and return false.
2977550SBrad.Beckmann@amd.com    if (requestStatus == RequestStatus_Issued) {
29810657Sandreas.hansson@arm.com        // Save the port in the sender state object to be used later to
29910657Sandreas.hansson@arm.com        // route the response
30010657Sandreas.hansson@arm.com        pkt->pushSenderState(new SenderState(this));
30110657Sandreas.hansson@arm.com
30212687Sbrad.beckmann@amd.com        DPRINTF(RubyPort, "Request %s address %#x issued\n", pkt->cmdString(),
30310089Sandreas.hansson@arm.com                pkt->getAddr());
3046922SBrad.Beckmann@amd.com        return true;
3056882SBrad.Beckmann@amd.com    }
3067023SBrad.Beckmann@amd.com
30711305Sblake.hechtman@amd.com    if (pkt->cmd != MemCmd::MemFenceReq) {
30811305Sblake.hechtman@amd.com        DPRINTF(RubyPort,
30912687Sbrad.beckmann@amd.com                "Request %s for address %#x did not issue because %s\n",
31012687Sbrad.beckmann@amd.com                pkt->cmdString(), pkt->getAddr(),
31112687Sbrad.beckmann@amd.com                RequestStatus_to_string(requestStatus));
31211305Sblake.hechtman@amd.com    }
3137039Snate@binkert.org
31411266SBrad.Beckmann@amd.com    addToRetryList();
31511266SBrad.Beckmann@amd.com
3166922SBrad.Beckmann@amd.com    return false;
3176882SBrad.Beckmann@amd.com}
3186882SBrad.Beckmann@amd.com
31912395Sswapnilster@gmail.comTick
32012395Sswapnilster@gmail.comRubyPort::MemSlavePort::recvAtomic(PacketPtr pkt)
32112395Sswapnilster@gmail.com{
32212395Sswapnilster@gmail.com    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
32312395Sswapnilster@gmail.com    // Only atomic_noncaching mode supported!
32412395Sswapnilster@gmail.com    if (!ruby_port->system->bypassCaches()) {
32512395Sswapnilster@gmail.com        panic("Ruby supports atomic accesses only in noncaching mode\n");
32612395Sswapnilster@gmail.com    }
32712395Sswapnilster@gmail.com
32812395Sswapnilster@gmail.com    // Check for pio requests and directly send them to the dedicated
32912395Sswapnilster@gmail.com    // pio port.
33012395Sswapnilster@gmail.com    if (pkt->cmd != MemCmd::MemFenceReq) {
33112395Sswapnilster@gmail.com        if (!isPhysMemAddress(pkt->getAddr())) {
33212395Sswapnilster@gmail.com            assert(ruby_port->memMasterPort.isConnected());
33312395Sswapnilster@gmail.com            DPRINTF(RubyPort, "Request address %#x assumed to be a "
33412395Sswapnilster@gmail.com                    "pio address\n", pkt->getAddr());
33512395Sswapnilster@gmail.com
33612395Sswapnilster@gmail.com            // Save the port in the sender state object to be used later to
33712395Sswapnilster@gmail.com            // route the response
33812395Sswapnilster@gmail.com            pkt->pushSenderState(new SenderState(this));
33912395Sswapnilster@gmail.com
34012395Sswapnilster@gmail.com            // send next cycle
34112395Sswapnilster@gmail.com            Tick req_ticks = ruby_port->memMasterPort.sendAtomic(pkt);
34212395Sswapnilster@gmail.com            return ruby_port->ticksToCycles(req_ticks);
34312395Sswapnilster@gmail.com        }
34412395Sswapnilster@gmail.com
34512395Sswapnilster@gmail.com        assert(getOffset(pkt->getAddr()) + pkt->getSize() <=
34612395Sswapnilster@gmail.com               RubySystem::getBlockSizeBytes());
34712395Sswapnilster@gmail.com    }
34812395Sswapnilster@gmail.com
34912395Sswapnilster@gmail.com    // Find appropriate directory for address
35012395Sswapnilster@gmail.com    // This assumes that protocols have a Directory machine,
35112395Sswapnilster@gmail.com    // which has its memPort hooked up to memory. This can
35212395Sswapnilster@gmail.com    // fail for some custom protocols.
35312395Sswapnilster@gmail.com    MachineID id = ruby_port->m_controller->mapAddressToMachine(
35412395Sswapnilster@gmail.com                    pkt->getAddr(), MachineType_Directory);
35512395Sswapnilster@gmail.com    RubySystem *rs = ruby_port->m_ruby_system;
35612395Sswapnilster@gmail.com    AbstractController *directory =
35712395Sswapnilster@gmail.com        rs->m_abstract_controls[id.getType()][id.getNum()];
35812395Sswapnilster@gmail.com    return directory->recvAtomic(pkt);
35912395Sswapnilster@gmail.com}
36012395Sswapnilster@gmail.com
3618436SBrad.Beckmann@amd.comvoid
36211266SBrad.Beckmann@amd.comRubyPort::MemSlavePort::addToRetryList()
36311266SBrad.Beckmann@amd.com{
36411266SBrad.Beckmann@amd.com    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
36511266SBrad.Beckmann@amd.com
36611266SBrad.Beckmann@amd.com    //
36711266SBrad.Beckmann@amd.com    // Unless the requestor do not want retries (e.g., the Ruby tester),
36811266SBrad.Beckmann@amd.com    // record the stalled M5 port for later retry when the sequencer
36911266SBrad.Beckmann@amd.com    // becomes free.
37011266SBrad.Beckmann@amd.com    //
37111266SBrad.Beckmann@amd.com    if (!no_retry_on_stall && !ruby_port->onRetryList(this)) {
37211266SBrad.Beckmann@amd.com        ruby_port->addToRetryList(this);
37311266SBrad.Beckmann@amd.com    }
37411266SBrad.Beckmann@amd.com}
37511266SBrad.Beckmann@amd.com
37611266SBrad.Beckmann@amd.comvoid
37710090Snilay@cs.wisc.eduRubyPort::MemSlavePort::recvFunctional(PacketPtr pkt)
3788436SBrad.Beckmann@amd.com{
37910090Snilay@cs.wisc.edu    DPRINTF(RubyPort, "Functional access for address: %#x\n", pkt->getAddr());
3808436SBrad.Beckmann@amd.com
38110919Sbrandon.potter@amd.com    RubyPort *rp M5_VAR_USED = static_cast<RubyPort *>(&owner);
38210919Sbrandon.potter@amd.com    RubySystem *rs = rp->m_ruby_system;
38310919Sbrandon.potter@amd.com
3848436SBrad.Beckmann@amd.com    // Check for pio requests and directly send them to the dedicated
3858436SBrad.Beckmann@amd.com    // pio port.
3868436SBrad.Beckmann@amd.com    if (!isPhysMemAddress(pkt->getAddr())) {
38710090Snilay@cs.wisc.edu        DPRINTF(RubyPort, "Pio Request for address: 0x%#x\n", pkt->getAddr());
38811596Sandreas.sandberg@arm.com        assert(rp->pioMasterPort.isConnected());
38911596Sandreas.sandberg@arm.com        rp->pioMasterPort.sendFunctional(pkt);
39011596Sandreas.sandberg@arm.com        return;
3918436SBrad.Beckmann@amd.com    }
3928436SBrad.Beckmann@amd.com
3938436SBrad.Beckmann@amd.com    assert(pkt->getAddr() + pkt->getSize() <=
39411025Snilay@cs.wisc.edu           makeLineAddress(pkt->getAddr()) + RubySystem::getBlockSizeBytes());
3958436SBrad.Beckmann@amd.com
39610525Snilay@cs.wisc.edu    if (access_backing_store) {
3978436SBrad.Beckmann@amd.com        // The attached physmem contains the official version of data.
3988436SBrad.Beckmann@amd.com        // The following command performs the real functional access.
3998436SBrad.Beckmann@amd.com        // This line should be removed once Ruby supplies the official version
4008436SBrad.Beckmann@amd.com        // of data.
40110919Sbrandon.potter@amd.com        rs->getPhysMem()->functionalAccess(pkt);
40210706Spower.jg@gmail.com    } else {
40310706Spower.jg@gmail.com        bool accessSucceeded = false;
40410706Spower.jg@gmail.com        bool needsResponse = pkt->needsResponse();
40510706Spower.jg@gmail.com
40610706Spower.jg@gmail.com        // Do the functional access on ruby memory
40710706Spower.jg@gmail.com        if (pkt->isRead()) {
40810919Sbrandon.potter@amd.com            accessSucceeded = rs->functionalRead(pkt);
40910706Spower.jg@gmail.com        } else if (pkt->isWrite()) {
41010919Sbrandon.potter@amd.com            accessSucceeded = rs->functionalWrite(pkt);
41110706Spower.jg@gmail.com        } else {
41210706Spower.jg@gmail.com            panic("Unsupported functional command %s\n", pkt->cmdString());
41310706Spower.jg@gmail.com        }
41410706Spower.jg@gmail.com
41510706Spower.jg@gmail.com        // Unless the requester explicitly said otherwise, generate an error if
41610706Spower.jg@gmail.com        // the functional request failed
41710706Spower.jg@gmail.com        if (!accessSucceeded && !pkt->suppressFuncError()) {
41810706Spower.jg@gmail.com            fatal("Ruby functional %s failed for address %#x\n",
41910706Spower.jg@gmail.com                  pkt->isWrite() ? "write" : "read", pkt->getAddr());
42010706Spower.jg@gmail.com        }
42110706Spower.jg@gmail.com
42210706Spower.jg@gmail.com        // turn packet around to go back to requester if response expected
42310706Spower.jg@gmail.com        if (needsResponse) {
42410706Spower.jg@gmail.com            pkt->setFunctionalResponseStatus(accessSucceeded);
42510706Spower.jg@gmail.com        }
42610706Spower.jg@gmail.com
42710706Spower.jg@gmail.com        DPRINTF(RubyPort, "Functional access %s!\n",
42810706Spower.jg@gmail.com                accessSucceeded ? "successful":"failed");
4298436SBrad.Beckmann@amd.com    }
4308436SBrad.Beckmann@amd.com}
4318436SBrad.Beckmann@amd.com
4326882SBrad.Beckmann@amd.comvoid
4336922SBrad.Beckmann@amd.comRubyPort::ruby_hit_callback(PacketPtr pkt)
4346882SBrad.Beckmann@amd.com{
43510089Sandreas.hansson@arm.com    DPRINTF(RubyPort, "Hit callback for %s 0x%x\n", pkt->cmdString(),
43610089Sandreas.hansson@arm.com            pkt->getAddr());
4377039Snate@binkert.org
43810089Sandreas.hansson@arm.com    // The packet was destined for memory and has not yet been turned
43910089Sandreas.hansson@arm.com    // into a response
44010089Sandreas.hansson@arm.com    assert(system->isMemAddr(pkt->getAddr()));
44110089Sandreas.hansson@arm.com    assert(pkt->isRequest());
4426882SBrad.Beckmann@amd.com
44310657Sandreas.hansson@arm.com    // First we must retrieve the request port from the sender State
44410657Sandreas.hansson@arm.com    RubyPort::SenderState *senderState =
44510657Sandreas.hansson@arm.com        safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
44610657Sandreas.hansson@arm.com    MemSlavePort *port = senderState->port;
44710657Sandreas.hansson@arm.com    assert(port != NULL);
44810657Sandreas.hansson@arm.com    delete senderState;
44910089Sandreas.hansson@arm.com
45010657Sandreas.hansson@arm.com    port->hitCallback(pkt);
4517910SBrad.Beckmann@amd.com
45211266SBrad.Beckmann@amd.com    trySendRetries();
45311266SBrad.Beckmann@amd.com}
45411266SBrad.Beckmann@amd.com
45511266SBrad.Beckmann@amd.comvoid
45611266SBrad.Beckmann@amd.comRubyPort::trySendRetries()
45711266SBrad.Beckmann@amd.com{
4587910SBrad.Beckmann@amd.com    //
45910090Snilay@cs.wisc.edu    // If we had to stall the MemSlavePorts, wake them up because the sequencer
4607910SBrad.Beckmann@amd.com    // likely has free resources now.
4617910SBrad.Beckmann@amd.com    //
46210089Sandreas.hansson@arm.com    if (!retryList.empty()) {
46311266SBrad.Beckmann@amd.com        // Record the current list of ports to retry on a temporary list
46411266SBrad.Beckmann@amd.com        // before calling sendRetryReq on those ports. sendRetryReq will cause
46511266SBrad.Beckmann@amd.com        // an immediate retry, which may result in the ports being put back on
46611266SBrad.Beckmann@amd.com        // the list. Therefore we want to clear the retryList before calling
46711266SBrad.Beckmann@amd.com        // sendRetryReq.
46810090Snilay@cs.wisc.edu        std::vector<MemSlavePort *> curRetryList(retryList);
4698162SBrad.Beckmann@amd.com
4708162SBrad.Beckmann@amd.com        retryList.clear();
47110089Sandreas.hansson@arm.com
47210089Sandreas.hansson@arm.com        for (auto i = curRetryList.begin(); i != curRetryList.end(); ++i) {
4738162SBrad.Beckmann@amd.com            DPRINTF(RubyPort,
47411266SBrad.Beckmann@amd.com                    "Sequencer may now be free. SendRetry to port %s\n",
4757910SBrad.Beckmann@amd.com                    (*i)->name());
47610713Sandreas.hansson@arm.com            (*i)->sendRetryReq();
4777910SBrad.Beckmann@amd.com        }
4787910SBrad.Beckmann@amd.com    }
4798688Snilay@cs.wisc.edu}
4808688Snilay@cs.wisc.edu
4818688Snilay@cs.wisc.eduvoid
4828688Snilay@cs.wisc.eduRubyPort::testDrainComplete()
4838688Snilay@cs.wisc.edu{
4848688Snilay@cs.wisc.edu    //If we weren't able to drain before, we might be able to now.
48510913Sandreas.sandberg@arm.com    if (drainState() == DrainState::Draining) {
4869245Shestness@cs.wisc.edu        unsigned int drainCount = outstandingCount();
4879152Satgutier@umich.edu        DPRINTF(Drain, "Drain count: %u\n", drainCount);
4888688Snilay@cs.wisc.edu        if (drainCount == 0) {
4899342SAndreas.Sandberg@arm.com            DPRINTF(Drain, "RubyPort done draining, signaling drain done\n");
49010913Sandreas.sandberg@arm.com            signalDrainDone();
4918688Snilay@cs.wisc.edu        }
4928688Snilay@cs.wisc.edu    }
4938688Snilay@cs.wisc.edu}
4948688Snilay@cs.wisc.edu
49510913Sandreas.sandberg@arm.comDrainState
49610913Sandreas.sandberg@arm.comRubyPort::drain()
4978688Snilay@cs.wisc.edu{
4988688Snilay@cs.wisc.edu    if (isDeadlockEventScheduled()) {
4998688Snilay@cs.wisc.edu        descheduleDeadlockEvent();
5008688Snilay@cs.wisc.edu    }
5018688Snilay@cs.wisc.edu
5029245Shestness@cs.wisc.edu    //
5039245Shestness@cs.wisc.edu    // If the RubyPort is not empty, then it needs to clear all outstanding
50410913Sandreas.sandberg@arm.com    // requests before it should call signalDrainDone()
5059245Shestness@cs.wisc.edu    //
5069245Shestness@cs.wisc.edu    DPRINTF(Config, "outstanding count %d\n", outstandingCount());
50710913Sandreas.sandberg@arm.com    if (outstandingCount() > 0) {
5089152Satgutier@umich.edu        DPRINTF(Drain, "RubyPort not drained\n");
50910913Sandreas.sandberg@arm.com        return DrainState::Draining;
51010913Sandreas.sandberg@arm.com    } else {
51110913Sandreas.sandberg@arm.com        return DrainState::Drained;
5128688Snilay@cs.wisc.edu    }
5136882SBrad.Beckmann@amd.com}
5146882SBrad.Beckmann@amd.com
void
RubyPort::MemSlavePort::hitCallback(PacketPtr pkt)
{
    // Completion callback from the Ruby sequencer: optionally perform the
    // actual data access against M5's backing physical memory, then turn
    // the packet around as a response (or delete it if no response is
    // expected).
    bool needsResponse = pkt->needsResponse();

    // Unless specified at configuration, all responses except failed SC
    // and Flush operations access M5 physical memory.
    bool accessPhysMem = access_backing_store;

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            if (pkt->req->getExtraData() != 0) {
                //
                // Successful SC packets convert to normal writes
                //
                pkt->convertScToWrite();
            } else {
                //
                // Failed SC packets don't access physical memory and thus
                // the RubyPort itself must convert it to a response.
                //
                accessPhysMem = false;
            }
        } else {
            //
            // All LL packets convert to normal loads so that M5 PhysMem does
            // not lock the blocks.
            //
            pkt->convertLlToRead();
        }
    }

    // Flush, acquire, release requests don't access physical memory
    if (pkt->isFlush() || pkt->cmd == MemCmd::MemFenceReq) {
        accessPhysMem = false;
    }

    // Kernel-marked requests skip the backing store but are forced to
    // produce a response even if the packet would not normally need one.
    if (pkt->req->isKernel()) {
        accessPhysMem = false;
        needsResponse = true;
    }

    DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse);

    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
    RubySystem *rs = ruby_port->m_ruby_system;
    if (accessPhysMem) {
        // Perform the functional read/write against backing memory; this
        // also turns the packet into a response when one is needed.
        rs->getPhysMem()->access(pkt);
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        DPRINTF(RubyPort, "Sending packet back over port\n");
        // Send a response in the same cycle. There is no need to delay the
        // response because the response latency is already incurred in the
        // Ruby protocol.
        schedTimingResp(pkt, curTick());
    } else {
        delete pkt;
    }

    DPRINTF(RubyPort, "Hit callback done!\n");
}
5806882SBrad.Beckmann@amd.com
5818922Swilliam.wang@arm.comAddrRangeList
58210090Snilay@cs.wisc.eduRubyPort::PioSlavePort::getAddrRanges() const
5838922Swilliam.wang@arm.com{
5848922Swilliam.wang@arm.com    // at the moment the assumption is that the master does not care
5858922Swilliam.wang@arm.com    AddrRangeList ranges;
58610090Snilay@cs.wisc.edu    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
58710090Snilay@cs.wisc.edu
58810090Snilay@cs.wisc.edu    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
58910090Snilay@cs.wisc.edu        ranges.splice(ranges.begin(),
59010090Snilay@cs.wisc.edu                ruby_port->master_ports[i]->getAddrRanges());
59110090Snilay@cs.wisc.edu    }
59210481Sandreas.hansson@arm.com    for (const auto M5_VAR_USED &r : ranges)
59310481Sandreas.hansson@arm.com        DPRINTF(RubyPort, "%s\n", r.to_string());
5948922Swilliam.wang@arm.com    return ranges;
5958922Swilliam.wang@arm.com}
5968922Swilliam.wang@arm.com
5976882SBrad.Beckmann@amd.combool
59810090Snilay@cs.wisc.eduRubyPort::MemSlavePort::isPhysMemAddress(Addr addr) const
5996882SBrad.Beckmann@amd.com{
60010090Snilay@cs.wisc.edu    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
6018931Sandreas.hansson@arm.com    return ruby_port->system->isMemAddr(addr);
6026882SBrad.Beckmann@amd.com}
6037909Shestness@cs.utexas.edu
6048717Snilay@cs.wisc.eduvoid
60511025Snilay@cs.wisc.eduRubyPort::ruby_eviction_callback(Addr address)
6068717Snilay@cs.wisc.edu{
6078717Snilay@cs.wisc.edu    DPRINTF(RubyPort, "Sending invalidations.\n");
60811143Sjthestness@gmail.com    // Allocate the invalidate request and packet on the stack, as it is
60911143Sjthestness@gmail.com    // assumed they will not be modified or deleted by receivers.
6109633Sjthestness@gmail.com    // TODO: should this really be using funcMasterId?
61111143Sjthestness@gmail.com    Request request(address, RubySystem::getBlockSizeBytes(), 0,
61211143Sjthestness@gmail.com                    Request::funcMasterId);
6139633Sjthestness@gmail.com    // Use a single packet to signal all snooping ports of the invalidation.
6149633Sjthestness@gmail.com    // This assumes that snooping ports do NOT modify the packet/request
61511143Sjthestness@gmail.com    Packet pkt(&request, MemCmd::InvalidateReq);
6168922Swilliam.wang@arm.com    for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) {
6179088Sandreas.hansson@arm.com        // check if the connected master port is snooping
6189088Sandreas.hansson@arm.com        if ((*p)->isSnooping()) {
6198948Sandreas.hansson@arm.com            // send as a snoop request
6209633Sjthestness@gmail.com            (*p)->sendTimingSnoopReq(&pkt);
6218922Swilliam.wang@arm.com        }
6228717Snilay@cs.wisc.edu    }
6238717Snilay@cs.wisc.edu}
62410090Snilay@cs.wisc.edu
62510090Snilay@cs.wisc.eduvoid
62610090Snilay@cs.wisc.eduRubyPort::PioMasterPort::recvRangeChange()
62710090Snilay@cs.wisc.edu{
62810090Snilay@cs.wisc.edu    RubyPort &r = static_cast<RubyPort &>(owner);
62910090Snilay@cs.wisc.edu    r.gotAddrRanges--;
63010117Snilay@cs.wisc.edu    if (r.gotAddrRanges == 0 && FullSystem) {
63110090Snilay@cs.wisc.edu        r.pioSlavePort.sendRangeChange();
63210090Snilay@cs.wisc.edu    }
63310090Snilay@cs.wisc.edu}
634