/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2009-2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2011 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/system/RubyPort.hh"

#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/Ruby.hh"
#include "mem/ruby/protocol/AccessPermission.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/simple_mem.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

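// RubyPort is the glue between gem5's port-based memory system and the
// Ruby memory model: CPU-side slave ports feed requests into a Ruby
// sequencer, while the master ports route PIO traffic out to devices.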
RubyPort::RubyPort(const Params *p)
    : ClockedObject(p), m_ruby_system(p->ruby_system), m_version(p->version),
      m_controller(NULL), m_mandatory_q_ptr(NULL),
      m_usingRubyTester(p->using_ruby_tester), system(p->system),
      pioMasterPort(csprintf("%s.pio-master-port", name()), this),
      pioSlavePort(csprintf("%s.pio-slave-port", name()), this),
      memMasterPort(csprintf("%s.mem-master-port", name()), this),
      memSlavePort(csprintf("%s.mem-slave-port", name()), this,
                   p->ruby_system->getAccessBackingStore(), -1,
                   p->no_retry_on_stall),
      gotAddrRanges(p->port_master_connection_count),
      m_isCPUSequencer(p->is_cpu_sequencer)
{
    assert(m_version != -1);

    // create the slave ports based on the number of connected ports
    for (size_t i = 0; i < p->port_slave_connection_count; ++i) {
        slave_ports.push_back(new MemSlavePort(csprintf("%s.slave%d", name(),
            i), this, p->ruby_system->getAccessBackingStore(),
            i, p->no_retry_on_stall));
    }

    // create the master ports based on the number of connected ports
    for (size_t i = 0; i < p->port_master_connection_count; ++i) {
        master_ports.push_back(new PioMasterPort(csprintf("%s.master%d",
            name(), i), this));
    }
}

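// init() runs after all SimObjects have been constructed, so by this
// point the controller must have been attached and its mandatory queue
// can safely be cached.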
void
RubyPort::init()
{
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
}

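// Resolve a port by the name used in the Python configuration; called
// during elaboration when this object is wired up to its peers.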
Port &
RubyPort::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_master_port") {
        return memMasterPort;
    } else if (if_name == "pio_master_port") {
        return pioMasterPort;
    } else if (if_name == "mem_slave_port") {
        return memSlavePort;
    } else if (if_name == "pio_slave_port") {
        return pioSlavePort;
    } else if (if_name == "master") {
        // used by the x86 CPUs to connect the interrupt PIO and interrupt
        // slave port
        if (idx >= static_cast<PortID>(master_ports.size())) {
            panic("RubyPort::getPort master: unknown index %d\n", idx);
        }

        return *master_ports[idx];
    } else if (if_name == "slave") {
        // used by the CPUs to connect the caches to the interconnect, and
        // for the x86 case also the interrupt master
        if (idx >= static_cast<PortID>(slave_ports.size())) {
            panic("RubyPort::getPort slave: unknown index %d\n", idx);
        }

        return *slave_ports[idx];
    }

    // pass it along to our super class
    return ClockedObject::getPort(if_name, idx);
}

RubyPort::PioMasterPort::PioMasterPort(const std::string &_name,
                           RubyPort *_port)
    : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
      reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
{
    DPRINTF(RubyPort, "Created master pioport on sequencer %s\n", _name);
}

RubyPort::PioSlavePort::PioSlavePort(const std::string &_name,
                           RubyPort *_port)
    : QueuedSlavePort(_name, _port, queue), queue(*_port, *this)
{
    DPRINTF(RubyPort, "Created slave pioport on sequencer %s\n", _name);
}

RubyPort::MemMasterPort::MemMasterPort(const std::string &_name,
                           RubyPort *_port)
    : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
      reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
{
    DPRINTF(RubyPort, "Created master memport on ruby sequencer %s\n", _name);
}

RubyPort::MemSlavePort::MemSlavePort(const std::string &_name, RubyPort *_port,
                                     bool _access_backing_store, PortID id,
                                     bool _no_retry_on_stall)
    : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
      access_backing_store(_access_backing_store),
      no_retry_on_stall(_no_retry_on_stall)
{
    DPRINTF(RubyPort, "Created slave memport on ruby sequencer %s\n", _name);
}

bool
RubyPort::PioMasterPort::recvTimingResp(PacketPtr pkt)
{
    RubyPort *rp = static_cast<RubyPort *>(&owner);
    DPRINTF(RubyPort, "Response for address: %#x\n", pkt->getAddr());

    // send next cycle
    rp->pioSlavePort.schedTimingResp(
            pkt, curTick() + rp->m_ruby_system->clockPeriod());
    return true;
}

bool
RubyPort::MemMasterPort::recvTimingResp(PacketPtr pkt)
{
    // got a response from a device
    assert(pkt->isResponse());

    // First we must retrieve the request port from the sender State
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
    MemSlavePort *port = senderState->port;
    assert(port != NULL);
    delete senderState;

    // In FS mode, ruby memory will receive pio responses from devices
    // and it must forward these responses back to the particular CPU.
    DPRINTF(RubyPort, "Pio response for address %#x, going to %s\n",
            pkt->getAddr(), port->name());

    // attempt to send the response in the next cycle
    RubyPort *rp = static_cast<RubyPort *>(&owner);
    port->schedTimingResp(pkt, curTick() + rp->m_ruby_system->clockPeriod());

    return true;
}

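// PIO requests from a CPU are routed to whichever master port claims
// the target address range. The queued master port is expected to
// accept the request; a blocked downstream port would trip the assert.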
bool
RubyPort::PioSlavePort::recvTimingReq(PacketPtr pkt)
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
        AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
        for (auto it = l.begin(); it != l.end(); ++it) {
            if (it->contains(pkt->getAddr())) {
                // generally it is not safe to assume success here as
                // the port could be blocked
                bool M5_VAR_USED success =
                    ruby_port->master_ports[i]->sendTimingReq(pkt);
                assert(success);
                return true;
            }
        }
    }
    panic("Should never reach here!\n");
}

Tick
RubyPort::PioSlavePort::recvAtomic(PacketPtr pkt)
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
    // Only atomic_noncaching mode supported!
    if (!ruby_port->system->bypassCaches()) {
        panic("Ruby supports atomic accesses only in noncaching mode\n");
    }

    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
        AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
        for (auto it = l.begin(); it != l.end(); ++it) {
            if (it->contains(pkt->getAddr())) {
                return ruby_port->master_ports[i]->sendAtomic(pkt);
            }
        }
    }
    panic("Could not find address in Ruby PIO address ranges!\n");
}

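// Timing requests take one of three paths: cache maintenance operations
// are acknowledged immediately (Ruby does not model them), addresses
// outside physical memory are forwarded to the PIO master port, and
// everything else is handed to the Ruby sequencer via makeRequest().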
bool
RubyPort::MemSlavePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Timing request for address %#x on port %d\n",
            pkt->getAddr(), id);
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    if (pkt->cacheResponding())
        panic("RubyPort should never see request with the "
              "cacheResponding flag set\n");

    // Ruby doesn't support cache maintenance operations at the moment;
    // as a workaround, we respond right away.
    if (pkt->req->isCacheMaintenance()) {
        warn_once("Cache maintenance operations are not supported in Ruby.\n");
        pkt->makeResponse();
        schedTimingResp(pkt, curTick());
        return true;
    }

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (pkt->cmd != MemCmd::MemFenceReq) {
        if (!isPhysMemAddress(pkt->getAddr())) {
            assert(ruby_port->memMasterPort.isConnected());
            DPRINTF(RubyPort, "Request address %#x assumed to be a "
                    "pio address\n", pkt->getAddr());

            // Save the port in the sender state object to be used later to
            // route the response
            pkt->pushSenderState(new SenderState(this));

            // send next cycle
            RubySystem *rs = ruby_port->m_ruby_system;
            ruby_port->memMasterPort.schedTimingReq(pkt,
                curTick() + rs->clockPeriod());
            return true;
        }

        assert(getOffset(pkt->getAddr()) + pkt->getSize() <=
               RubySystem::getBlockSizeBytes());
    }

    // Submit the ruby request
    RequestStatus requestStatus = ruby_port->makeRequest(pkt);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to tell the port to retry at a later point
    // and return false.
    if (requestStatus == RequestStatus_Issued) {
        // Save the port in the sender state object to be used later to
        // route the response
        pkt->pushSenderState(new SenderState(this));

        DPRINTF(RubyPort, "Request %s address %#x issued\n", pkt->cmdString(),
                pkt->getAddr());
        return true;
    }

    if (pkt->cmd != MemCmd::MemFenceReq) {
        DPRINTF(RubyPort,
                "Request %s for address %#x did not issue because %s\n",
                pkt->cmdString(), pkt->getAddr(),
                RequestStatus_to_string(requestStatus));
    }

    addToRetryList();

    return false;
}

Tick
RubyPort::MemSlavePort::recvAtomic(PacketPtr pkt)
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
    // Only atomic_noncaching mode supported!
    if (!ruby_port->system->bypassCaches()) {
        panic("Ruby supports atomic accesses only in noncaching mode\n");
    }

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (pkt->cmd != MemCmd::MemFenceReq) {
        if (!isPhysMemAddress(pkt->getAddr())) {
            assert(ruby_port->memMasterPort.isConnected());
            DPRINTF(RubyPort, "Request address %#x assumed to be a "
                    "pio address\n", pkt->getAddr());

            // Save the port in the sender state object to be used later to
            // route the response
            pkt->pushSenderState(new SenderState(this));

            // send next cycle
            Tick req_ticks = ruby_port->memMasterPort.sendAtomic(pkt);
            return ruby_port->ticksToCycles(req_ticks);
        }

        assert(getOffset(pkt->getAddr()) + pkt->getSize() <=
               RubySystem::getBlockSizeBytes());
    }

    // Find appropriate directory for address
    // This assumes that protocols have a Directory machine,
    // which has its memPort hooked up to memory. This can
    // fail for some custom protocols.
    MachineID id = ruby_port->m_controller->mapAddressToMachine(
                    pkt->getAddr(), MachineType_Directory);
    RubySystem *rs = ruby_port->m_ruby_system;
    AbstractController *directory =
        rs->m_abstract_controls[id.getType()][id.getNum()];
    return directory->recvAtomic(pkt);
}

void
RubyPort::MemSlavePort::addToRetryList()
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    //
    // Unless the requestor does not want retries (e.g., the Ruby tester),
    // record the stalled M5 port for later retry when the sequencer
    // becomes free.
    //
    if (!no_retry_on_stall && !ruby_port->onRetryList(this)) {
        ruby_port->addToRetryList(this);
    }
}

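// Functional accesses either go straight to the backing store (when it
// is enabled) or walk Ruby's caches and memory via functionalRead /
// functionalWrite; non-memory addresses are forwarded to the PIO port.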
void
RubyPort::MemSlavePort::recvFunctional(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Functional access for address: %#x\n", pkt->getAddr());

    RubyPort *rp M5_VAR_USED = static_cast<RubyPort *>(&owner);
    RubySystem *rs = rp->m_ruby_system;

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt->getAddr())) {
        DPRINTF(RubyPort, "Pio Request for address: %#x\n", pkt->getAddr());
        assert(rp->pioMasterPort.isConnected());
        rp->pioMasterPort.sendFunctional(pkt);
        return;
    }

    assert(pkt->getAddr() + pkt->getSize() <=
           makeLineAddress(pkt->getAddr()) + RubySystem::getBlockSizeBytes());

    if (access_backing_store) {
        // The attached physmem contains the official version of data.
        // The following command performs the real functional access.
        // This line should be removed once Ruby supplies the official version
        // of data.
        rs->getPhysMem()->functionalAccess(pkt);
    } else {
        bool accessSucceeded = false;
        bool needsResponse = pkt->needsResponse();

        // Do the functional access on ruby memory
        if (pkt->isRead()) {
            accessSucceeded = rs->functionalRead(pkt);
        } else if (pkt->isWrite()) {
            accessSucceeded = rs->functionalWrite(pkt);
        } else {
            panic("Unsupported functional command %s\n", pkt->cmdString());
        }

        // Unless the requester explicitly said otherwise, generate an error
        // if the functional request failed
        if (!accessSucceeded && !pkt->suppressFuncError()) {
            fatal("Ruby functional %s failed for address %#x\n",
                  pkt->isWrite() ? "write" : "read", pkt->getAddr());
        }

        // turn packet around to go back to requester if response expected
        if (needsResponse) {
            pkt->setFunctionalResponseStatus(accessSucceeded);
        }

        DPRINTF(RubyPort, "Functional access %s!\n",
                accessSucceeded ? "successful":"failed");
    }
}

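// Invoked by the sequencer when a request completes inside Ruby. The
// originating slave port is recovered from the packet's SenderState,
// the packet is turned around as a response, and any ports stalled on
// this sequencer are given a chance to retry.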
void
RubyPort::ruby_hit_callback(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Hit callback for %s 0x%x\n", pkt->cmdString(),
            pkt->getAddr());

    // The packet was destined for memory and has not yet been turned
    // into a response
    assert(system->isMemAddr(pkt->getAddr()));
    assert(pkt->isRequest());

    // First we must retrieve the request port from the sender State
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
    MemSlavePort *port = senderState->port;
    assert(port != NULL);
    delete senderState;

    port->hitCallback(pkt);

    trySendRetries();
}

void
RubyPort::trySendRetries()
{
    //
    // If we had to stall the MemSlavePorts, wake them up because the
    // sequencer likely has free resources now.
    //
    if (!retryList.empty()) {
        // Record the current list of ports to retry on a temporary list
        // before calling sendRetryReq on those ports. sendRetryReq will cause
        // an immediate retry, which may result in the ports being put back on
        // the list. Therefore we want to clear the retryList before calling
        // sendRetryReq.
        std::vector<MemSlavePort *> curRetryList(retryList);

        retryList.clear();

        for (auto i = curRetryList.begin(); i != curRetryList.end(); ++i) {
            DPRINTF(RubyPort,
                    "Sequencer may now be free. SendRetry to port %s\n",
                    (*i)->name());
            (*i)->sendRetryReq();
        }
    }
}

void
RubyPort::testDrainComplete()
{
    // If we weren't able to drain before, we might be able to now.
    if (drainState() == DrainState::Draining) {
        unsigned int drainCount = outstandingCount();
        DPRINTF(Drain, "Drain count: %u\n", drainCount);
        if (drainCount == 0) {
            DPRINTF(Drain, "RubyPort done draining, signaling drain done\n");
            signalDrainDone();
        }
    }
}

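// Draining (e.g., before a checkpoint or CPU switch) first cancels the
// pending deadlock-check event so it cannot fire while simulation is
// paused, then reports Drained only once no requests are outstanding.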
DrainState
RubyPort::drain()
{
    if (isDeadlockEventScheduled()) {
        descheduleDeadlockEvent();
    }

    //
    // If the RubyPort is not empty, then it needs to clear all outstanding
    // requests before it should call signalDrainDone()
    //
    DPRINTF(Config, "outstanding count %d\n", outstandingCount());
    if (outstandingCount() > 0) {
        DPRINTF(Drain, "RubyPort not drained\n");
        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

void
RubyPort::MemSlavePort::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();

    // Unless specified at configuration, all responses except failed SC
    // and Flush operations access M5 physical memory.
    bool accessPhysMem = access_backing_store;

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            if (pkt->req->getExtraData() != 0) {
                //
                // Successful SC packets convert to normal writes
                //
                pkt->convertScToWrite();
            } else {
                //
                // Failed SC packets don't access physical memory and thus
                // the RubyPort itself must convert it to a response.
                //
                accessPhysMem = false;
            }
        } else {
            //
            // All LL packets convert to normal loads so that M5 PhysMem does
            // not lock the blocks.
            //
            pkt->convertLlToRead();
        }
    }

    // Flush, acquire, release requests don't access physical memory
    if (pkt->isFlush() || pkt->cmd == MemCmd::MemFenceReq) {
        accessPhysMem = false;
    }

    if (pkt->req->isKernel()) {
        accessPhysMem = false;
        needsResponse = true;
    }

    DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse);

    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
    RubySystem *rs = ruby_port->m_ruby_system;
    if (accessPhysMem) {
        rs->getPhysMem()->access(pkt);
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        DPRINTF(RubyPort, "Sending packet back over port\n");
        // Send a response in the same cycle. There is no need to delay the
        // response because the response latency is already incurred in the
        // Ruby protocol.
        schedTimingResp(pkt, curTick());
    } else {
        delete pkt;
    }

    DPRINTF(RubyPort, "Hit callback done!\n");
}

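// The PIO slave port advertises the union of all ranges reported by the
// connected master ports, letting an upstream interconnect route device
// accesses through this RubyPort.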
AddrRangeList
RubyPort::PioSlavePort::getAddrRanges() const
{
    // at the moment the assumption is that the master does not care
    AddrRangeList ranges;
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
        ranges.splice(ranges.begin(),
                ruby_port->master_ports[i]->getAddrRanges());
    }
    for (const auto M5_VAR_USED &r : ranges)
        DPRINTF(RubyPort, "%s\n", r.to_string());
    return ranges;
}

bool
RubyPort::MemSlavePort::isPhysMemAddress(Addr addr) const
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
    return ruby_port->system->isMemAddr(addr);
}

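// Called by Ruby when a block is evicted from a cache. A single
// invalidating snoop is broadcast to every snooping CPU-side port so
// that stale copies (e.g., LL/SC monitors) are cleaned up.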
void
RubyPort::ruby_eviction_callback(Addr address)
{
    DPRINTF(RubyPort, "Sending invalidations.\n");
    // Allocate the invalidate packet on the stack, as it is assumed the
    // receivers will not modify or delete it.
    // TODO: should this really be using funcMasterId?
    auto request = std::make_shared<Request>(
        address, RubySystem::getBlockSizeBytes(), 0,
        Request::funcMasterId);

    // Use a single packet to signal all snooping ports of the invalidation.
    // This assumes that snooping ports do NOT modify the packet/request
    Packet pkt(request, MemCmd::InvalidateReq);
    for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) {
        // check if the connected master port is snooping
        if ((*p)->isSnooping()) {
            // send as a snoop request
            (*p)->sendTimingSnoopReq(&pkt);
        }
    }
}

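// Range changes are counted down as each connected PIO master reports
// in; only after the last one arrives (and only in full-system mode) is
// the combined range change propagated out through the PIO slave port.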
void
RubyPort::PioMasterPort::recvRangeChange()
{
    RubyPort &r = static_cast<RubyPort &>(owner);
    r.gotAddrRanges--;
    if (r.gotAddrRanges == 0 && FullSystem) {
        r.pioSlavePort.sendRangeChange();
    }
}