/* RubyPort.cc revision 9270:92aad0e984ff
 *
 * Copyright (c) 2012 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * Copyright (c) 2011 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
IN NO EVENT SHALL THE COPYRIGHT 334120Sgblack@eecs.umich.edu * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 344120Sgblack@eecs.umich.edu * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 354120Sgblack@eecs.umich.edu * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 364120Sgblack@eecs.umich.edu * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 374120Sgblack@eecs.umich.edu * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 384120Sgblack@eecs.umich.edu * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 394120Sgblack@eecs.umich.edu * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 404120Sgblack@eecs.umich.edu */ 414120Sgblack@eecs.umich.edu 424120Sgblack@eecs.umich.edu#include "cpu/testers/rubytest/RubyTester.hh" 434202Sbinkertn@umich.edu#include "debug/Config.hh" 445069Sgblack@eecs.umich.edu#include "debug/Drain.hh" 454202Sbinkertn@umich.edu#include "debug/Ruby.hh" 465659Sgblack@eecs.umich.edu#include "mem/protocol/AccessPermission.hh" 479022Sgblack@eecs.umich.edu#include "mem/ruby/slicc_interface/AbstractController.hh" 489023Sgblack@eecs.umich.edu#include "mem/ruby/system/RubyPort.hh" 494601Sgblack@eecs.umich.edu#include "sim/system.hh" 505124Sgblack@eecs.umich.edu 517966Sgblack@eecs.umich.eduRubyPort::RubyPort(const Params *p) 525083Sgblack@eecs.umich.edu : MemObject(p), m_version(p->version), m_controller(NULL), 534679Sgblack@eecs.umich.edu m_mandatory_q_ptr(NULL), 546515Sgblack@eecs.umich.edu pio_port(csprintf("%s-pio-port", name()), this), 555083Sgblack@eecs.umich.edu m_usingRubyTester(p->using_ruby_tester), m_request_cnt(0), 564679Sgblack@eecs.umich.edu drainEvent(NULL), ruby_system(p->ruby_system), system(p->system), 574679Sgblack@eecs.umich.edu waitingOnSequencer(false), access_phys_mem(p->access_phys_mem) 588745Sgblack@eecs.umich.edu{ 596313Sgblack@eecs.umich.edu assert(m_version != -1); 
608771Sgblack@eecs.umich.edu 618771Sgblack@eecs.umich.edu // create the slave ports based on the number of connected ports 628771Sgblack@eecs.umich.edu for (size_t i = 0; i < p->port_slave_connection_count; ++i) { 638771Sgblack@eecs.umich.edu slave_ports.push_back(new M5Port(csprintf("%s-slave%d", name(), i), 646365Sgblack@eecs.umich.edu this, ruby_system, access_phys_mem)); 655124Sgblack@eecs.umich.edu } 668752Sgblack@eecs.umich.edu 678771Sgblack@eecs.umich.edu // create the master ports based on the number of connected ports 684202Sbinkertn@umich.edu for (size_t i = 0; i < p->port_master_connection_count; ++i) { 698771Sgblack@eecs.umich.edu master_ports.push_back(new PioPort(csprintf("%s-master%d", name(), i), 708771Sgblack@eecs.umich.edu this)); 714997Sgblack@eecs.umich.edu } 727624Sgblack@eecs.umich.edu} 735135Sgblack@eecs.umich.edu 748753Sgblack@eecs.umich.eduvoid 754997Sgblack@eecs.umich.eduRubyPort::init() 769384SAndreas.Sandberg@arm.com{ 778745Sgblack@eecs.umich.edu assert(m_controller != NULL); 786365Sgblack@eecs.umich.edu m_mandatory_q_ptr = m_controller->getMandatoryQueue(); 798771Sgblack@eecs.umich.edu} 808740Sgblack@eecs.umich.edu 816365Sgblack@eecs.umich.eduMasterPort & 828740Sgblack@eecs.umich.eduRubyPort::getMasterPort(const std::string &if_name, int idx) 838745Sgblack@eecs.umich.edu{ 848752Sgblack@eecs.umich.edu if (if_name == "pio_port") { 858752Sgblack@eecs.umich.edu return pio_port; 869023Sgblack@eecs.umich.edu } 878335Snate@binkert.org 884120Sgblack@eecs.umich.edu // used by the x86 CPUs to connect the interrupt PIO and interrupt slave 895069Sgblack@eecs.umich.edu // port 905081Sgblack@eecs.umich.edu if (if_name != "master") { 915081Sgblack@eecs.umich.edu // pass it along to our super class 925081Sgblack@eecs.umich.edu return MemObject::getMasterPort(if_name, idx); 935081Sgblack@eecs.umich.edu } else { 945081Sgblack@eecs.umich.edu if (idx >= static_cast<int>(master_ports.size())) { 955081Sgblack@eecs.umich.edu panic("RubyPort::getMasterPort: 
unknown index %d\n", idx); 965081Sgblack@eecs.umich.edu } 975081Sgblack@eecs.umich.edu 985081Sgblack@eecs.umich.edu return *master_ports[idx]; 995081Sgblack@eecs.umich.edu } 1005081Sgblack@eecs.umich.edu} 1015081Sgblack@eecs.umich.edu 1025081Sgblack@eecs.umich.eduSlavePort & 1035081Sgblack@eecs.umich.eduRubyPort::getSlavePort(const std::string &if_name, int idx) 1045081Sgblack@eecs.umich.edu{ 1055081Sgblack@eecs.umich.edu // used by the CPUs to connect the caches to the interconnect, and 1065081Sgblack@eecs.umich.edu // for the x86 case also the interrupt master 1075081Sgblack@eecs.umich.edu if (if_name != "slave") { 1085081Sgblack@eecs.umich.edu // pass it along to our super class 1095081Sgblack@eecs.umich.edu return MemObject::getSlavePort(if_name, idx); 1105081Sgblack@eecs.umich.edu } else { 1115081Sgblack@eecs.umich.edu if (idx >= static_cast<int>(slave_ports.size())) { 1125081Sgblack@eecs.umich.edu panic("RubyPort::getSlavePort: unknown index %d\n", idx); 1135081Sgblack@eecs.umich.edu } 1145081Sgblack@eecs.umich.edu 1155081Sgblack@eecs.umich.edu return *slave_ports[idx]; 1165081Sgblack@eecs.umich.edu } 1175081Sgblack@eecs.umich.edu} 1185081Sgblack@eecs.umich.edu 1195081Sgblack@eecs.umich.eduRubyPort::PioPort::PioPort(const std::string &_name, 1205081Sgblack@eecs.umich.edu RubyPort *_port) 1215081Sgblack@eecs.umich.edu : QueuedMasterPort(_name, _port, queue), queue(*_port, *this), 1225081Sgblack@eecs.umich.edu ruby_port(_port) 1235081Sgblack@eecs.umich.edu{ 1245081Sgblack@eecs.umich.edu DPRINTF(RubyPort, "creating master port on ruby sequencer %s\n", _name); 1255081Sgblack@eecs.umich.edu} 1265081Sgblack@eecs.umich.edu 1275081Sgblack@eecs.umich.eduRubyPort::M5Port::M5Port(const std::string &_name, RubyPort *_port, 1285081Sgblack@eecs.umich.edu RubySystem *_system, bool _access_phys_mem) 1295081Sgblack@eecs.umich.edu : QueuedSlavePort(_name, _port, queue), queue(*_port, *this), 1305081Sgblack@eecs.umich.edu ruby_port(_port), ruby_system(_system), 
1315081Sgblack@eecs.umich.edu _onRetryList(false), access_phys_mem(_access_phys_mem) 1325081Sgblack@eecs.umich.edu{ 1335081Sgblack@eecs.umich.edu DPRINTF(RubyPort, "creating slave port on ruby sequencer %s\n", _name); 1345081Sgblack@eecs.umich.edu} 1355081Sgblack@eecs.umich.edu 1365081Sgblack@eecs.umich.eduTick 1375081Sgblack@eecs.umich.eduRubyPort::M5Port::recvAtomic(PacketPtr pkt) 1385081Sgblack@eecs.umich.edu{ 1395081Sgblack@eecs.umich.edu panic("RubyPort::M5Port::recvAtomic() not implemented!\n"); 1405081Sgblack@eecs.umich.edu return 0; 1415081Sgblack@eecs.umich.edu} 1425081Sgblack@eecs.umich.edu 1435081Sgblack@eecs.umich.edu 1445081Sgblack@eecs.umich.edubool 1455680Sgblack@eecs.umich.eduRubyPort::PioPort::recvTimingResp(PacketPtr pkt) 1465081Sgblack@eecs.umich.edu{ 1475933Sgblack@eecs.umich.edu // In FS mode, ruby memory will receive pio responses from devices 1485173Sgblack@eecs.umich.edu // and it must forward these responses back to the particular CPU. 1495359Sgblack@eecs.umich.edu DPRINTF(RubyPort, "Pio response for address %#x\n", pkt->getAddr()); 1505081Sgblack@eecs.umich.edu 1515149Sgblack@eecs.umich.edu // First we must retrieve the request port from the sender State 1525298Sgblack@eecs.umich.edu RubyPort::SenderState *senderState = 1535081Sgblack@eecs.umich.edu safe_cast<RubyPort::SenderState *>(pkt->senderState); 1545081Sgblack@eecs.umich.edu M5Port *port = senderState->port; 1555081Sgblack@eecs.umich.edu assert(port != NULL); 1565081Sgblack@eecs.umich.edu 1575081Sgblack@eecs.umich.edu // pop the sender state from the packet 1585081Sgblack@eecs.umich.edu pkt->senderState = senderState->saved; 1595081Sgblack@eecs.umich.edu delete senderState; 1605081Sgblack@eecs.umich.edu 1615081Sgblack@eecs.umich.edu port->sendTimingResp(pkt); 1625081Sgblack@eecs.umich.edu 1635081Sgblack@eecs.umich.edu return true; 1645081Sgblack@eecs.umich.edu} 1655081Sgblack@eecs.umich.edu 1665081Sgblack@eecs.umich.edubool 
1675081Sgblack@eecs.umich.eduRubyPort::M5Port::recvTimingReq(PacketPtr pkt) 1685081Sgblack@eecs.umich.edu{ 1695081Sgblack@eecs.umich.edu DPRINTF(RubyPort, 1705081Sgblack@eecs.umich.edu "Timing access caught for address %#x\n", pkt->getAddr()); 1715081Sgblack@eecs.umich.edu 1725081Sgblack@eecs.umich.edu //dsm: based on SimpleTimingPort::recvTimingReq(pkt); 1735081Sgblack@eecs.umich.edu 1745081Sgblack@eecs.umich.edu // The received packets should only be M5 requests, which should never 1755081Sgblack@eecs.umich.edu // get nacked. There used to be code to hanldle nacks here, but 1765081Sgblack@eecs.umich.edu // I'm pretty sure it didn't work correctly with the drain code, 1775081Sgblack@eecs.umich.edu // so that would need to be fixed if we ever added it back. 1785081Sgblack@eecs.umich.edu 1795081Sgblack@eecs.umich.edu if (pkt->memInhibitAsserted()) { 1805081Sgblack@eecs.umich.edu warn("memInhibitAsserted???"); 1815081Sgblack@eecs.umich.edu // snooper will supply based on copy of packet 1825081Sgblack@eecs.umich.edu // still target's responsibility to delete packet 1835081Sgblack@eecs.umich.edu delete pkt; 1845081Sgblack@eecs.umich.edu return true; 1855081Sgblack@eecs.umich.edu } 1865081Sgblack@eecs.umich.edu 1875081Sgblack@eecs.umich.edu // Save the port in the sender state object to be used later to 1885081Sgblack@eecs.umich.edu // route the response 1895081Sgblack@eecs.umich.edu pkt->senderState = new SenderState(this, pkt->senderState); 1905081Sgblack@eecs.umich.edu 1915081Sgblack@eecs.umich.edu // Check for pio requests and directly send them to the dedicated 1925081Sgblack@eecs.umich.edu // pio port. 
1935081Sgblack@eecs.umich.edu if (!isPhysMemAddress(pkt->getAddr())) { 1945081Sgblack@eecs.umich.edu assert(ruby_port->pio_port.isConnected()); 1955081Sgblack@eecs.umich.edu DPRINTF(RubyPort, 1965081Sgblack@eecs.umich.edu "Request for address 0x%#x is assumed to be a pio request\n", 1975081Sgblack@eecs.umich.edu pkt->getAddr()); 1985081Sgblack@eecs.umich.edu 1995081Sgblack@eecs.umich.edu // send next cycle 2005081Sgblack@eecs.umich.edu ruby_port->pio_port.schedTimingReq(pkt, 2015081Sgblack@eecs.umich.edu curTick() + g_system_ptr->clockPeriod()); 2025081Sgblack@eecs.umich.edu return true; 2035081Sgblack@eecs.umich.edu } 2045081Sgblack@eecs.umich.edu 2055081Sgblack@eecs.umich.edu assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <= 2065081Sgblack@eecs.umich.edu RubySystem::getBlockSizeBytes()); 2075081Sgblack@eecs.umich.edu 2085081Sgblack@eecs.umich.edu // Submit the ruby request 2095081Sgblack@eecs.umich.edu RequestStatus requestStatus = ruby_port->makeRequest(pkt); 2105081Sgblack@eecs.umich.edu 2115081Sgblack@eecs.umich.edu // If the request successfully issued then we should return true. 2125081Sgblack@eecs.umich.edu // Otherwise, we need to delete the senderStatus we just created and return 2135081Sgblack@eecs.umich.edu // false. 2145081Sgblack@eecs.umich.edu if (requestStatus == RequestStatus_Issued) { 2155081Sgblack@eecs.umich.edu DPRINTF(RubyPort, "Request %#x issued\n", pkt->getAddr()); 2165081Sgblack@eecs.umich.edu return true; 2175081Sgblack@eecs.umich.edu } 2185081Sgblack@eecs.umich.edu 2195081Sgblack@eecs.umich.edu // 2205081Sgblack@eecs.umich.edu // Unless one is using the ruby tester, record the stalled M5 port for 2215081Sgblack@eecs.umich.edu // later retry when the sequencer becomes free. 
2225081Sgblack@eecs.umich.edu // 2235081Sgblack@eecs.umich.edu if (!ruby_port->m_usingRubyTester) { 2245081Sgblack@eecs.umich.edu ruby_port->addToRetryList(this); 2255081Sgblack@eecs.umich.edu } 2265081Sgblack@eecs.umich.edu 2275081Sgblack@eecs.umich.edu DPRINTF(RubyPort, 2285081Sgblack@eecs.umich.edu "Request for address %#x did not issue because %s\n", 2295081Sgblack@eecs.umich.edu pkt->getAddr(), RequestStatus_to_string(requestStatus)); 2305081Sgblack@eecs.umich.edu 2315081Sgblack@eecs.umich.edu SenderState* senderState = safe_cast<SenderState*>(pkt->senderState); 2325081Sgblack@eecs.umich.edu pkt->senderState = senderState->saved; 2335081Sgblack@eecs.umich.edu delete senderState; 2345081Sgblack@eecs.umich.edu return false; 2355081Sgblack@eecs.umich.edu} 2365081Sgblack@eecs.umich.edu 2375081Sgblack@eecs.umich.eduvoid 2385081Sgblack@eecs.umich.eduRubyPort::M5Port::recvFunctional(PacketPtr pkt) 2395081Sgblack@eecs.umich.edu{ 2405081Sgblack@eecs.umich.edu DPRINTF(RubyPort, "Functional access caught for address %#x\n", 2415081Sgblack@eecs.umich.edu pkt->getAddr()); 2425081Sgblack@eecs.umich.edu 2435081Sgblack@eecs.umich.edu // Check for pio requests and directly send them to the dedicated 2445081Sgblack@eecs.umich.edu // pio port. 
2455081Sgblack@eecs.umich.edu if (!isPhysMemAddress(pkt->getAddr())) { 2465081Sgblack@eecs.umich.edu assert(ruby_port->pio_port.isConnected()); 2475081Sgblack@eecs.umich.edu DPRINTF(RubyPort, "Request for address 0x%#x is a pio request\n", 2485081Sgblack@eecs.umich.edu pkt->getAddr()); 2495081Sgblack@eecs.umich.edu panic("RubyPort::PioPort::recvFunctional() not implemented!\n"); 2505081Sgblack@eecs.umich.edu } 2515081Sgblack@eecs.umich.edu 2525081Sgblack@eecs.umich.edu assert(pkt->getAddr() + pkt->getSize() <= 2535081Sgblack@eecs.umich.edu line_address(Address(pkt->getAddr())).getAddress() + 2545081Sgblack@eecs.umich.edu RubySystem::getBlockSizeBytes()); 2555081Sgblack@eecs.umich.edu 2565081Sgblack@eecs.umich.edu bool accessSucceeded = false; 2575081Sgblack@eecs.umich.edu bool needsResponse = pkt->needsResponse(); 2585081Sgblack@eecs.umich.edu 2595081Sgblack@eecs.umich.edu // Do the functional access on ruby memory 2605081Sgblack@eecs.umich.edu if (pkt->isRead()) { 2615081Sgblack@eecs.umich.edu accessSucceeded = ruby_system->functionalRead(pkt); 2625081Sgblack@eecs.umich.edu } else if (pkt->isWrite()) { 2635081Sgblack@eecs.umich.edu accessSucceeded = ruby_system->functionalWrite(pkt); 2645081Sgblack@eecs.umich.edu } else { 2655081Sgblack@eecs.umich.edu panic("RubyPort: unsupported functional command %s\n", 2665081Sgblack@eecs.umich.edu pkt->cmdString()); 2675081Sgblack@eecs.umich.edu } 2685081Sgblack@eecs.umich.edu 2695081Sgblack@eecs.umich.edu // Unless the requester explicitly said otherwise, generate an error if 2705081Sgblack@eecs.umich.edu // the functional request failed 2715081Sgblack@eecs.umich.edu if (!accessSucceeded && !pkt->suppressFuncError()) { 2725081Sgblack@eecs.umich.edu fatal("Ruby functional %s failed for address %#x\n", 2735081Sgblack@eecs.umich.edu pkt->isWrite() ? 
"write" : "read", pkt->getAddr()); 2745081Sgblack@eecs.umich.edu } 2755081Sgblack@eecs.umich.edu 2765081Sgblack@eecs.umich.edu if (access_phys_mem) { 2775081Sgblack@eecs.umich.edu // The attached physmem contains the official version of data. 2785081Sgblack@eecs.umich.edu // The following command performs the real functional access. 2795081Sgblack@eecs.umich.edu // This line should be removed once Ruby supplies the official version 2805081Sgblack@eecs.umich.edu // of data. 2815081Sgblack@eecs.umich.edu ruby_port->system->getPhysMem().functionalAccess(pkt); 2825081Sgblack@eecs.umich.edu } 2835081Sgblack@eecs.umich.edu 2845081Sgblack@eecs.umich.edu // turn packet around to go back to requester if response expected 2855081Sgblack@eecs.umich.edu if (needsResponse) { 2865081Sgblack@eecs.umich.edu pkt->setFunctionalResponseStatus(accessSucceeded); 2875081Sgblack@eecs.umich.edu 2885081Sgblack@eecs.umich.edu // @todo There should not be a reverse call since the response is 2895081Sgblack@eecs.umich.edu // communicated through the packet pointer 2905081Sgblack@eecs.umich.edu // DPRINTF(RubyPort, "Sending packet back over port\n"); 2915081Sgblack@eecs.umich.edu // sendFunctional(pkt); 2925081Sgblack@eecs.umich.edu } 2935081Sgblack@eecs.umich.edu DPRINTF(RubyPort, "Functional access %s!\n", 2945081Sgblack@eecs.umich.edu accessSucceeded ? 
"successful":"failed"); 2955081Sgblack@eecs.umich.edu} 2965081Sgblack@eecs.umich.edu 2975081Sgblack@eecs.umich.eduvoid 2985081Sgblack@eecs.umich.eduRubyPort::ruby_hit_callback(PacketPtr pkt) 2995081Sgblack@eecs.umich.edu{ 3005081Sgblack@eecs.umich.edu // Retrieve the request port from the sender State 3015081Sgblack@eecs.umich.edu RubyPort::SenderState *senderState = 3025081Sgblack@eecs.umich.edu safe_cast<RubyPort::SenderState *>(pkt->senderState); 3035081Sgblack@eecs.umich.edu M5Port *port = senderState->port; 3045081Sgblack@eecs.umich.edu assert(port != NULL); 3055081Sgblack@eecs.umich.edu 3065081Sgblack@eecs.umich.edu // pop the sender state from the packet 3075081Sgblack@eecs.umich.edu pkt->senderState = senderState->saved; 3085069Sgblack@eecs.umich.edu delete senderState; 3094202Sbinkertn@umich.edu 3104202Sbinkertn@umich.edu port->hitCallback(pkt); 3114202Sbinkertn@umich.edu 3125069Sgblack@eecs.umich.edu // 3135069Sgblack@eecs.umich.edu // If we had to stall the M5Ports, wake them up because the sequencer 3145069Sgblack@eecs.umich.edu // likely has free resources now. 3155069Sgblack@eecs.umich.edu // 3164202Sbinkertn@umich.edu if (waitingOnSequencer) { 3174202Sbinkertn@umich.edu // 318 // Record the current list of ports to retry on a temporary list before 319 // calling sendRetry on those ports. sendRetry will cause an 320 // immediate retry, which may result in the ports being put back on the 321 // list. Therefore we want to clear the retryList before calling 322 // sendRetry. 323 // 324 std::list<M5Port*> curRetryList(retryList); 325 326 retryList.clear(); 327 waitingOnSequencer = false; 328 329 for (std::list<M5Port*>::iterator i = curRetryList.begin(); 330 i != curRetryList.end(); ++i) { 331 DPRINTF(RubyPort, 332 "Sequencer may now be free. 
SendRetry to port %s\n", 333 (*i)->name()); 334 (*i)->onRetryList(false); 335 (*i)->sendRetry(); 336 } 337 } 338 339 testDrainComplete(); 340} 341 342void 343RubyPort::testDrainComplete() 344{ 345 //If we weren't able to drain before, we might be able to now. 346 if (drainEvent != NULL) { 347 unsigned int drainCount = outstandingCount(); 348 DPRINTF(Drain, "Drain count: %u\n", drainCount); 349 if (drainCount == 0) { 350 DPRINTF(Drain, "RubyPort done draining, processing drain event\n"); 351 drainEvent->process(); 352 // Clear the drain event once we're done with it. 353 drainEvent = NULL; 354 } 355 } 356} 357 358unsigned int 359RubyPort::getChildDrainCount(Event *de) 360{ 361 int count = 0; 362 363 if (pio_port.isConnected()) { 364 count += pio_port.drain(de); 365 DPRINTF(Config, "count after pio check %d\n", count); 366 } 367 368 for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) { 369 count += (*p)->drain(de); 370 DPRINTF(Config, "count after slave port check %d\n", count); 371 } 372 373 for (std::vector<PioPort*>::iterator p = master_ports.begin(); 374 p != master_ports.end(); ++p) { 375 count += (*p)->drain(de); 376 DPRINTF(Config, "count after master port check %d\n", count); 377 } 378 379 DPRINTF(Config, "final count %d\n", count); 380 381 return count; 382} 383 384unsigned int 385RubyPort::drain(Event *de) 386{ 387 if (isDeadlockEventScheduled()) { 388 descheduleDeadlockEvent(); 389 } 390 391 // 392 // If the RubyPort is not empty, then it needs to clear all outstanding 393 // requests before it should call drainEvent->process() 394 // 395 DPRINTF(Config, "outstanding count %d\n", outstandingCount()); 396 bool need_drain = outstandingCount() > 0; 397 398 // 399 // Also, get the number of child ports that will also need to clear 400 // their buffered requests before they call drainEvent->process() 401 // 402 unsigned int child_drain_count = getChildDrainCount(de); 403 404 // Set status 405 if (need_drain) { 406 drainEvent = de; 407 408 
DPRINTF(Drain, "RubyPort not drained\n"); 409 changeState(SimObject::Draining); 410 return child_drain_count + 1; 411 } 412 413 drainEvent = NULL; 414 changeState(SimObject::Drained); 415 return child_drain_count; 416} 417 418void 419RubyPort::M5Port::hitCallback(PacketPtr pkt) 420{ 421 bool needsResponse = pkt->needsResponse(); 422 423 // 424 // Unless specified at configuraiton, all responses except failed SC 425 // and Flush operations access M5 physical memory. 426 // 427 bool accessPhysMem = access_phys_mem; 428 429 if (pkt->isLLSC()) { 430 if (pkt->isWrite()) { 431 if (pkt->req->getExtraData() != 0) { 432 // 433 // Successful SC packets convert to normal writes 434 // 435 pkt->convertScToWrite(); 436 } else { 437 // 438 // Failed SC packets don't access physical memory and thus 439 // the RubyPort itself must convert it to a response. 440 // 441 accessPhysMem = false; 442 } 443 } else { 444 // 445 // All LL packets convert to normal loads so that M5 PhysMem does 446 // not lock the blocks. 
447 // 448 pkt->convertLlToRead(); 449 } 450 } 451 452 // 453 // Flush requests don't access physical memory 454 // 455 if (pkt->isFlush()) { 456 accessPhysMem = false; 457 } 458 459 DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse); 460 461 if (accessPhysMem) { 462 ruby_port->system->getPhysMem().access(pkt); 463 } else if (needsResponse) { 464 pkt->makeResponse(); 465 } 466 467 // turn packet around to go back to requester if response expected 468 if (needsResponse) { 469 DPRINTF(RubyPort, "Sending packet back over port\n"); 470 // send next cycle 471 schedTimingResp(pkt, curTick() + g_system_ptr->clockPeriod()); 472 } else { 473 delete pkt; 474 } 475 DPRINTF(RubyPort, "Hit callback done!\n"); 476} 477 478AddrRangeList 479RubyPort::M5Port::getAddrRanges() const 480{ 481 // at the moment the assumption is that the master does not care 482 AddrRangeList ranges; 483 return ranges; 484} 485 486bool 487RubyPort::M5Port::isPhysMemAddress(Addr addr) 488{ 489 return ruby_port->system->isMemAddr(addr); 490} 491 492unsigned 493RubyPort::M5Port::deviceBlockSize() const 494{ 495 return (unsigned) RubySystem::getBlockSizeBytes(); 496} 497 498void 499RubyPort::ruby_eviction_callback(const Address& address) 500{ 501 DPRINTF(RubyPort, "Sending invalidations.\n"); 502 // should this really be using funcMasterId? 503 Request req(address.getAddress(), 0, 0, Request::funcMasterId); 504 for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) { 505 // check if the connected master port is snooping 506 if ((*p)->isSnooping()) { 507 Packet *pkt = new Packet(&req, MemCmd::InvalidationReq); 508 // send as a snoop request 509 (*p)->sendTimingSnoopReq(pkt); 510 } 511 } 512} 513