RubyPort.cc revision 7908
/*
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT 206876Ssteve.reinhardt@amd.com * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 216876Ssteve.reinhardt@amd.com * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 226876Ssteve.reinhardt@amd.com * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 236876Ssteve.reinhardt@amd.com * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 246876Ssteve.reinhardt@amd.com * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 256876Ssteve.reinhardt@amd.com * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 266876Ssteve.reinhardt@amd.com * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 276876Ssteve.reinhardt@amd.com */ 286876Ssteve.reinhardt@amd.com 297908Shestness@cs.utexas.edu#include "config/the_isa.hh" 307908Shestness@cs.utexas.edu#if THE_ISA == X86_ISA 317908Shestness@cs.utexas.edu#include "arch/x86/insts/microldstop.hh" 327908Shestness@cs.utexas.edu#endif // X86_ISA 337632SBrad.Beckmann@amd.com#include "cpu/testers/rubytest/RubyTester.hh" 346876Ssteve.reinhardt@amd.com#include "mem/physical.hh" 357039Snate@binkert.org#include "mem/ruby/slicc_interface/AbstractController.hh" 366285Snate@binkert.org#include "mem/ruby/system/RubyPort.hh" 376285Snate@binkert.org 386876Ssteve.reinhardt@amd.comRubyPort::RubyPort(const Params *p) 396893SBrad.Beckmann@amd.com : MemObject(p) 406876Ssteve.reinhardt@amd.com{ 416876Ssteve.reinhardt@amd.com m_version = p->version; 426876Ssteve.reinhardt@amd.com assert(m_version != -1); 436876Ssteve.reinhardt@amd.com 446893SBrad.Beckmann@amd.com physmem = p->physmem; 457039Snate@binkert.org 466882SBrad.Beckmann@amd.com m_controller = NULL; 476882SBrad.Beckmann@amd.com m_mandatory_q_ptr = NULL; 486876Ssteve.reinhardt@amd.com 496876Ssteve.reinhardt@amd.com m_request_cnt = 0; 506882SBrad.Beckmann@amd.com pio_port = NULL; 516893SBrad.Beckmann@amd.com physMemPort = NULL; 
526876Ssteve.reinhardt@amd.com} 536876Ssteve.reinhardt@amd.com 547039Snate@binkert.orgvoid 557039Snate@binkert.orgRubyPort::init() 566882SBrad.Beckmann@amd.com{ 576882SBrad.Beckmann@amd.com assert(m_controller != NULL); 586882SBrad.Beckmann@amd.com m_mandatory_q_ptr = m_controller->getMandatoryQueue(); 596882SBrad.Beckmann@amd.com} 606882SBrad.Beckmann@amd.com 616876Ssteve.reinhardt@amd.comPort * 626876Ssteve.reinhardt@amd.comRubyPort::getPort(const std::string &if_name, int idx) 636876Ssteve.reinhardt@amd.com{ 646882SBrad.Beckmann@amd.com if (if_name == "port") { 656882SBrad.Beckmann@amd.com return new M5Port(csprintf("%s-port%d", name(), idx), this); 667039Snate@binkert.org } 677039Snate@binkert.org 687039Snate@binkert.org if (if_name == "pio_port") { 696882SBrad.Beckmann@amd.com // ensure there is only one pio port 706882SBrad.Beckmann@amd.com assert(pio_port == NULL); 716882SBrad.Beckmann@amd.com 727039Snate@binkert.org pio_port = new PioPort(csprintf("%s-pio-port%d", name(), idx), this); 736882SBrad.Beckmann@amd.com 746882SBrad.Beckmann@amd.com return pio_port; 757039Snate@binkert.org } 767039Snate@binkert.org 777039Snate@binkert.org if (if_name == "physMemPort") { 786893SBrad.Beckmann@amd.com // RubyPort should only have one port to physical memory 796893SBrad.Beckmann@amd.com assert (physMemPort == NULL); 806893SBrad.Beckmann@amd.com 817039Snate@binkert.org physMemPort = new M5Port(csprintf("%s-physMemPort", name()), this); 827039Snate@binkert.org 836893SBrad.Beckmann@amd.com return physMemPort; 847039Snate@binkert.org } 857039Snate@binkert.org 867039Snate@binkert.org if (if_name == "functional") { 877039Snate@binkert.org // Calls for the functional port only want to access 887039Snate@binkert.org // functional memory. Therefore, directly pass these calls 897039Snate@binkert.org // ports to physmem. 
906893SBrad.Beckmann@amd.com assert(physmem != NULL); 916893SBrad.Beckmann@amd.com return physmem->getPort(if_name, idx); 926882SBrad.Beckmann@amd.com } 937039Snate@binkert.org 946876Ssteve.reinhardt@amd.com return NULL; 956876Ssteve.reinhardt@amd.com} 966882SBrad.Beckmann@amd.com 977039Snate@binkert.orgRubyPort::PioPort::PioPort(const std::string &_name, 986882SBrad.Beckmann@amd.com RubyPort *_port) 996882SBrad.Beckmann@amd.com : SimpleTimingPort(_name, _port) 1006882SBrad.Beckmann@amd.com{ 1016882SBrad.Beckmann@amd.com DPRINTF(Ruby, "creating port to ruby sequencer to cpu %s\n", _name); 1026882SBrad.Beckmann@amd.com ruby_port = _port; 1036882SBrad.Beckmann@amd.com} 1046882SBrad.Beckmann@amd.com 1057039Snate@binkert.orgRubyPort::M5Port::M5Port(const std::string &_name, 1066882SBrad.Beckmann@amd.com RubyPort *_port) 1076882SBrad.Beckmann@amd.com : SimpleTimingPort(_name, _port) 1086882SBrad.Beckmann@amd.com{ 1096882SBrad.Beckmann@amd.com DPRINTF(Ruby, "creating port from ruby sequcner to cpu %s\n", _name); 1106882SBrad.Beckmann@amd.com ruby_port = _port; 1116882SBrad.Beckmann@amd.com} 1126882SBrad.Beckmann@amd.com 1136882SBrad.Beckmann@amd.comTick 1146882SBrad.Beckmann@amd.comRubyPort::PioPort::recvAtomic(PacketPtr pkt) 1156882SBrad.Beckmann@amd.com{ 1166882SBrad.Beckmann@amd.com panic("RubyPort::PioPort::recvAtomic() not implemented!\n"); 1176882SBrad.Beckmann@amd.com return 0; 1186882SBrad.Beckmann@amd.com} 1196882SBrad.Beckmann@amd.com 1206882SBrad.Beckmann@amd.comTick 1216882SBrad.Beckmann@amd.comRubyPort::M5Port::recvAtomic(PacketPtr pkt) 1226882SBrad.Beckmann@amd.com{ 1236882SBrad.Beckmann@amd.com panic("RubyPort::M5Port::recvAtomic() not implemented!\n"); 1246882SBrad.Beckmann@amd.com return 0; 1256882SBrad.Beckmann@amd.com} 1266882SBrad.Beckmann@amd.com 1276882SBrad.Beckmann@amd.com 1286882SBrad.Beckmann@amd.combool 1296882SBrad.Beckmann@amd.comRubyPort::PioPort::recvTiming(PacketPtr pkt) 1306882SBrad.Beckmann@amd.com{ 1317039Snate@binkert.org // In FS 
mode, ruby memory will receive pio responses from devices 1327039Snate@binkert.org // and it must forward these responses back to the particular CPU. 1337039Snate@binkert.org DPRINTF(MemoryAccess, "Pio response for address %#x\n", pkt->getAddr()); 1346882SBrad.Beckmann@amd.com 1356882SBrad.Beckmann@amd.com assert(pkt->isResponse()); 1366882SBrad.Beckmann@amd.com 1376882SBrad.Beckmann@amd.com // First we must retrieve the request port from the sender State 1387039Snate@binkert.org RubyPort::SenderState *senderState = 1396882SBrad.Beckmann@amd.com safe_cast<RubyPort::SenderState *>(pkt->senderState); 1406882SBrad.Beckmann@amd.com M5Port *port = senderState->port; 1416882SBrad.Beckmann@amd.com assert(port != NULL); 1427039Snate@binkert.org 1436882SBrad.Beckmann@amd.com // pop the sender state from the packet 1446882SBrad.Beckmann@amd.com pkt->senderState = senderState->saved; 1456882SBrad.Beckmann@amd.com delete senderState; 1467039Snate@binkert.org 1476882SBrad.Beckmann@amd.com port->sendTiming(pkt); 1487039Snate@binkert.org 1496882SBrad.Beckmann@amd.com return true; 1506882SBrad.Beckmann@amd.com} 1516882SBrad.Beckmann@amd.com 1526882SBrad.Beckmann@amd.combool 1536882SBrad.Beckmann@amd.comRubyPort::M5Port::recvTiming(PacketPtr pkt) 1546882SBrad.Beckmann@amd.com{ 1557039Snate@binkert.org DPRINTF(MemoryAccess, 1567039Snate@binkert.org "Timing access caught for address %#x\n", pkt->getAddr()); 1576882SBrad.Beckmann@amd.com 1586882SBrad.Beckmann@amd.com //dsm: based on SimpleTimingPort::recvTiming(pkt); 1596882SBrad.Beckmann@amd.com 1607039Snate@binkert.org // The received packets should only be M5 requests, which should never 1617039Snate@binkert.org // get nacked. There used to be code to hanldle nacks here, but 1627039Snate@binkert.org // I'm pretty sure it didn't work correctly with the drain code, 1636882SBrad.Beckmann@amd.com // so that would need to be fixed if we ever added it back. 
1646882SBrad.Beckmann@amd.com assert(pkt->isRequest()); 1656882SBrad.Beckmann@amd.com 1666882SBrad.Beckmann@amd.com if (pkt->memInhibitAsserted()) { 1676882SBrad.Beckmann@amd.com warn("memInhibitAsserted???"); 1686882SBrad.Beckmann@amd.com // snooper will supply based on copy of packet 1696882SBrad.Beckmann@amd.com // still target's responsibility to delete packet 1706882SBrad.Beckmann@amd.com delete pkt; 1716882SBrad.Beckmann@amd.com return true; 1726882SBrad.Beckmann@amd.com } 1736882SBrad.Beckmann@amd.com 1746922SBrad.Beckmann@amd.com // Save the port in the sender state object to be used later to 1756922SBrad.Beckmann@amd.com // route the response 1766922SBrad.Beckmann@amd.com pkt->senderState = new SenderState(this, pkt->senderState); 1776922SBrad.Beckmann@amd.com 1786882SBrad.Beckmann@amd.com // Check for pio requests and directly send them to the dedicated 1796882SBrad.Beckmann@amd.com // pio port. 1806882SBrad.Beckmann@amd.com if (!isPhysMemAddress(pkt->getAddr())) { 1816882SBrad.Beckmann@amd.com assert(ruby_port->pio_port != NULL); 1827039Snate@binkert.org DPRINTF(MemoryAccess, 1836922SBrad.Beckmann@amd.com "Request for address 0x%#x is assumed to be a pio request\n", 1846922SBrad.Beckmann@amd.com pkt->getAddr()); 1856882SBrad.Beckmann@amd.com 1866882SBrad.Beckmann@amd.com return ruby_port->pio_port->sendTiming(pkt); 1876882SBrad.Beckmann@amd.com } 1886882SBrad.Beckmann@amd.com 1896882SBrad.Beckmann@amd.com // For DMA and CPU requests, translate them to ruby requests before 1906882SBrad.Beckmann@amd.com // sending them to our assigned ruby port. 
1916882SBrad.Beckmann@amd.com RubyRequestType type = RubyRequestType_NULL; 1926899SBrad.Beckmann@amd.com 1936899SBrad.Beckmann@amd.com // If valid, copy the pc to the ruby request 1946882SBrad.Beckmann@amd.com Addr pc = 0; 1956899SBrad.Beckmann@amd.com if (pkt->req->hasPC()) { 1966899SBrad.Beckmann@amd.com pc = pkt->req->getPC(); 1976899SBrad.Beckmann@amd.com } 1986899SBrad.Beckmann@amd.com 1997023SBrad.Beckmann@amd.com if (pkt->isLLSC()) { 2007023SBrad.Beckmann@amd.com if (pkt->isWrite()) { 2017023SBrad.Beckmann@amd.com DPRINTF(MemoryAccess, "Issuing SC\n"); 2027907Shestness@cs.utexas.edu type = RubyRequestType_Store_Conditional; 2036882SBrad.Beckmann@amd.com } else { 2047023SBrad.Beckmann@amd.com DPRINTF(MemoryAccess, "Issuing LL\n"); 2057023SBrad.Beckmann@amd.com assert(pkt->isRead()); 2067907Shestness@cs.utexas.edu type = RubyRequestType_Load_Linked; 2076882SBrad.Beckmann@amd.com } 2087908Shestness@cs.utexas.edu } else if (pkt->req->isLocked()) { 2097908Shestness@cs.utexas.edu if (pkt->isWrite()) { 2107908Shestness@cs.utexas.edu DPRINTF(MemoryAccess, "Issuing Locked RMW Write\n"); 2117908Shestness@cs.utexas.edu type = RubyRequestType_Locked_RMW_Write; 2127908Shestness@cs.utexas.edu } else { 2137908Shestness@cs.utexas.edu DPRINTF(MemoryAccess, "Issuing Locked RMW Read\n"); 2147908Shestness@cs.utexas.edu assert(pkt->isRead()); 2157908Shestness@cs.utexas.edu type = RubyRequestType_Locked_RMW_Read; 2167908Shestness@cs.utexas.edu } 2176922SBrad.Beckmann@amd.com } else { 2187023SBrad.Beckmann@amd.com if (pkt->isRead()) { 2197023SBrad.Beckmann@amd.com if (pkt->req->isInstFetch()) { 2207023SBrad.Beckmann@amd.com type = RubyRequestType_IFETCH; 2217023SBrad.Beckmann@amd.com } else { 2227908Shestness@cs.utexas.edu#if THE_ISA == X86_ISA 2237908Shestness@cs.utexas.edu uint32_t flags = pkt->req->getFlags(); 2247908Shestness@cs.utexas.edu bool storeCheck = flags & 2257908Shestness@cs.utexas.edu (TheISA::StoreCheck << TheISA::FlagShift); 2267908Shestness@cs.utexas.edu#else 
2277908Shestness@cs.utexas.edu bool storeCheck = false; 2287908Shestness@cs.utexas.edu#endif // X86_ISA 2297908Shestness@cs.utexas.edu if (storeCheck) { 2307908Shestness@cs.utexas.edu type = RubyRequestType_RMW_Read; 2317908Shestness@cs.utexas.edu } else { 2327908Shestness@cs.utexas.edu type = RubyRequestType_LD; 2337908Shestness@cs.utexas.edu } 2347023SBrad.Beckmann@amd.com } 2357023SBrad.Beckmann@amd.com } else if (pkt->isWrite()) { 2367908Shestness@cs.utexas.edu // 2377908Shestness@cs.utexas.edu // Note: M5 packets do not differentiate ST from RMW_Write 2387908Shestness@cs.utexas.edu // 2397023SBrad.Beckmann@amd.com type = RubyRequestType_ST; 2407023SBrad.Beckmann@amd.com } else { 2417023SBrad.Beckmann@amd.com panic("Unsupported ruby packet type\n"); 2427023SBrad.Beckmann@amd.com } 2436882SBrad.Beckmann@amd.com } 2446882SBrad.Beckmann@amd.com 2457039Snate@binkert.org RubyRequest ruby_request(pkt->getAddr(), pkt->getPtr<uint8_t>(), 2467039Snate@binkert.org pkt->getSize(), pc, type, 2477039Snate@binkert.org RubyAccessMode_Supervisor, pkt); 2486882SBrad.Beckmann@amd.com 2497906SBrad.Beckmann@amd.com assert(Address(ruby_request.paddr).getOffset() + ruby_request.len <= 2507906SBrad.Beckmann@amd.com RubySystem::getBlockSizeBytes()); 2517906SBrad.Beckmann@amd.com 2526882SBrad.Beckmann@amd.com // Submit the ruby request 2536922SBrad.Beckmann@amd.com RequestStatus requestStatus = ruby_port->makeRequest(ruby_request); 2547023SBrad.Beckmann@amd.com 2557550SBrad.Beckmann@amd.com // If the request successfully issued then we should return true. 2567023SBrad.Beckmann@amd.com // Otherwise, we need to delete the senderStatus we just created and return 2577023SBrad.Beckmann@amd.com // false. 
2587550SBrad.Beckmann@amd.com if (requestStatus == RequestStatus_Issued) { 2596922SBrad.Beckmann@amd.com return true; 2606882SBrad.Beckmann@amd.com } 2617023SBrad.Beckmann@amd.com 2627039Snate@binkert.org DPRINTF(MemoryAccess, 2637906SBrad.Beckmann@amd.com "Request for address %#x did not issue because %s\n", 2647039Snate@binkert.org pkt->getAddr(), RequestStatus_to_string(requestStatus)); 2657039Snate@binkert.org 2666922SBrad.Beckmann@amd.com SenderState* senderState = safe_cast<SenderState*>(pkt->senderState); 2676922SBrad.Beckmann@amd.com pkt->senderState = senderState->saved; 2686922SBrad.Beckmann@amd.com delete senderState; 2696922SBrad.Beckmann@amd.com return false; 2706882SBrad.Beckmann@amd.com} 2716882SBrad.Beckmann@amd.com 2726882SBrad.Beckmann@amd.comvoid 2736922SBrad.Beckmann@amd.comRubyPort::ruby_hit_callback(PacketPtr pkt) 2746882SBrad.Beckmann@amd.com{ 2756922SBrad.Beckmann@amd.com // Retrieve the request port from the sender State 2767039Snate@binkert.org RubyPort::SenderState *senderState = 2776922SBrad.Beckmann@amd.com safe_cast<RubyPort::SenderState *>(pkt->senderState); 2786922SBrad.Beckmann@amd.com M5Port *port = senderState->port; 2796922SBrad.Beckmann@amd.com assert(port != NULL); 2807039Snate@binkert.org 2816922SBrad.Beckmann@amd.com // pop the sender state from the packet 2826922SBrad.Beckmann@amd.com pkt->senderState = senderState->saved; 2836922SBrad.Beckmann@amd.com delete senderState; 2846882SBrad.Beckmann@amd.com 2856882SBrad.Beckmann@amd.com port->hitCallback(pkt); 2866882SBrad.Beckmann@amd.com} 2876882SBrad.Beckmann@amd.com 2886882SBrad.Beckmann@amd.comvoid 2896882SBrad.Beckmann@amd.comRubyPort::M5Port::hitCallback(PacketPtr pkt) 2906882SBrad.Beckmann@amd.com{ 2916882SBrad.Beckmann@amd.com bool needsResponse = pkt->needsResponse(); 2926882SBrad.Beckmann@amd.com 2937550SBrad.Beckmann@amd.com // 2947550SBrad.Beckmann@amd.com // All responses except failed SC operations access M5 physical memory 2957550SBrad.Beckmann@amd.com // 
2967550SBrad.Beckmann@amd.com bool accessPhysMem = true; 2977550SBrad.Beckmann@amd.com 2987550SBrad.Beckmann@amd.com if (pkt->isLLSC()) { 2997550SBrad.Beckmann@amd.com if (pkt->isWrite()) { 3007550SBrad.Beckmann@amd.com if (pkt->req->getExtraData() != 0) { 3017550SBrad.Beckmann@amd.com // 3027550SBrad.Beckmann@amd.com // Successful SC packets convert to normal writes 3037550SBrad.Beckmann@amd.com // 3047550SBrad.Beckmann@amd.com pkt->convertScToWrite(); 3057550SBrad.Beckmann@amd.com } else { 3067550SBrad.Beckmann@amd.com // 3077550SBrad.Beckmann@amd.com // Failed SC packets don't access physical memory and thus 3087550SBrad.Beckmann@amd.com // the RubyPort itself must convert it to a response. 3097550SBrad.Beckmann@amd.com // 3107550SBrad.Beckmann@amd.com accessPhysMem = false; 3117550SBrad.Beckmann@amd.com pkt->makeAtomicResponse(); 3127550SBrad.Beckmann@amd.com } 3137550SBrad.Beckmann@amd.com } else { 3147550SBrad.Beckmann@amd.com // 3157550SBrad.Beckmann@amd.com // All LL packets convert to normal loads so that M5 PhysMem does 3167550SBrad.Beckmann@amd.com // not lock the blocks. 
3177550SBrad.Beckmann@amd.com // 3187550SBrad.Beckmann@amd.com pkt->convertLlToRead(); 3197550SBrad.Beckmann@amd.com } 3207550SBrad.Beckmann@amd.com } 3217039Snate@binkert.org DPRINTF(MemoryAccess, "Hit callback needs response %d\n", needsResponse); 3226882SBrad.Beckmann@amd.com 3237550SBrad.Beckmann@amd.com if (accessPhysMem) { 3247550SBrad.Beckmann@amd.com ruby_port->physMemPort->sendAtomic(pkt); 3257550SBrad.Beckmann@amd.com } 3266882SBrad.Beckmann@amd.com 3276882SBrad.Beckmann@amd.com // turn packet around to go back to requester if response expected 3286882SBrad.Beckmann@amd.com if (needsResponse) { 3296893SBrad.Beckmann@amd.com // sendAtomic() should already have turned packet into 3306882SBrad.Beckmann@amd.com // atomic response 3316882SBrad.Beckmann@amd.com assert(pkt->isResponse()); 3326882SBrad.Beckmann@amd.com DPRINTF(MemoryAccess, "Sending packet back over port\n"); 3336882SBrad.Beckmann@amd.com sendTiming(pkt); 3346882SBrad.Beckmann@amd.com } else { 3356882SBrad.Beckmann@amd.com delete pkt; 3366882SBrad.Beckmann@amd.com } 3376882SBrad.Beckmann@amd.com DPRINTF(MemoryAccess, "Hit callback done!\n"); 3386882SBrad.Beckmann@amd.com} 3396882SBrad.Beckmann@amd.com 3406882SBrad.Beckmann@amd.combool 3416882SBrad.Beckmann@amd.comRubyPort::M5Port::sendTiming(PacketPtr pkt) 3426882SBrad.Beckmann@amd.com{ 3437558SBrad.Beckmann@amd.com //minimum latency, must be > 0 3447823Ssteve.reinhardt@amd.com schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock())); 3456882SBrad.Beckmann@amd.com return true; 3466882SBrad.Beckmann@amd.com} 3476882SBrad.Beckmann@amd.com 3486882SBrad.Beckmann@amd.combool 3496882SBrad.Beckmann@amd.comRubyPort::PioPort::sendTiming(PacketPtr pkt) 3506882SBrad.Beckmann@amd.com{ 3517558SBrad.Beckmann@amd.com //minimum latency, must be > 0 3527823Ssteve.reinhardt@amd.com schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock())); 3536882SBrad.Beckmann@amd.com return true; 3546882SBrad.Beckmann@amd.com} 3556882SBrad.Beckmann@amd.com 
3566882SBrad.Beckmann@amd.combool 3576882SBrad.Beckmann@amd.comRubyPort::M5Port::isPhysMemAddress(Addr addr) 3586882SBrad.Beckmann@amd.com{ 3596882SBrad.Beckmann@amd.com AddrRangeList physMemAddrList; 3606882SBrad.Beckmann@amd.com bool snoop = false; 3616893SBrad.Beckmann@amd.com ruby_port->physMemPort->getPeerAddressRanges(physMemAddrList, snoop); 3627039Snate@binkert.org for (AddrRangeIter iter = physMemAddrList.begin(); 3637039Snate@binkert.org iter != physMemAddrList.end(); 3647039Snate@binkert.org iter++) { 3656882SBrad.Beckmann@amd.com if (addr >= iter->start && addr <= iter->end) { 3666882SBrad.Beckmann@amd.com DPRINTF(MemoryAccess, "Request found in %#llx - %#llx range\n", 3676882SBrad.Beckmann@amd.com iter->start, iter->end); 3686882SBrad.Beckmann@amd.com return true; 3696882SBrad.Beckmann@amd.com } 3706882SBrad.Beckmann@amd.com } 3716882SBrad.Beckmann@amd.com return false; 3726882SBrad.Beckmann@amd.com} 373