RubyPort.cc revision 7906
/*
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu/testers/rubytest/RubyTester.hh"
#include "mem/physical.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/RubyPort.hh"

RubyPort::RubyPort(const Params *p)
    : MemObject(p)
{
    m_version = p->version;
    assert(m_version != -1);

    physmem = p->physmem;

    m_controller = NULL;
    m_mandatory_q_ptr = NULL;

    m_request_cnt = 0;
    pio_port = NULL;
    physMemPort = NULL;
}

void
RubyPort::init()
{
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
}

Port *
RubyPort::getPort(const std::string &if_name, int idx)
{
    if (if_name == "port") {
        return new M5Port(csprintf("%s-port%d", name(), idx), this);
    }

    if (if_name == "pio_port") {
        // ensure there is only one pio port
        assert(pio_port == NULL);

        pio_port = new PioPort(csprintf("%s-pio-port%d", name(), idx), this);

        return pio_port;
    }

    if (if_name == "physMemPort") {
        // RubyPort should only have one port to physical memory
        assert(physMemPort == NULL);

        physMemPort = new M5Port(csprintf("%s-physMemPort", name()), this);

        return physMemPort;
    }

    if (if_name == "functional") {
        // Calls for the functional port only want to access functional
        // memory. Therefore, directly pass these calls to physmem.
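        // (Note: returning physmem's own port here means functional
        // accesses bypass the Ruby protocol entirely and go straight to
        // M5 physical memory.)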
        assert(physmem != NULL);
        return physmem->getPort(if_name, idx);
    }

    return NULL;
}

RubyPort::PioPort::PioPort(const std::string &_name,
                           RubyPort *_port)
    : SimpleTimingPort(_name, _port)
{
    DPRINTF(Ruby, "creating port to ruby sequencer to cpu %s\n", _name);
    ruby_port = _port;
}

RubyPort::M5Port::M5Port(const std::string &_name,
                         RubyPort *_port)
    : SimpleTimingPort(_name, _port)
{
    DPRINTF(Ruby, "creating port from ruby sequencer to cpu %s\n", _name);
    ruby_port = _port;
}

Tick
RubyPort::PioPort::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::PioPort::recvAtomic() not implemented!\n");
    return 0;
}

Tick
RubyPort::M5Port::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::M5Port::recvAtomic() not implemented!\n");
    return 0;
}

bool
RubyPort::PioPort::recvTiming(PacketPtr pkt)
{
    // In FS mode, ruby memory will receive pio responses from devices
    // and it must forward these responses back to the particular CPU.
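    // Conceptually, pkt->senderState is a stack threaded through the
    // packet.  M5Port::recvTiming() below pushes a node recording the
    // requesting port:
    //     pkt->senderState = new SenderState(this, pkt->senderState);
    // and the code here pops that node to recover the port on which the
    // response must be sent back.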
    DPRINTF(MemoryAccess, "Pio response for address %#x\n", pkt->getAddr());

    assert(pkt->isResponse());

    // First we must retrieve the request port from the sender state
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->senderState);
    M5Port *port = senderState->port;
    assert(port != NULL);

    // pop the sender state from the packet
    pkt->senderState = senderState->saved;
    delete senderState;

    port->sendTiming(pkt);

    return true;
}

bool
RubyPort::M5Port::recvTiming(PacketPtr pkt)
{
    DPRINTF(MemoryAccess,
            "Timing access caught for address %#x\n", pkt->getAddr());

    // dsm: based on SimpleTimingPort::recvTiming(pkt);

    // The received packets should only be M5 requests, which should never
    // get nacked. There used to be code to handle nacks here, but
    // I'm pretty sure it didn't work correctly with the drain code,
    // so that would need to be fixed if we ever added it back.
    assert(pkt->isRequest());

    if (pkt->memInhibitAsserted()) {
        warn("memInhibitAsserted???");
        // snooper will supply based on copy of packet
        // still target's responsibility to delete packet
        delete pkt;
        return true;
    }

    // Save the port in the sender state object to be used later to
    // route the response
    pkt->senderState = new SenderState(this, pkt->senderState);

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(ruby_port->pio_port != NULL);
        DPRINTF(MemoryAccess,
                "Request for address %#x is assumed to be a pio request\n",
                pkt->getAddr());

        return ruby_port->pio_port->sendTiming(pkt);
    }

    // For DMA and CPU requests, translate them to ruby requests before
    // sending them to our assigned ruby port.
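    // In sketch form, the translation below is:
    //   LLSC read         -> RubyRequestType_Locked_Read  (load-linked)
    //   LLSC write        -> RubyRequestType_Locked_Write (store-conditional)
    //   instruction fetch -> RubyRequestType_IFETCH
    //   other read        -> RubyRequestType_LD
    //   write             -> RubyRequestType_ST
    //   read-write        -> RubyRequestType_RMW_Write (dead branch; see
    //                        the FIXME below)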
    RubyRequestType type = RubyRequestType_NULL;

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            DPRINTF(MemoryAccess, "Issuing SC\n");
            type = RubyRequestType_Locked_Write;
        } else {
            DPRINTF(MemoryAccess, "Issuing LL\n");
            assert(pkt->isRead());
            type = RubyRequestType_Locked_Read;
        }
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                type = RubyRequestType_IFETCH;
            } else {
                type = RubyRequestType_LD;
            }
        } else if (pkt->isWrite()) {
            type = RubyRequestType_ST;
        } else if (pkt->isReadWrite()) {
            // FIXME: This conditional will never be executed
            // because isReadWrite() is just an OR of isRead() and
            // isWrite(). Furthermore, just because the packet is a
            // read/write request does not necessarily mean it is a
            // read-modify-write atomic operation.
            type = RubyRequestType_RMW_Write;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RubyRequest ruby_request(pkt->getAddr(), pkt->getPtr<uint8_t>(),
                             pkt->getSize(), pc, type,
                             RubyAccessMode_Supervisor, pkt);

    assert(Address(ruby_request.paddr).getOffset() + ruby_request.len <=
           RubySystem::getBlockSizeBytes());

    // Submit the ruby request
    RequestStatus requestStatus = ruby_port->makeRequest(ruby_request);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to delete the SenderState we just created and
    // return false.
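    // Returning false tells the sender to retry the request later; the
    // retry will re-enter recvTiming() and push a fresh SenderState, so
    // the one pushed above must be popped and freed before bailing out.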
    if (requestStatus == RequestStatus_Issued) {
        return true;
    }

    DPRINTF(MemoryAccess,
            "Request for address %#x did not issue because %s\n",
            pkt->getAddr(), RequestStatus_to_string(requestStatus));

    SenderState* senderState = safe_cast<SenderState*>(pkt->senderState);
    pkt->senderState = senderState->saved;
    delete senderState;
    return false;
}

void
RubyPort::ruby_hit_callback(PacketPtr pkt)
{
    // Retrieve the request port from the sender state
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->senderState);
    M5Port *port = senderState->port;
    assert(port != NULL);

    // pop the sender state from the packet
    pkt->senderState = senderState->saved;
    delete senderState;

    port->hitCallback(pkt);
}

void
RubyPort::M5Port::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();

    //
    // All responses except failed SC operations access M5 physical memory
    //
    bool accessPhysMem = true;

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            if (pkt->req->getExtraData() != 0) {
                //
                // Successful SC packets convert to normal writes
                //
                pkt->convertScToWrite();
            } else {
                //
                // Failed SC packets don't access physical memory and thus
                // the RubyPort itself must convert it to a response.
                //
                accessPhysMem = false;
                pkt->makeAtomicResponse();
            }
        } else {
            //
            // All LL packets convert to normal loads so that M5 PhysMem does
            // not lock the blocks.
            //
            pkt->convertLlToRead();
        }
    }
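    // (The SC outcome convention above: the Ruby sequencer is assumed to
    // record success in the request's extra data, so non-zero extra data
    // means the store-conditional succeeded.)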
    DPRINTF(MemoryAccess, "Hit callback needs response %d\n", needsResponse);

    if (accessPhysMem) {
        ruby_port->physMemPort->sendAtomic(pkt);
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // sendAtomic() should already have turned packet into
        // atomic response
        assert(pkt->isResponse());
        DPRINTF(MemoryAccess, "Sending packet back over port\n");
        sendTiming(pkt);
    } else {
        delete pkt;
    }
    DPRINTF(MemoryAccess, "Hit callback done!\n");
}

bool
RubyPort::M5Port::sendTiming(PacketPtr pkt)
{
    // minimum latency, must be > 0
    schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
    return true;
}

bool
RubyPort::PioPort::sendTiming(PacketPtr pkt)
{
    // minimum latency, must be > 0
    schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
    return true;
}

bool
RubyPort::M5Port::isPhysMemAddress(Addr addr)
{
    AddrRangeList physMemAddrList;
    bool snoop = false;
    ruby_port->physMemPort->getPeerAddressRanges(physMemAddrList, snoop);
    for (AddrRangeIter iter = physMemAddrList.begin();
         iter != physMemAddrList.end();
         iter++) {
        if (addr >= iter->start && addr <= iter->end) {
            DPRINTF(MemoryAccess, "Request found in %#llx - %#llx range\n",
                    iter->start, iter->end);
            return true;
        }
    }
    return false;
}