// RubyPort.cc revision 8232
/*
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT 206876Ssteve.reinhardt@amd.com * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 216876Ssteve.reinhardt@amd.com * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 226876Ssteve.reinhardt@amd.com * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 236876Ssteve.reinhardt@amd.com * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 246876Ssteve.reinhardt@amd.com * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 256876Ssteve.reinhardt@amd.com * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 266876Ssteve.reinhardt@amd.com * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 276876Ssteve.reinhardt@amd.com */ 286876Ssteve.reinhardt@amd.com 297908Shestness@cs.utexas.edu#include "config/the_isa.hh" 307908Shestness@cs.utexas.edu#if THE_ISA == X86_ISA 317908Shestness@cs.utexas.edu#include "arch/x86/insts/microldstop.hh" 327908Shestness@cs.utexas.edu#endif // X86_ISA 337632SBrad.Beckmann@amd.com#include "cpu/testers/rubytest/RubyTester.hh" 348232Snate@binkert.org#include "debug/MemoryAccess.hh" 358232Snate@binkert.org#include "debug/Ruby.hh" 367039Snate@binkert.org#include "mem/ruby/slicc_interface/AbstractController.hh" 376285Snate@binkert.org#include "mem/ruby/system/RubyPort.hh" 388229Snate@binkert.org#include "mem/physical.hh" 396285Snate@binkert.org 406876Ssteve.reinhardt@amd.comRubyPort::RubyPort(const Params *p) 416893SBrad.Beckmann@amd.com : MemObject(p) 426876Ssteve.reinhardt@amd.com{ 436876Ssteve.reinhardt@amd.com m_version = p->version; 446876Ssteve.reinhardt@amd.com assert(m_version != -1); 456876Ssteve.reinhardt@amd.com 466893SBrad.Beckmann@amd.com physmem = p->physmem; 477039Snate@binkert.org 486882SBrad.Beckmann@amd.com m_controller = NULL; 496882SBrad.Beckmann@amd.com m_mandatory_q_ptr = NULL; 506876Ssteve.reinhardt@amd.com 516876Ssteve.reinhardt@amd.com m_request_cnt = 0; 
526882SBrad.Beckmann@amd.com pio_port = NULL; 536893SBrad.Beckmann@amd.com physMemPort = NULL; 547910SBrad.Beckmann@amd.com 557910SBrad.Beckmann@amd.com m_usingRubyTester = p->using_ruby_tester; 567915SBrad.Beckmann@amd.com access_phys_mem = p->access_phys_mem; 576876Ssteve.reinhardt@amd.com} 586876Ssteve.reinhardt@amd.com 597039Snate@binkert.orgvoid 607039Snate@binkert.orgRubyPort::init() 616882SBrad.Beckmann@amd.com{ 626882SBrad.Beckmann@amd.com assert(m_controller != NULL); 636882SBrad.Beckmann@amd.com m_mandatory_q_ptr = m_controller->getMandatoryQueue(); 646882SBrad.Beckmann@amd.com} 656882SBrad.Beckmann@amd.com 666876Ssteve.reinhardt@amd.comPort * 676876Ssteve.reinhardt@amd.comRubyPort::getPort(const std::string &if_name, int idx) 686876Ssteve.reinhardt@amd.com{ 696882SBrad.Beckmann@amd.com if (if_name == "port") { 707915SBrad.Beckmann@amd.com return new M5Port(csprintf("%s-port%d", name(), idx), this, 717915SBrad.Beckmann@amd.com access_phys_mem); 727039Snate@binkert.org } 737039Snate@binkert.org 747039Snate@binkert.org if (if_name == "pio_port") { 756882SBrad.Beckmann@amd.com // ensure there is only one pio port 766882SBrad.Beckmann@amd.com assert(pio_port == NULL); 776882SBrad.Beckmann@amd.com 787039Snate@binkert.org pio_port = new PioPort(csprintf("%s-pio-port%d", name(), idx), this); 796882SBrad.Beckmann@amd.com 806882SBrad.Beckmann@amd.com return pio_port; 817039Snate@binkert.org } 827039Snate@binkert.org 837039Snate@binkert.org if (if_name == "physMemPort") { 846893SBrad.Beckmann@amd.com // RubyPort should only have one port to physical memory 856893SBrad.Beckmann@amd.com assert (physMemPort == NULL); 866893SBrad.Beckmann@amd.com 877915SBrad.Beckmann@amd.com physMemPort = new M5Port(csprintf("%s-physMemPort", name()), this, 887915SBrad.Beckmann@amd.com access_phys_mem); 897039Snate@binkert.org 906893SBrad.Beckmann@amd.com return physMemPort; 917039Snate@binkert.org } 927039Snate@binkert.org 937039Snate@binkert.org if (if_name == "functional") { 
947039Snate@binkert.org // Calls for the functional port only want to access 957039Snate@binkert.org // functional memory. Therefore, directly pass these calls 967039Snate@binkert.org // ports to physmem. 976893SBrad.Beckmann@amd.com assert(physmem != NULL); 986893SBrad.Beckmann@amd.com return physmem->getPort(if_name, idx); 996882SBrad.Beckmann@amd.com } 1007039Snate@binkert.org 1016876Ssteve.reinhardt@amd.com return NULL; 1026876Ssteve.reinhardt@amd.com} 1036882SBrad.Beckmann@amd.com 1047039Snate@binkert.orgRubyPort::PioPort::PioPort(const std::string &_name, 1056882SBrad.Beckmann@amd.com RubyPort *_port) 1066882SBrad.Beckmann@amd.com : SimpleTimingPort(_name, _port) 1076882SBrad.Beckmann@amd.com{ 1088161SBrad.Beckmann@amd.com DPRINTF(RubyPort, "creating port to ruby sequencer to cpu %s\n", _name); 1096882SBrad.Beckmann@amd.com ruby_port = _port; 1106882SBrad.Beckmann@amd.com} 1116882SBrad.Beckmann@amd.com 1127039Snate@binkert.orgRubyPort::M5Port::M5Port(const std::string &_name, 1137915SBrad.Beckmann@amd.com RubyPort *_port, bool _access_phys_mem) 1146882SBrad.Beckmann@amd.com : SimpleTimingPort(_name, _port) 1156882SBrad.Beckmann@amd.com{ 1168161SBrad.Beckmann@amd.com DPRINTF(RubyPort, "creating port from ruby sequcner to cpu %s\n", _name); 1176882SBrad.Beckmann@amd.com ruby_port = _port; 1187910SBrad.Beckmann@amd.com _onRetryList = false; 1197915SBrad.Beckmann@amd.com access_phys_mem = _access_phys_mem; 1206882SBrad.Beckmann@amd.com} 1216882SBrad.Beckmann@amd.com 1226882SBrad.Beckmann@amd.comTick 1236882SBrad.Beckmann@amd.comRubyPort::PioPort::recvAtomic(PacketPtr pkt) 1246882SBrad.Beckmann@amd.com{ 1256882SBrad.Beckmann@amd.com panic("RubyPort::PioPort::recvAtomic() not implemented!\n"); 1266882SBrad.Beckmann@amd.com return 0; 1276882SBrad.Beckmann@amd.com} 1286882SBrad.Beckmann@amd.com 1296882SBrad.Beckmann@amd.comTick 1306882SBrad.Beckmann@amd.comRubyPort::M5Port::recvAtomic(PacketPtr pkt) 1316882SBrad.Beckmann@amd.com{ 1326882SBrad.Beckmann@amd.com 
panic("RubyPort::M5Port::recvAtomic() not implemented!\n"); 1336882SBrad.Beckmann@amd.com return 0; 1346882SBrad.Beckmann@amd.com} 1356882SBrad.Beckmann@amd.com 1366882SBrad.Beckmann@amd.com 1376882SBrad.Beckmann@amd.combool 1386882SBrad.Beckmann@amd.comRubyPort::PioPort::recvTiming(PacketPtr pkt) 1396882SBrad.Beckmann@amd.com{ 1407039Snate@binkert.org // In FS mode, ruby memory will receive pio responses from devices 1417039Snate@binkert.org // and it must forward these responses back to the particular CPU. 1428161SBrad.Beckmann@amd.com DPRINTF(RubyPort, "Pio response for address %#x\n", pkt->getAddr()); 1436882SBrad.Beckmann@amd.com 1446882SBrad.Beckmann@amd.com assert(pkt->isResponse()); 1456882SBrad.Beckmann@amd.com 1466882SBrad.Beckmann@amd.com // First we must retrieve the request port from the sender State 1477039Snate@binkert.org RubyPort::SenderState *senderState = 1486882SBrad.Beckmann@amd.com safe_cast<RubyPort::SenderState *>(pkt->senderState); 1496882SBrad.Beckmann@amd.com M5Port *port = senderState->port; 1506882SBrad.Beckmann@amd.com assert(port != NULL); 1517039Snate@binkert.org 1526882SBrad.Beckmann@amd.com // pop the sender state from the packet 1536882SBrad.Beckmann@amd.com pkt->senderState = senderState->saved; 1546882SBrad.Beckmann@amd.com delete senderState; 1557039Snate@binkert.org 1566882SBrad.Beckmann@amd.com port->sendTiming(pkt); 1577039Snate@binkert.org 1586882SBrad.Beckmann@amd.com return true; 1596882SBrad.Beckmann@amd.com} 1606882SBrad.Beckmann@amd.com 1616882SBrad.Beckmann@amd.combool 1626882SBrad.Beckmann@amd.comRubyPort::M5Port::recvTiming(PacketPtr pkt) 1636882SBrad.Beckmann@amd.com{ 1648161SBrad.Beckmann@amd.com DPRINTF(RubyPort, 1657039Snate@binkert.org "Timing access caught for address %#x\n", pkt->getAddr()); 1666882SBrad.Beckmann@amd.com 1676882SBrad.Beckmann@amd.com //dsm: based on SimpleTimingPort::recvTiming(pkt); 1686882SBrad.Beckmann@amd.com 1697039Snate@binkert.org // The received packets should only be M5 requests, 
which should never 1707039Snate@binkert.org // get nacked. There used to be code to hanldle nacks here, but 1717039Snate@binkert.org // I'm pretty sure it didn't work correctly with the drain code, 1726882SBrad.Beckmann@amd.com // so that would need to be fixed if we ever added it back. 1736882SBrad.Beckmann@amd.com assert(pkt->isRequest()); 1746882SBrad.Beckmann@amd.com 1756882SBrad.Beckmann@amd.com if (pkt->memInhibitAsserted()) { 1766882SBrad.Beckmann@amd.com warn("memInhibitAsserted???"); 1776882SBrad.Beckmann@amd.com // snooper will supply based on copy of packet 1786882SBrad.Beckmann@amd.com // still target's responsibility to delete packet 1796882SBrad.Beckmann@amd.com delete pkt; 1806882SBrad.Beckmann@amd.com return true; 1816882SBrad.Beckmann@amd.com } 1826882SBrad.Beckmann@amd.com 1836922SBrad.Beckmann@amd.com // Save the port in the sender state object to be used later to 1846922SBrad.Beckmann@amd.com // route the response 1856922SBrad.Beckmann@amd.com pkt->senderState = new SenderState(this, pkt->senderState); 1866922SBrad.Beckmann@amd.com 1876882SBrad.Beckmann@amd.com // Check for pio requests and directly send them to the dedicated 1886882SBrad.Beckmann@amd.com // pio port. 1896882SBrad.Beckmann@amd.com if (!isPhysMemAddress(pkt->getAddr())) { 1906882SBrad.Beckmann@amd.com assert(ruby_port->pio_port != NULL); 1918161SBrad.Beckmann@amd.com DPRINTF(RubyPort, 1926922SBrad.Beckmann@amd.com "Request for address 0x%#x is assumed to be a pio request\n", 1936922SBrad.Beckmann@amd.com pkt->getAddr()); 1946882SBrad.Beckmann@amd.com 1956882SBrad.Beckmann@amd.com return ruby_port->pio_port->sendTiming(pkt); 1966882SBrad.Beckmann@amd.com } 1976882SBrad.Beckmann@amd.com 1986882SBrad.Beckmann@amd.com // For DMA and CPU requests, translate them to ruby requests before 1996882SBrad.Beckmann@amd.com // sending them to our assigned ruby port. 
2006882SBrad.Beckmann@amd.com RubyRequestType type = RubyRequestType_NULL; 2016899SBrad.Beckmann@amd.com 2026899SBrad.Beckmann@amd.com // If valid, copy the pc to the ruby request 2036882SBrad.Beckmann@amd.com Addr pc = 0; 2046899SBrad.Beckmann@amd.com if (pkt->req->hasPC()) { 2056899SBrad.Beckmann@amd.com pc = pkt->req->getPC(); 2066899SBrad.Beckmann@amd.com } 2076899SBrad.Beckmann@amd.com 2087023SBrad.Beckmann@amd.com if (pkt->isLLSC()) { 2097023SBrad.Beckmann@amd.com if (pkt->isWrite()) { 2108161SBrad.Beckmann@amd.com DPRINTF(RubyPort, "Issuing SC\n"); 2117907Shestness@cs.utexas.edu type = RubyRequestType_Store_Conditional; 2126882SBrad.Beckmann@amd.com } else { 2138161SBrad.Beckmann@amd.com DPRINTF(RubyPort, "Issuing LL\n"); 2147023SBrad.Beckmann@amd.com assert(pkt->isRead()); 2157907Shestness@cs.utexas.edu type = RubyRequestType_Load_Linked; 2166882SBrad.Beckmann@amd.com } 2177908Shestness@cs.utexas.edu } else if (pkt->req->isLocked()) { 2187908Shestness@cs.utexas.edu if (pkt->isWrite()) { 2198161SBrad.Beckmann@amd.com DPRINTF(RubyPort, "Issuing Locked RMW Write\n"); 2207908Shestness@cs.utexas.edu type = RubyRequestType_Locked_RMW_Write; 2217908Shestness@cs.utexas.edu } else { 2228161SBrad.Beckmann@amd.com DPRINTF(RubyPort, "Issuing Locked RMW Read\n"); 2237908Shestness@cs.utexas.edu assert(pkt->isRead()); 2247908Shestness@cs.utexas.edu type = RubyRequestType_Locked_RMW_Read; 2257908Shestness@cs.utexas.edu } 2266922SBrad.Beckmann@amd.com } else { 2277023SBrad.Beckmann@amd.com if (pkt->isRead()) { 2287023SBrad.Beckmann@amd.com if (pkt->req->isInstFetch()) { 2297023SBrad.Beckmann@amd.com type = RubyRequestType_IFETCH; 2307023SBrad.Beckmann@amd.com } else { 2317908Shestness@cs.utexas.edu#if THE_ISA == X86_ISA 2327908Shestness@cs.utexas.edu uint32_t flags = pkt->req->getFlags(); 2337908Shestness@cs.utexas.edu bool storeCheck = flags & 2347908Shestness@cs.utexas.edu (TheISA::StoreCheck << TheISA::FlagShift); 2357908Shestness@cs.utexas.edu#else 
2367908Shestness@cs.utexas.edu bool storeCheck = false; 2377908Shestness@cs.utexas.edu#endif // X86_ISA 2387908Shestness@cs.utexas.edu if (storeCheck) { 2397908Shestness@cs.utexas.edu type = RubyRequestType_RMW_Read; 2407908Shestness@cs.utexas.edu } else { 2417908Shestness@cs.utexas.edu type = RubyRequestType_LD; 2427908Shestness@cs.utexas.edu } 2437023SBrad.Beckmann@amd.com } 2447023SBrad.Beckmann@amd.com } else if (pkt->isWrite()) { 2457908Shestness@cs.utexas.edu // 2467908Shestness@cs.utexas.edu // Note: M5 packets do not differentiate ST from RMW_Write 2477908Shestness@cs.utexas.edu // 2487023SBrad.Beckmann@amd.com type = RubyRequestType_ST; 2498184Ssomayeh@cs.wisc.edu } else if (pkt->isFlush()) { 2508184Ssomayeh@cs.wisc.edu type = RubyRequestType_FLUSH; 2517023SBrad.Beckmann@amd.com } else { 2527023SBrad.Beckmann@amd.com panic("Unsupported ruby packet type\n"); 2537023SBrad.Beckmann@amd.com } 2546882SBrad.Beckmann@amd.com } 2556882SBrad.Beckmann@amd.com 2567915SBrad.Beckmann@amd.com RubyRequest ruby_request(pkt->getAddr(), pkt->getPtr<uint8_t>(true), 2577039Snate@binkert.org pkt->getSize(), pc, type, 2587039Snate@binkert.org RubyAccessMode_Supervisor, pkt); 2596882SBrad.Beckmann@amd.com 2608174Snilay@cs.wisc.edu assert(ruby_request.m_PhysicalAddress.getOffset() + ruby_request.m_Size <= 2617906SBrad.Beckmann@amd.com RubySystem::getBlockSizeBytes()); 2627906SBrad.Beckmann@amd.com 2636882SBrad.Beckmann@amd.com // Submit the ruby request 2646922SBrad.Beckmann@amd.com RequestStatus requestStatus = ruby_port->makeRequest(ruby_request); 2657023SBrad.Beckmann@amd.com 2667550SBrad.Beckmann@amd.com // If the request successfully issued then we should return true. 2677023SBrad.Beckmann@amd.com // Otherwise, we need to delete the senderStatus we just created and return 2687023SBrad.Beckmann@amd.com // false. 
2697550SBrad.Beckmann@amd.com if (requestStatus == RequestStatus_Issued) { 2708161SBrad.Beckmann@amd.com DPRINTF(RubyPort, "Request %#x issued\n", pkt->getAddr()); 2716922SBrad.Beckmann@amd.com return true; 2726882SBrad.Beckmann@amd.com } 2737023SBrad.Beckmann@amd.com 2747910SBrad.Beckmann@amd.com // 2757910SBrad.Beckmann@amd.com // Unless one is using the ruby tester, record the stalled M5 port for 2767910SBrad.Beckmann@amd.com // later retry when the sequencer becomes free. 2777910SBrad.Beckmann@amd.com // 2787910SBrad.Beckmann@amd.com if (!ruby_port->m_usingRubyTester) { 2797910SBrad.Beckmann@amd.com ruby_port->addToRetryList(this); 2807910SBrad.Beckmann@amd.com } 2817910SBrad.Beckmann@amd.com 2828161SBrad.Beckmann@amd.com DPRINTF(RubyPort, 2837906SBrad.Beckmann@amd.com "Request for address %#x did not issue because %s\n", 2847039Snate@binkert.org pkt->getAddr(), RequestStatus_to_string(requestStatus)); 2857039Snate@binkert.org 2866922SBrad.Beckmann@amd.com SenderState* senderState = safe_cast<SenderState*>(pkt->senderState); 2876922SBrad.Beckmann@amd.com pkt->senderState = senderState->saved; 2886922SBrad.Beckmann@amd.com delete senderState; 2896922SBrad.Beckmann@amd.com return false; 2906882SBrad.Beckmann@amd.com} 2916882SBrad.Beckmann@amd.com 2926882SBrad.Beckmann@amd.comvoid 2936922SBrad.Beckmann@amd.comRubyPort::ruby_hit_callback(PacketPtr pkt) 2946882SBrad.Beckmann@amd.com{ 2956922SBrad.Beckmann@amd.com // Retrieve the request port from the sender State 2967039Snate@binkert.org RubyPort::SenderState *senderState = 2976922SBrad.Beckmann@amd.com safe_cast<RubyPort::SenderState *>(pkt->senderState); 2986922SBrad.Beckmann@amd.com M5Port *port = senderState->port; 2996922SBrad.Beckmann@amd.com assert(port != NULL); 3007039Snate@binkert.org 3016922SBrad.Beckmann@amd.com // pop the sender state from the packet 3026922SBrad.Beckmann@amd.com pkt->senderState = senderState->saved; 3036922SBrad.Beckmann@amd.com delete senderState; 3046882SBrad.Beckmann@amd.com 
3056882SBrad.Beckmann@amd.com port->hitCallback(pkt); 3067910SBrad.Beckmann@amd.com 3077910SBrad.Beckmann@amd.com // 3087910SBrad.Beckmann@amd.com // If we had to stall the M5Ports, wake them up because the sequencer 3097910SBrad.Beckmann@amd.com // likely has free resources now. 3107910SBrad.Beckmann@amd.com // 3117910SBrad.Beckmann@amd.com if (waitingOnSequencer) { 3128162SBrad.Beckmann@amd.com // 3138162SBrad.Beckmann@amd.com // Record the current list of ports to retry on a temporary list before 3148162SBrad.Beckmann@amd.com // calling sendRetry on those ports. sendRetry will cause an 3158162SBrad.Beckmann@amd.com // immediate retry, which may result in the ports being put back on the 3168162SBrad.Beckmann@amd.com // list. Therefore we want to clear the retryList before calling 3178162SBrad.Beckmann@amd.com // sendRetry. 3188162SBrad.Beckmann@amd.com // 3198162SBrad.Beckmann@amd.com std::list<M5Port*> curRetryList(retryList); 3208162SBrad.Beckmann@amd.com 3218162SBrad.Beckmann@amd.com retryList.clear(); 3228162SBrad.Beckmann@amd.com waitingOnSequencer = false; 3238162SBrad.Beckmann@amd.com 3248162SBrad.Beckmann@amd.com for (std::list<M5Port*>::iterator i = curRetryList.begin(); 3258162SBrad.Beckmann@amd.com i != curRetryList.end(); ++i) { 3268162SBrad.Beckmann@amd.com DPRINTF(RubyPort, 3277910SBrad.Beckmann@amd.com "Sequencer may now be free. 
SendRetry to port %s\n", 3287910SBrad.Beckmann@amd.com (*i)->name()); 3298162SBrad.Beckmann@amd.com (*i)->onRetryList(false); 3308162SBrad.Beckmann@amd.com (*i)->sendRetry(); 3317910SBrad.Beckmann@amd.com } 3327910SBrad.Beckmann@amd.com } 3336882SBrad.Beckmann@amd.com} 3346882SBrad.Beckmann@amd.com 3356882SBrad.Beckmann@amd.comvoid 3366882SBrad.Beckmann@amd.comRubyPort::M5Port::hitCallback(PacketPtr pkt) 3376882SBrad.Beckmann@amd.com{ 3386882SBrad.Beckmann@amd.com bool needsResponse = pkt->needsResponse(); 3396882SBrad.Beckmann@amd.com 3407550SBrad.Beckmann@amd.com // 3417915SBrad.Beckmann@amd.com // Unless specified at configuraiton, all responses except failed SC 3428184Ssomayeh@cs.wisc.edu // and Flush operations access M5 physical memory. 3437550SBrad.Beckmann@amd.com // 3447915SBrad.Beckmann@amd.com bool accessPhysMem = access_phys_mem; 3457550SBrad.Beckmann@amd.com 3467550SBrad.Beckmann@amd.com if (pkt->isLLSC()) { 3477550SBrad.Beckmann@amd.com if (pkt->isWrite()) { 3487550SBrad.Beckmann@amd.com if (pkt->req->getExtraData() != 0) { 3497550SBrad.Beckmann@amd.com // 3507550SBrad.Beckmann@amd.com // Successful SC packets convert to normal writes 3517550SBrad.Beckmann@amd.com // 3527550SBrad.Beckmann@amd.com pkt->convertScToWrite(); 3537550SBrad.Beckmann@amd.com } else { 3547550SBrad.Beckmann@amd.com // 3557550SBrad.Beckmann@amd.com // Failed SC packets don't access physical memory and thus 3567550SBrad.Beckmann@amd.com // the RubyPort itself must convert it to a response. 3577550SBrad.Beckmann@amd.com // 3587550SBrad.Beckmann@amd.com accessPhysMem = false; 3597550SBrad.Beckmann@amd.com } 3607550SBrad.Beckmann@amd.com } else { 3617550SBrad.Beckmann@amd.com // 3627550SBrad.Beckmann@amd.com // All LL packets convert to normal loads so that M5 PhysMem does 3637550SBrad.Beckmann@amd.com // not lock the blocks. 
3647550SBrad.Beckmann@amd.com // 3657550SBrad.Beckmann@amd.com pkt->convertLlToRead(); 3667550SBrad.Beckmann@amd.com } 3677550SBrad.Beckmann@amd.com } 3688184Ssomayeh@cs.wisc.edu 3698184Ssomayeh@cs.wisc.edu // 3708184Ssomayeh@cs.wisc.edu // Flush requests don't access physical memory 3718184Ssomayeh@cs.wisc.edu // 3728184Ssomayeh@cs.wisc.edu if (pkt->isFlush()) { 3738184Ssomayeh@cs.wisc.edu accessPhysMem = false; 3748184Ssomayeh@cs.wisc.edu } 3758184Ssomayeh@cs.wisc.edu 3768161SBrad.Beckmann@amd.com DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse); 3776882SBrad.Beckmann@amd.com 3787550SBrad.Beckmann@amd.com if (accessPhysMem) { 3797550SBrad.Beckmann@amd.com ruby_port->physMemPort->sendAtomic(pkt); 3808184Ssomayeh@cs.wisc.edu } else if (needsResponse) { 3817915SBrad.Beckmann@amd.com pkt->makeResponse(); 3827550SBrad.Beckmann@amd.com } 3836882SBrad.Beckmann@amd.com 3846882SBrad.Beckmann@amd.com // turn packet around to go back to requester if response expected 3856882SBrad.Beckmann@amd.com if (needsResponse) { 3868161SBrad.Beckmann@amd.com DPRINTF(RubyPort, "Sending packet back over port\n"); 3876882SBrad.Beckmann@amd.com sendTiming(pkt); 3886882SBrad.Beckmann@amd.com } else { 3896882SBrad.Beckmann@amd.com delete pkt; 3906882SBrad.Beckmann@amd.com } 3918161SBrad.Beckmann@amd.com DPRINTF(RubyPort, "Hit callback done!\n"); 3926882SBrad.Beckmann@amd.com} 3936882SBrad.Beckmann@amd.com 3946882SBrad.Beckmann@amd.combool 3956882SBrad.Beckmann@amd.comRubyPort::M5Port::sendTiming(PacketPtr pkt) 3966882SBrad.Beckmann@amd.com{ 3977558SBrad.Beckmann@amd.com //minimum latency, must be > 0 3987823Ssteve.reinhardt@amd.com schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock())); 3996882SBrad.Beckmann@amd.com return true; 4006882SBrad.Beckmann@amd.com} 4016882SBrad.Beckmann@amd.com 4026882SBrad.Beckmann@amd.combool 4036882SBrad.Beckmann@amd.comRubyPort::PioPort::sendTiming(PacketPtr pkt) 4046882SBrad.Beckmann@amd.com{ 4057558SBrad.Beckmann@amd.com 
//minimum latency, must be > 0 4067823Ssteve.reinhardt@amd.com schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock())); 4076882SBrad.Beckmann@amd.com return true; 4086882SBrad.Beckmann@amd.com} 4096882SBrad.Beckmann@amd.com 4106882SBrad.Beckmann@amd.combool 4116882SBrad.Beckmann@amd.comRubyPort::M5Port::isPhysMemAddress(Addr addr) 4126882SBrad.Beckmann@amd.com{ 4136882SBrad.Beckmann@amd.com AddrRangeList physMemAddrList; 4146882SBrad.Beckmann@amd.com bool snoop = false; 4156893SBrad.Beckmann@amd.com ruby_port->physMemPort->getPeerAddressRanges(physMemAddrList, snoop); 4167039Snate@binkert.org for (AddrRangeIter iter = physMemAddrList.begin(); 4177039Snate@binkert.org iter != physMemAddrList.end(); 4187039Snate@binkert.org iter++) { 4196882SBrad.Beckmann@amd.com if (addr >= iter->start && addr <= iter->end) { 4208161SBrad.Beckmann@amd.com DPRINTF(RubyPort, "Request found in %#llx - %#llx range\n", 4216882SBrad.Beckmann@amd.com iter->start, iter->end); 4226882SBrad.Beckmann@amd.com return true; 4236882SBrad.Beckmann@amd.com } 4246882SBrad.Beckmann@amd.com } 4256882SBrad.Beckmann@amd.com return false; 4266882SBrad.Beckmann@amd.com} 4277909Shestness@cs.utexas.edu 4287909Shestness@cs.utexas.eduunsigned 4297909Shestness@cs.utexas.eduRubyPort::M5Port::deviceBlockSize() const 4307909Shestness@cs.utexas.edu{ 4317909Shestness@cs.utexas.edu return (unsigned) RubySystem::getBlockSizeBytes(); 4327909Shestness@cs.utexas.edu} 433