// RubyPort.cc revision 8717:5c253f1031d7
/*
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * Copyright (c) 2011 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
282665Ssaidi@eecs.umich.edu */ 292665Ssaidi@eecs.umich.edu 302SN/A#include "cpu/testers/rubytest/RubyTester.hh" 312SN/A#include "debug/Config.hh" 321112SN/A#include "debug/Ruby.hh" 331112SN/A#include "mem/protocol/AccessPermission.hh" 342SN/A#include "mem/ruby/slicc_interface/AbstractController.hh" 353386Sgblack@eecs.umich.edu#include "mem/ruby/system/RubyPort.hh" 364257Sgblack@eecs.umich.edu 372SN/ARubyPort::RubyPort(const Params *p) 382SN/A : MemObject(p) 392SN/A{ 402SN/A m_version = p->version; 412SN/A assert(m_version != -1); 422SN/A 432SN/A physmem = p->physmem; 442SN/A 452SN/A m_controller = NULL; 462SN/A m_mandatory_q_ptr = NULL; 472SN/A 484070Ssaidi@eecs.umich.edu m_request_cnt = 0; 492SN/A pio_port = NULL; 502SN/A physMemPort = NULL; 512SN/A 522SN/A m_usingRubyTester = p->using_ruby_tester; 532SN/A access_phys_mem = p->access_phys_mem; 542SN/A 552SN/A drainEvent = NULL; 562SN/A 572SN/A ruby_system = p->ruby_system; 582SN/A waitingOnSequencer = false; 592SN/A} 602SN/A 612SN/Avoid 622SN/ARubyPort::init() 633814Ssaidi@eecs.umich.edu{ 643814Ssaidi@eecs.umich.edu assert(m_controller != NULL); 653814Ssaidi@eecs.umich.edu m_mandatory_q_ptr = m_controller->getMandatoryQueue(); 663814Ssaidi@eecs.umich.edu} 673814Ssaidi@eecs.umich.edu 683814Ssaidi@eecs.umich.eduPort * 693814Ssaidi@eecs.umich.eduRubyPort::getPort(const std::string &if_name, int idx) 703814Ssaidi@eecs.umich.edu{ 713814Ssaidi@eecs.umich.edu if (if_name == "port") { 723814Ssaidi@eecs.umich.edu M5Port* cpuPort = new M5Port(csprintf("%s-port%d", name(), idx), 733814Ssaidi@eecs.umich.edu this, ruby_system, access_phys_mem); 744070Ssaidi@eecs.umich.edu cpu_ports.push_back(cpuPort); 754070Ssaidi@eecs.umich.edu return cpuPort; 764070Ssaidi@eecs.umich.edu } 774070Ssaidi@eecs.umich.edu 784070Ssaidi@eecs.umich.edu if (if_name == "pio_port") { 794070Ssaidi@eecs.umich.edu // ensure there is only one pio port 803814Ssaidi@eecs.umich.edu assert(pio_port == NULL); 812SN/A 822SN/A pio_port = new 
PioPort(csprintf("%s-pio-port%d", name(), idx), this); 832SN/A 842SN/A return pio_port; 852SN/A } 862SN/A 872SN/A if (if_name == "physMemPort") { 882SN/A // RubyPort should only have one port to physical memory 892SN/A assert (physMemPort == NULL); 902SN/A 912SN/A physMemPort = new M5Port(csprintf("%s-physMemPort", name()), this, 923422Sgblack@eecs.umich.edu ruby_system, access_phys_mem); 933422Sgblack@eecs.umich.edu 943422Sgblack@eecs.umich.edu return physMemPort; 953422Sgblack@eecs.umich.edu } 963422Sgblack@eecs.umich.edu 973422Sgblack@eecs.umich.edu return NULL; 983422Sgblack@eecs.umich.edu} 993422Sgblack@eecs.umich.edu 1003422Sgblack@eecs.umich.eduRubyPort::PioPort::PioPort(const std::string &_name, 1013422Sgblack@eecs.umich.edu RubyPort *_port) 1023422Sgblack@eecs.umich.edu : SimpleTimingPort(_name, _port) 1033422Sgblack@eecs.umich.edu{ 1043422Sgblack@eecs.umich.edu DPRINTF(RubyPort, "creating port to ruby sequencer to cpu %s\n", _name); 1053422Sgblack@eecs.umich.edu ruby_port = _port; 1063422Sgblack@eecs.umich.edu} 1073422Sgblack@eecs.umich.edu 1083422Sgblack@eecs.umich.eduRubyPort::M5Port::M5Port(const std::string &_name, RubyPort *_port, 1093422Sgblack@eecs.umich.edu RubySystem *_system, bool _access_phys_mem) 1103422Sgblack@eecs.umich.edu : SimpleTimingPort(_name, _port) 1113422Sgblack@eecs.umich.edu{ 1123422Sgblack@eecs.umich.edu DPRINTF(RubyPort, "creating port from ruby sequcner to cpu %s\n", _name); 1133422Sgblack@eecs.umich.edu ruby_port = _port; 1143422Sgblack@eecs.umich.edu ruby_system = _system; 1153422Sgblack@eecs.umich.edu _onRetryList = false; 1164103Ssaidi@eecs.umich.edu access_phys_mem = _access_phys_mem; 1174103Ssaidi@eecs.umich.edu} 1184103Ssaidi@eecs.umich.edu 1194103Ssaidi@eecs.umich.eduTick 1204103Ssaidi@eecs.umich.eduRubyPort::PioPort::recvAtomic(PacketPtr pkt) 1214103Ssaidi@eecs.umich.edu{ 1224103Ssaidi@eecs.umich.edu panic("RubyPort::PioPort::recvAtomic() not implemented!\n"); 1234103Ssaidi@eecs.umich.edu return 0; 
1244103Ssaidi@eecs.umich.edu} 1254244Ssaidi@eecs.umich.edu 1264244Ssaidi@eecs.umich.eduTick 1274244Ssaidi@eecs.umich.eduRubyPort::M5Port::recvAtomic(PacketPtr pkt) 1284244Ssaidi@eecs.umich.edu{ 1294244Ssaidi@eecs.umich.edu panic("RubyPort::M5Port::recvAtomic() not implemented!\n"); 1304244Ssaidi@eecs.umich.edu return 0; 1314103Ssaidi@eecs.umich.edu} 1324103Ssaidi@eecs.umich.edu 1334103Ssaidi@eecs.umich.edu 1344257Sgblack@eecs.umich.edubool 1354257Sgblack@eecs.umich.eduRubyPort::PioPort::recvTiming(PacketPtr pkt) 1364257Sgblack@eecs.umich.edu{ 1374257Sgblack@eecs.umich.edu // In FS mode, ruby memory will receive pio responses from devices 1384257Sgblack@eecs.umich.edu // and it must forward these responses back to the particular CPU. 1394103Ssaidi@eecs.umich.edu DPRINTF(RubyPort, "Pio response for address %#x\n", pkt->getAddr()); 1404257Sgblack@eecs.umich.edu 1414257Sgblack@eecs.umich.edu assert(pkt->isResponse()); 1424257Sgblack@eecs.umich.edu 1434257Sgblack@eecs.umich.edu // First we must retrieve the request port from the sender State 1444257Sgblack@eecs.umich.edu RubyPort::SenderState *senderState = 1454257Sgblack@eecs.umich.edu safe_cast<RubyPort::SenderState *>(pkt->senderState); 1464257Sgblack@eecs.umich.edu M5Port *port = senderState->port; 1474257Sgblack@eecs.umich.edu assert(port != NULL); 1484257Sgblack@eecs.umich.edu 1494257Sgblack@eecs.umich.edu // pop the sender state from the packet 1504257Sgblack@eecs.umich.edu pkt->senderState = senderState->saved; 1514257Sgblack@eecs.umich.edu delete senderState; 1524257Sgblack@eecs.umich.edu 1534257Sgblack@eecs.umich.edu port->sendTiming(pkt); 1544257Sgblack@eecs.umich.edu 1554257Sgblack@eecs.umich.edu return true; 1564257Sgblack@eecs.umich.edu} 1574257Sgblack@eecs.umich.edu 1584257Sgblack@eecs.umich.edubool 1594257Sgblack@eecs.umich.eduRubyPort::M5Port::recvTiming(PacketPtr pkt) 1604257Sgblack@eecs.umich.edu{ 1614257Sgblack@eecs.umich.edu DPRINTF(RubyPort, 1624257Sgblack@eecs.umich.edu "Timing access caught for 
address %#x\n", pkt->getAddr()); 1634257Sgblack@eecs.umich.edu 1644257Sgblack@eecs.umich.edu //dsm: based on SimpleTimingPort::recvTiming(pkt); 1654257Sgblack@eecs.umich.edu 1664257Sgblack@eecs.umich.edu // The received packets should only be M5 requests, which should never 1674257Sgblack@eecs.umich.edu // get nacked. There used to be code to hanldle nacks here, but 1684257Sgblack@eecs.umich.edu // I'm pretty sure it didn't work correctly with the drain code, 1694257Sgblack@eecs.umich.edu // so that would need to be fixed if we ever added it back. 1704257Sgblack@eecs.umich.edu assert(pkt->isRequest()); 1714257Sgblack@eecs.umich.edu 1724257Sgblack@eecs.umich.edu if (pkt->memInhibitAsserted()) { 1734257Sgblack@eecs.umich.edu warn("memInhibitAsserted???"); 1744257Sgblack@eecs.umich.edu // snooper will supply based on copy of packet 1754257Sgblack@eecs.umich.edu // still target's responsibility to delete packet 1764257Sgblack@eecs.umich.edu delete pkt; 1774257Sgblack@eecs.umich.edu return true; 1784257Sgblack@eecs.umich.edu } 1794257Sgblack@eecs.umich.edu 1804257Sgblack@eecs.umich.edu // Save the port in the sender state object to be used later to 1814257Sgblack@eecs.umich.edu // route the response 1824257Sgblack@eecs.umich.edu pkt->senderState = new SenderState(this, pkt->senderState); 1834257Sgblack@eecs.umich.edu 1844257Sgblack@eecs.umich.edu // Check for pio requests and directly send them to the dedicated 1854257Sgblack@eecs.umich.edu // pio port. 
1864257Sgblack@eecs.umich.edu if (!isPhysMemAddress(pkt->getAddr())) { 1874257Sgblack@eecs.umich.edu assert(ruby_port->pio_port != NULL); 1884257Sgblack@eecs.umich.edu DPRINTF(RubyPort, 1894257Sgblack@eecs.umich.edu "Request for address 0x%#x is assumed to be a pio request\n", 1904257Sgblack@eecs.umich.edu pkt->getAddr()); 1914257Sgblack@eecs.umich.edu 1924257Sgblack@eecs.umich.edu return ruby_port->pio_port->sendTiming(pkt); 1934257Sgblack@eecs.umich.edu } 1944257Sgblack@eecs.umich.edu 1954257Sgblack@eecs.umich.edu assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <= 1964257Sgblack@eecs.umich.edu RubySystem::getBlockSizeBytes()); 1974257Sgblack@eecs.umich.edu 1984257Sgblack@eecs.umich.edu // Submit the ruby request 1994257Sgblack@eecs.umich.edu RequestStatus requestStatus = ruby_port->makeRequest(pkt); 2004257Sgblack@eecs.umich.edu 2014257Sgblack@eecs.umich.edu // If the request successfully issued then we should return true. 2024257Sgblack@eecs.umich.edu // Otherwise, we need to delete the senderStatus we just created and return 2034257Sgblack@eecs.umich.edu // false. 2044257Sgblack@eecs.umich.edu if (requestStatus == RequestStatus_Issued) { 2054257Sgblack@eecs.umich.edu DPRINTF(RubyPort, "Request %#x issued\n", pkt->getAddr()); 2064257Sgblack@eecs.umich.edu return true; 2074257Sgblack@eecs.umich.edu } 2084257Sgblack@eecs.umich.edu 2094257Sgblack@eecs.umich.edu // 2104257Sgblack@eecs.umich.edu // Unless one is using the ruby tester, record the stalled M5 port for 2114257Sgblack@eecs.umich.edu // later retry when the sequencer becomes free. 
2124257Sgblack@eecs.umich.edu // 2134257Sgblack@eecs.umich.edu if (!ruby_port->m_usingRubyTester) { 2144257Sgblack@eecs.umich.edu ruby_port->addToRetryList(this); 2154257Sgblack@eecs.umich.edu } 2164257Sgblack@eecs.umich.edu 2174257Sgblack@eecs.umich.edu DPRINTF(RubyPort, 2184257Sgblack@eecs.umich.edu "Request for address %#x did not issue because %s\n", 2194257Sgblack@eecs.umich.edu pkt->getAddr(), RequestStatus_to_string(requestStatus)); 2204257Sgblack@eecs.umich.edu 2214257Sgblack@eecs.umich.edu SenderState* senderState = safe_cast<SenderState*>(pkt->senderState); 2224257Sgblack@eecs.umich.edu pkt->senderState = senderState->saved; 2234257Sgblack@eecs.umich.edu delete senderState; 2244257Sgblack@eecs.umich.edu return false; 2254257Sgblack@eecs.umich.edu} 2264257Sgblack@eecs.umich.edu 2274257Sgblack@eecs.umich.edubool 2284257Sgblack@eecs.umich.eduRubyPort::M5Port::doFunctionalRead(PacketPtr pkt) 2294257Sgblack@eecs.umich.edu{ 2304257Sgblack@eecs.umich.edu Address address(pkt->getAddr()); 2314257Sgblack@eecs.umich.edu Address line_address(address); 2324257Sgblack@eecs.umich.edu line_address.makeLineAddress(); 2334257Sgblack@eecs.umich.edu 2344257Sgblack@eecs.umich.edu AccessPermission access_perm = AccessPermission_NotPresent; 2354257Sgblack@eecs.umich.edu int num_controllers = ruby_system->m_abs_cntrl_vec.size(); 2364257Sgblack@eecs.umich.edu 2374257Sgblack@eecs.umich.edu DPRINTF(RubyPort, "Functional Read request for %s\n",address); 2384257Sgblack@eecs.umich.edu 2394257Sgblack@eecs.umich.edu unsigned int num_ro = 0; 2404257Sgblack@eecs.umich.edu unsigned int num_rw = 0; 2414257Sgblack@eecs.umich.edu unsigned int num_busy = 0; 2424257Sgblack@eecs.umich.edu unsigned int num_backing_store = 0; 2434257Sgblack@eecs.umich.edu unsigned int num_invalid = 0; 2444257Sgblack@eecs.umich.edu 2454257Sgblack@eecs.umich.edu // In this loop we count the number of controllers that have the given 2464257Sgblack@eecs.umich.edu // address in read only, read write and busy states. 
2474257Sgblack@eecs.umich.edu for (int i = 0; i < num_controllers; ++i) { 2484257Sgblack@eecs.umich.edu access_perm = ruby_system->m_abs_cntrl_vec[i]-> 2494257Sgblack@eecs.umich.edu getAccessPermission(line_address); 2504257Sgblack@eecs.umich.edu if (access_perm == AccessPermission_Read_Only) 2514257Sgblack@eecs.umich.edu num_ro++; 2524257Sgblack@eecs.umich.edu else if (access_perm == AccessPermission_Read_Write) 2534257Sgblack@eecs.umich.edu num_rw++; 2544257Sgblack@eecs.umich.edu else if (access_perm == AccessPermission_Busy) 2554257Sgblack@eecs.umich.edu num_busy++; 2564257Sgblack@eecs.umich.edu else if (access_perm == AccessPermission_Backing_Store) 2574257Sgblack@eecs.umich.edu // See RubySlicc_Exports.sm for details, but Backing_Store is meant 2584257Sgblack@eecs.umich.edu // to represent blocks in memory *for Broadcast/Snooping protocols*, 2594257Sgblack@eecs.umich.edu // where memory has no idea whether it has an exclusive copy of data 2604257Sgblack@eecs.umich.edu // or not. 2614257Sgblack@eecs.umich.edu num_backing_store++; 2624257Sgblack@eecs.umich.edu else if (access_perm == AccessPermission_Invalid || 2634257Sgblack@eecs.umich.edu access_perm == AccessPermission_NotPresent) 2644257Sgblack@eecs.umich.edu num_invalid++; 2654257Sgblack@eecs.umich.edu } 2664257Sgblack@eecs.umich.edu assert(num_rw <= 1); 2674257Sgblack@eecs.umich.edu 2684257Sgblack@eecs.umich.edu uint8* data = pkt->getPtr<uint8_t>(true); 2694257Sgblack@eecs.umich.edu unsigned int size_in_bytes = pkt->getSize(); 2704257Sgblack@eecs.umich.edu unsigned startByte = address.getAddress() - line_address.getAddress(); 2714257Sgblack@eecs.umich.edu 2724257Sgblack@eecs.umich.edu // This if case is meant to capture what happens in a Broadcast/Snoop 2734257Sgblack@eecs.umich.edu // protocol where the block does not exist in the cache hierarchy. 
You 2744257Sgblack@eecs.umich.edu // only want to read from the Backing_Store memory if there is no copy in 2754257Sgblack@eecs.umich.edu // the cache hierarchy, otherwise you want to try to read the RO or RW 2764257Sgblack@eecs.umich.edu // copies existing in the cache hierarchy (covered by the else statement). 2774257Sgblack@eecs.umich.edu // The reason is because the Backing_Store memory could easily be stale, if 2784257Sgblack@eecs.umich.edu // there are copies floating around the cache hierarchy, so you want to read 2794257Sgblack@eecs.umich.edu // it only if it's not in the cache hierarchy at all. 2804257Sgblack@eecs.umich.edu if (num_invalid == (num_controllers - 1) && 2814257Sgblack@eecs.umich.edu num_backing_store == 1) 2824257Sgblack@eecs.umich.edu { 2834257Sgblack@eecs.umich.edu DPRINTF(RubyPort, "only copy in Backing_Store memory, read from it\n"); 2844257Sgblack@eecs.umich.edu for (int i = 0; i < num_controllers; ++i) { 2854257Sgblack@eecs.umich.edu access_perm = ruby_system->m_abs_cntrl_vec[i] 2864257Sgblack@eecs.umich.edu ->getAccessPermission(line_address); 2874257Sgblack@eecs.umich.edu if (access_perm == AccessPermission_Backing_Store) { 2884257Sgblack@eecs.umich.edu DataBlock& block = ruby_system->m_abs_cntrl_vec[i] 2894257Sgblack@eecs.umich.edu ->getDataBlock(line_address); 2904257Sgblack@eecs.umich.edu 2914257Sgblack@eecs.umich.edu DPRINTF(RubyPort, "reading from %s block %s\n", 2924257Sgblack@eecs.umich.edu ruby_system->m_abs_cntrl_vec[i]->name(), block); 2934257Sgblack@eecs.umich.edu for (unsigned i = 0; i < size_in_bytes; ++i) { 2944257Sgblack@eecs.umich.edu data[i] = block.getByte(i + startByte); 2954257Sgblack@eecs.umich.edu } 2964257Sgblack@eecs.umich.edu return true; 2974257Sgblack@eecs.umich.edu } 2984257Sgblack@eecs.umich.edu } 2994257Sgblack@eecs.umich.edu } else { 3004257Sgblack@eecs.umich.edu // In Broadcast/Snoop protocols, this covers if you know the block 3014257Sgblack@eecs.umich.edu // exists somewhere in the caching hierarchy, 
then you want to read any 3024257Sgblack@eecs.umich.edu // valid RO or RW block. In directory protocols, same thing, you want 3034257Sgblack@eecs.umich.edu // to read any valid readable copy of the block. 3044257Sgblack@eecs.umich.edu DPRINTF(RubyPort, "num_busy = %d, num_ro = %d, num_rw = %d\n", 3054257Sgblack@eecs.umich.edu num_busy, num_ro, num_rw); 3064257Sgblack@eecs.umich.edu // In this loop, we try to figure which controller has a read only or 3074257Sgblack@eecs.umich.edu // a read write copy of the given address. Any valid copy would suffice 3084257Sgblack@eecs.umich.edu // for a functional read. 3094257Sgblack@eecs.umich.edu for(int i = 0;i < num_controllers;++i) { 3104257Sgblack@eecs.umich.edu access_perm = ruby_system->m_abs_cntrl_vec[i] 3114257Sgblack@eecs.umich.edu ->getAccessPermission(line_address); 3124257Sgblack@eecs.umich.edu if(access_perm == AccessPermission_Read_Only || 3134257Sgblack@eecs.umich.edu access_perm == AccessPermission_Read_Write) 3144257Sgblack@eecs.umich.edu { 3154257Sgblack@eecs.umich.edu DataBlock& block = ruby_system->m_abs_cntrl_vec[i] 3164257Sgblack@eecs.umich.edu ->getDataBlock(line_address); 3174257Sgblack@eecs.umich.edu 3184257Sgblack@eecs.umich.edu DPRINTF(RubyPort, "reading from %s block %s\n", 3194257Sgblack@eecs.umich.edu ruby_system->m_abs_cntrl_vec[i]->name(), block); 3204257Sgblack@eecs.umich.edu for (unsigned i = 0; i < size_in_bytes; ++i) { 3214103Ssaidi@eecs.umich.edu data[i] = block.getByte(i + startByte); 3221112SN/A } 323 return true; 324 } 325 } 326 } 327 return false; 328} 329 330bool 331RubyPort::M5Port::doFunctionalWrite(PacketPtr pkt) 332{ 333 Address addr(pkt->getAddr()); 334 Address line_addr = line_address(addr); 335 AccessPermission access_perm = AccessPermission_NotPresent; 336 int num_controllers = ruby_system->m_abs_cntrl_vec.size(); 337 338 DPRINTF(RubyPort, "Functional Write request for %s\n",addr); 339 340 unsigned int num_ro = 0; 341 unsigned int num_rw = 0; 342 unsigned int num_busy = 0; 343 
unsigned int num_backing_store = 0; 344 unsigned int num_invalid = 0; 345 346 // In this loop we count the number of controllers that have the given 347 // address in read only, read write and busy states. 348 for(int i = 0;i < num_controllers;++i) { 349 access_perm = ruby_system->m_abs_cntrl_vec[i]-> 350 getAccessPermission(line_addr); 351 if (access_perm == AccessPermission_Read_Only) 352 num_ro++; 353 else if (access_perm == AccessPermission_Read_Write) 354 num_rw++; 355 else if (access_perm == AccessPermission_Busy) 356 num_busy++; 357 else if (access_perm == AccessPermission_Backing_Store) 358 // See RubySlicc_Exports.sm for details, but Backing_Store is meant 359 // to represent blocks in memory *for Broadcast/Snooping protocols*, 360 // where memory has no idea whether it has an exclusive copy of data 361 // or not. 362 num_backing_store++; 363 else if (access_perm == AccessPermission_Invalid || 364 access_perm == AccessPermission_NotPresent) 365 num_invalid++; 366 } 367 368 // If the number of read write copies is more than 1, then there is bug in 369 // coherence protocol. Otherwise, if all copies are in stable states, i.e. 370 // num_busy == 0, we update all the copies. If there is at least one copy 371 // in busy state, then we check if there is read write copy. If yes, then 372 // also we let the access go through. Or, if there is no copy in the cache 373 // hierarchy at all, we still want to do the write to the memory 374 // (Backing_Store) instead of failing. 
375 376 DPRINTF(RubyPort, "num_busy = %d, num_ro = %d, num_rw = %d\n", 377 num_busy, num_ro, num_rw); 378 assert(num_rw <= 1); 379 380 uint8* data = pkt->getPtr<uint8_t>(true); 381 unsigned int size_in_bytes = pkt->getSize(); 382 unsigned startByte = addr.getAddress() - line_addr.getAddress(); 383 384 if ((num_busy == 0 && num_ro > 0) || num_rw == 1 || 385 (num_invalid == (num_controllers - 1) && num_backing_store == 1)) 386 { 387 for(int i = 0; i < num_controllers;++i) { 388 access_perm = ruby_system->m_abs_cntrl_vec[i]-> 389 getAccessPermission(line_addr); 390 if(access_perm == AccessPermission_Read_Only || 391 access_perm == AccessPermission_Read_Write|| 392 access_perm == AccessPermission_Maybe_Stale || 393 access_perm == AccessPermission_Backing_Store) 394 { 395 DataBlock& block = ruby_system->m_abs_cntrl_vec[i] 396 ->getDataBlock(line_addr); 397 398 DPRINTF(RubyPort, "%s\n",block); 399 for (unsigned i = 0; i < size_in_bytes; ++i) { 400 block.setByte(i + startByte, data[i]); 401 } 402 DPRINTF(RubyPort, "%s\n",block); 403 } 404 } 405 return true; 406 } 407 return false; 408} 409 410void 411RubyPort::M5Port::recvFunctional(PacketPtr pkt) 412{ 413 DPRINTF(RubyPort, "Functional access caught for address %#x\n", 414 pkt->getAddr()); 415 416 // Check for pio requests and directly send them to the dedicated 417 // pio port. 
418 if (!isPhysMemAddress(pkt->getAddr())) { 419 assert(ruby_port->pio_port != NULL); 420 DPRINTF(RubyPort, "Request for address 0x%#x is a pio request\n", 421 pkt->getAddr()); 422 panic("RubyPort::PioPort::recvFunctional() not implemented!\n"); 423 } 424 425 assert(pkt->getAddr() + pkt->getSize() <= 426 line_address(Address(pkt->getAddr())).getAddress() + 427 RubySystem::getBlockSizeBytes()); 428 429 bool accessSucceeded = false; 430 bool needsResponse = pkt->needsResponse(); 431 432 // Do the functional access on ruby memory 433 if (pkt->isRead()) { 434 accessSucceeded = doFunctionalRead(pkt); 435 } else if (pkt->isWrite()) { 436 accessSucceeded = doFunctionalWrite(pkt); 437 } else { 438 panic("RubyPort: unsupported functional command %s\n", 439 pkt->cmdString()); 440 } 441 442 // Unless the requester explicitly said otherwise, generate an error if 443 // the functional request failed 444 if (!accessSucceeded && !pkt->suppressFuncError()) { 445 fatal("Ruby functional %s failed for address %#x\n", 446 pkt->isWrite() ? "write" : "read", pkt->getAddr()); 447 } 448 449 if (access_phys_mem) { 450 // The attached physmem contains the official version of data. 451 // The following command performs the real functional access. 452 // This line should be removed once Ruby supplies the official version 453 // of data. 454 ruby_port->physMemPort->sendFunctional(pkt); 455 } 456 457 // turn packet around to go back to requester if response expected 458 if (needsResponse) { 459 pkt->setFunctionalResponseStatus(accessSucceeded); 460 461 // @todo There should not be a reverse call since the response is 462 // communicated through the packet pointer 463 // DPRINTF(RubyPort, "Sending packet back over port\n"); 464 // sendFunctional(pkt); 465 } 466 DPRINTF(RubyPort, "Functional access %s!\n", 467 accessSucceeded ? 
"successful":"failed"); 468} 469 470void 471RubyPort::ruby_hit_callback(PacketPtr pkt) 472{ 473 // Retrieve the request port from the sender State 474 RubyPort::SenderState *senderState = 475 safe_cast<RubyPort::SenderState *>(pkt->senderState); 476 M5Port *port = senderState->port; 477 assert(port != NULL); 478 479 // pop the sender state from the packet 480 pkt->senderState = senderState->saved; 481 delete senderState; 482 483 port->hitCallback(pkt); 484 485 // 486 // If we had to stall the M5Ports, wake them up because the sequencer 487 // likely has free resources now. 488 // 489 if (waitingOnSequencer) { 490 // 491 // Record the current list of ports to retry on a temporary list before 492 // calling sendRetry on those ports. sendRetry will cause an 493 // immediate retry, which may result in the ports being put back on the 494 // list. Therefore we want to clear the retryList before calling 495 // sendRetry. 496 // 497 std::list<M5Port*> curRetryList(retryList); 498 499 retryList.clear(); 500 waitingOnSequencer = false; 501 502 for (std::list<M5Port*>::iterator i = curRetryList.begin(); 503 i != curRetryList.end(); ++i) { 504 DPRINTF(RubyPort, 505 "Sequencer may now be free. SendRetry to port %s\n", 506 (*i)->name()); 507 (*i)->onRetryList(false); 508 (*i)->sendRetry(); 509 } 510 } 511 512 testDrainComplete(); 513} 514 515void 516RubyPort::testDrainComplete() 517{ 518 //If we weren't able to drain before, we might be able to now. 519 if (drainEvent != NULL) { 520 unsigned int drainCount = getDrainCount(drainEvent); 521 DPRINTF(Config, "Drain count: %u\n", drainCount); 522 if (drainCount == 0) { 523 drainEvent->process(); 524 // Clear the drain event once we're done with it. 525 drainEvent = NULL; 526 } 527 } 528} 529 530unsigned int 531RubyPort::getDrainCount(Event *de) 532{ 533 int count = 0; 534 // 535 // If the sequencer is not empty, then requests need to drain. 
536 // The outstandingCount is the number of requests outstanding and thus the 537 // number of times M5's timing port will process the drain event. 538 // 539 count += outstandingCount(); 540 541 DPRINTF(Config, "outstanding count %d\n", outstandingCount()); 542 543 // To simplify the draining process, the sequencer's deadlock detection 544 // event should have been descheduled. 545 assert(isDeadlockEventScheduled() == false); 546 547 if (pio_port != NULL) { 548 count += pio_port->drain(de); 549 DPRINTF(Config, "count after pio check %d\n", count); 550 } 551 if (physMemPort != NULL) { 552 count += physMemPort->drain(de); 553 DPRINTF(Config, "count after physmem check %d\n", count); 554 } 555 556 for (CpuPortIter p_iter = cpu_ports.begin(); p_iter != cpu_ports.end(); 557 p_iter++) { 558 M5Port* cpu_port = *p_iter; 559 count += cpu_port->drain(de); 560 DPRINTF(Config, "count after cpu port check %d\n", count); 561 } 562 563 DPRINTF(Config, "final count %d\n", count); 564 565 return count; 566} 567 568unsigned int 569RubyPort::drain(Event *de) 570{ 571 if (isDeadlockEventScheduled()) { 572 descheduleDeadlockEvent(); 573 } 574 575 int count = getDrainCount(de); 576 577 // Set status 578 if (count != 0) { 579 drainEvent = de; 580 581 changeState(SimObject::Draining); 582 return count; 583 } 584 585 changeState(SimObject::Drained); 586 return 0; 587} 588 589void 590RubyPort::M5Port::hitCallback(PacketPtr pkt) 591{ 592 bool needsResponse = pkt->needsResponse(); 593 594 // 595 // Unless specified at configuraiton, all responses except failed SC 596 // and Flush operations access M5 physical memory. 
597 // 598 bool accessPhysMem = access_phys_mem; 599 600 if (pkt->isLLSC()) { 601 if (pkt->isWrite()) { 602 if (pkt->req->getExtraData() != 0) { 603 // 604 // Successful SC packets convert to normal writes 605 // 606 pkt->convertScToWrite(); 607 } else { 608 // 609 // Failed SC packets don't access physical memory and thus 610 // the RubyPort itself must convert it to a response. 611 // 612 accessPhysMem = false; 613 } 614 } else { 615 // 616 // All LL packets convert to normal loads so that M5 PhysMem does 617 // not lock the blocks. 618 // 619 pkt->convertLlToRead(); 620 } 621 } 622 623 // 624 // Flush requests don't access physical memory 625 // 626 if (pkt->isFlush()) { 627 accessPhysMem = false; 628 } 629 630 DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse); 631 632 if (accessPhysMem) { 633 ruby_port->physMemPort->sendAtomic(pkt); 634 } else if (needsResponse) { 635 pkt->makeResponse(); 636 } 637 638 // turn packet around to go back to requester if response expected 639 if (needsResponse) { 640 DPRINTF(RubyPort, "Sending packet back over port\n"); 641 sendTiming(pkt); 642 } else { 643 delete pkt; 644 } 645 DPRINTF(RubyPort, "Hit callback done!\n"); 646} 647 648bool 649RubyPort::M5Port::sendTiming(PacketPtr pkt) 650{ 651 //minimum latency, must be > 0 652 schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock())); 653 return true; 654} 655 656bool 657RubyPort::PioPort::sendTiming(PacketPtr pkt) 658{ 659 //minimum latency, must be > 0 660 schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock())); 661 return true; 662} 663 664bool 665RubyPort::M5Port::isPhysMemAddress(Addr addr) 666{ 667 AddrRangeList physMemAddrList = 668 ruby_port->physMemPort->getPeer()->getAddrRanges(); 669 for (AddrRangeIter iter = physMemAddrList.begin(); 670 iter != physMemAddrList.end(); 671 iter++) { 672 if (addr >= iter->start && addr <= iter->end) { 673 DPRINTF(RubyPort, "Request found in %#llx - %#llx range\n", 674 iter->start, iter->end); 675 
return true; 676 } 677 } 678 return false; 679} 680 681unsigned 682RubyPort::M5Port::deviceBlockSize() const 683{ 684 return (unsigned) RubySystem::getBlockSizeBytes(); 685} 686 687void 688RubyPort::ruby_eviction_callback(const Address& address) 689{ 690 DPRINTF(RubyPort, "Sending invalidations.\n"); 691 Request req(address.getAddress(), 0, 0); 692 for (CpuPortIter it = cpu_ports.begin(); it != cpu_ports.end(); it++) { 693 Packet *pkt = new Packet(&req, MemCmd::InvalidationReq, -1); 694 (*it)->sendTiming(pkt); 695 } 696} 697