RubyPort.cc revision 12395:322bb93e5f06
/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2009-2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2011 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/system/RubyPort.hh"

#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/Ruby.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/simple_mem.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

RubyPort::RubyPort(const Params *p)
    : MemObject(p), m_ruby_system(p->ruby_system), m_version(p->version),
      m_controller(NULL), m_mandatory_q_ptr(NULL),
      m_usingRubyTester(p->using_ruby_tester), system(p->system),
      pioMasterPort(csprintf("%s.pio-master-port", name()), this),
      pioSlavePort(csprintf("%s.pio-slave-port", name()), this),
      memMasterPort(csprintf("%s.mem-master-port", name()), this),
      memSlavePort(csprintf("%s-mem-slave-port", name()), this,
                   p->ruby_system->getAccessBackingStore(), -1,
                   p->no_retry_on_stall),
      gotAddrRanges(p->port_master_connection_count),
      m_isCPUSequencer(p->is_cpu_sequencer)
{
    assert(m_version != -1);

    // create the slave ports based on the number of connected ports
    for (size_t i = 0; i < p->port_slave_connection_count; ++i) {
        slave_ports.push_back(new MemSlavePort(csprintf("%s.slave%d", name(),
            i), this, p->ruby_system->getAccessBackingStore(),
            i, p->no_retry_on_stall));
    }

    // create the master ports based on the number of connected ports
    for (size_t i = 0; i < p->port_master_connection_count; ++i) {
        master_ports.push_back(new PioMasterPort(csprintf("%s.master%d",
            name(), i), this));
    }
}

void
RubyPort::init()
{
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
}

BaseMasterPort &
RubyPort::getMasterPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_master_port") {
        return memMasterPort;
    }

    if (if_name == "pio_master_port") {
        return pioMasterPort;
    }

    // used by the x86 CPUs to connect the interrupt PIO and interrupt slave
    // port
    if (if_name != "master") {
        // pass it along to our super class
        return MemObject::getMasterPort(if_name, idx);
    } else {
        if (idx >= static_cast<PortID>(master_ports.size())) {
            panic("RubyPort::getMasterPort: unknown index %d\n", idx);
        }

        return *master_ports[idx];
    }
}

BaseSlavePort &
RubyPort::getSlavePort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_slave_port") {
        return memSlavePort;
    }

    if (if_name == "pio_slave_port")
        return pioSlavePort;

    // used by the CPUs to connect the caches to the interconnect, and
    // for the x86 case also the interrupt master
    if (if_name != "slave") {
        // pass it along to our super class
        return MemObject::getSlavePort(if_name, idx);
    } else {
        if (idx >= static_cast<PortID>(slave_ports.size())) {
            panic("RubyPort::getSlavePort: unknown index %d\n", idx);
        }

        return *slave_ports[idx];
    }
}

RubyPort::PioMasterPort::PioMasterPort(const std::string &_name,
                                       RubyPort *_port)
    : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
      reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
{
    DPRINTF(RubyPort, "Created master pioport on sequencer %s\n", _name);
}

RubyPort::PioSlavePort::PioSlavePort(const std::string &_name,
                                     RubyPort *_port)
    : QueuedSlavePort(_name, _port, queue), queue(*_port, *this)
{
    DPRINTF(RubyPort, "Created slave pioport on sequencer %s\n", _name);
}
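
// Added note (not in the original source): the Mem{Slave,Master}Ports
// defined below carry the actual memory traffic. MemSlavePort is where a
// CPU-side requester injects requests into the Ruby sequencer, while
// MemMasterPort forwards requests whose addresses fall outside physical
// memory (treated as PIO) and routes the resulting responses back to the
// originating MemSlavePort via the SenderState saved on the packet.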

RubyPort::MemMasterPort::MemMasterPort(const std::string &_name,
                                       RubyPort *_port)
    : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
      reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
{
    DPRINTF(RubyPort, "Created master memport on ruby sequencer %s\n", _name);
}

RubyPort::MemSlavePort::MemSlavePort(const std::string &_name, RubyPort *_port,
                                     bool _access_backing_store, PortID id,
                                     bool _no_retry_on_stall)
    : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
      access_backing_store(_access_backing_store),
      no_retry_on_stall(_no_retry_on_stall)
{
    DPRINTF(RubyPort, "Created slave memport on ruby sequencer %s\n", _name);
}

bool
RubyPort::PioMasterPort::recvTimingResp(PacketPtr pkt)
{
    RubyPort *rp = static_cast<RubyPort *>(&owner);
    DPRINTF(RubyPort, "Response for address: %#x\n", pkt->getAddr());

    // send next cycle
    rp->pioSlavePort.schedTimingResp(
        pkt, curTick() + rp->m_ruby_system->clockPeriod());
    return true;
}

bool
RubyPort::MemMasterPort::recvTimingResp(PacketPtr pkt)
{
    // got a response from a device
    assert(pkt->isResponse());

    // First we must retrieve the request port from the sender state
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
    MemSlavePort *port = senderState->port;
    assert(port != NULL);
    delete senderState;

    // In FS mode, ruby memory will receive pio responses from devices
    // and it must forward these responses back to the particular CPU.
    DPRINTF(RubyPort, "Pio response for address %#x, going to %s\n",
            pkt->getAddr(), port->name());

    // attempt to send the response in the next cycle
    RubyPort *rp = static_cast<RubyPort *>(&owner);
    port->schedTimingResp(pkt, curTick() + rp->m_ruby_system->clockPeriod());

    return true;
}

bool
RubyPort::PioSlavePort::recvTimingReq(PacketPtr pkt)
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
        AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
        for (auto it = l.begin(); it != l.end(); ++it) {
            if (it->contains(pkt->getAddr())) {
                // generally it is not safe to assume success here as
                // the port could be blocked
                bool M5_VAR_USED success =
                    ruby_port->master_ports[i]->sendTimingReq(pkt);
                assert(success);
                return true;
            }
        }
    }
    panic("Should never reach here!\n");
}

Tick
RubyPort::PioSlavePort::recvAtomic(PacketPtr pkt)
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
    // Only atomic_noncaching mode supported!
    if (!ruby_port->system->bypassCaches()) {
        panic("Ruby supports atomic accesses only in noncaching mode\n");
    }

    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
        AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
        for (auto it = l.begin(); it != l.end(); ++it) {
            if (it->contains(pkt->getAddr())) {
                return ruby_port->master_ports[i]->sendAtomic(pkt);
            }
        }
    }
    panic("Could not find address in Ruby PIO address ranges!\n");
}

bool
RubyPort::MemSlavePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Timing request for address %#x on port %d\n",
            pkt->getAddr(), id);
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    if (pkt->cacheResponding())
        panic("RubyPort should never see request with the "
              "cacheResponding flag set\n");

    // Ruby doesn't support cache maintenance operations at the moment;
    // as a workaround, we respond right away.
    if (pkt->req->isCacheMaintenance()) {
        warn_once("Cache maintenance operations are not supported in Ruby.\n");
        pkt->makeResponse();
        schedTimingResp(pkt, curTick());
        return true;
    }

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (pkt->cmd != MemCmd::MemFenceReq) {
        if (!isPhysMemAddress(pkt->getAddr())) {
            assert(ruby_port->memMasterPort.isConnected());
            DPRINTF(RubyPort, "Request address %#x assumed to be a "
                    "pio address\n", pkt->getAddr());

            // Save the port in the sender state object to be used later to
            // route the response
            pkt->pushSenderState(new SenderState(this));

            // send next cycle
            RubySystem *rs = ruby_port->m_ruby_system;
            ruby_port->memMasterPort.schedTimingReq(pkt,
                curTick() + rs->clockPeriod());
            return true;
        }

        assert(getOffset(pkt->getAddr()) + pkt->getSize() <=
               RubySystem::getBlockSizeBytes());
    }

    // Submit the ruby request
    RequestStatus requestStatus = ruby_port->makeRequest(pkt);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to tell the port to retry at a later point
    // and return false.
    if (requestStatus == RequestStatus_Issued) {
        // Save the port in the sender state object to be used later to
        // route the response
        pkt->pushSenderState(new SenderState(this));

        DPRINTF(RubyPort, "Request %s 0x%x issued\n", pkt->cmdString(),
                pkt->getAddr());
        return true;
    }

    if (pkt->cmd != MemCmd::MemFenceReq) {
        DPRINTF(RubyPort,
                "Request for address %#x did not issue because %s\n",
                pkt->getAddr(), RequestStatus_to_string(requestStatus));
    }

    addToRetryList();

    return false;
}

Tick
RubyPort::MemSlavePort::recvAtomic(PacketPtr pkt)
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
    // Only atomic_noncaching mode supported!
    if (!ruby_port->system->bypassCaches()) {
        panic("Ruby supports atomic accesses only in noncaching mode\n");
    }

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (pkt->cmd != MemCmd::MemFenceReq) {
        if (!isPhysMemAddress(pkt->getAddr())) {
            assert(ruby_port->memMasterPort.isConnected());
            DPRINTF(RubyPort, "Request address %#x assumed to be a "
                    "pio address\n", pkt->getAddr());

            // Save the port in the sender state object to be used later to
            // route the response
            pkt->pushSenderState(new SenderState(this));

            // send next cycle
            Tick req_ticks = ruby_port->memMasterPort.sendAtomic(pkt);
            return ruby_port->ticksToCycles(req_ticks);
        }

        assert(getOffset(pkt->getAddr()) + pkt->getSize() <=
               RubySystem::getBlockSizeBytes());
    }

    // Find the appropriate directory for the address.
    // This assumes that protocols have a Directory machine,
    // which has its memPort hooked up to memory. This can
    // fail for some custom protocols.
    MachineID id = ruby_port->m_controller->mapAddressToMachine(
                    pkt->getAddr(), MachineType_Directory);
    RubySystem *rs = ruby_port->m_ruby_system;
    AbstractController *directory =
        rs->m_abstract_controls[id.getType()][id.getNum()];
    return directory->recvAtomic(pkt);
}

void
RubyPort::MemSlavePort::addToRetryList()
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    //
    // Unless the requestor does not want retries (e.g., the Ruby tester),
    // record the stalled M5 port for later retry when the sequencer
    // becomes free.
    //
    if (!no_retry_on_stall && !ruby_port->onRetryList(this)) {
        ruby_port->addToRetryList(this);
    }
}

void
RubyPort::MemSlavePort::recvFunctional(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Functional access for address: %#x\n", pkt->getAddr());

    RubyPort *rp M5_VAR_USED = static_cast<RubyPort *>(&owner);
    RubySystem *rs = rp->m_ruby_system;

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt->getAddr())) {
        DPRINTF(RubyPort, "Pio Request for address: %#x\n", pkt->getAddr());
        assert(rp->pioMasterPort.isConnected());
        rp->pioMasterPort.sendFunctional(pkt);
        return;
    }

    assert(pkt->getAddr() + pkt->getSize() <=
           makeLineAddress(pkt->getAddr()) + RubySystem::getBlockSizeBytes());

    if (access_backing_store) {
        // The attached physmem contains the official version of data.
        // The following command performs the real functional access.
        // This line should be removed once Ruby supplies the official version
        // of data.
        rs->getPhysMem()->functionalAccess(pkt);
    } else {
        bool accessSucceeded = false;
        bool needsResponse = pkt->needsResponse();

        // Do the functional access on ruby memory
        if (pkt->isRead()) {
            accessSucceeded = rs->functionalRead(pkt);
        } else if (pkt->isWrite()) {
            accessSucceeded = rs->functionalWrite(pkt);
        } else {
            panic("Unsupported functional command %s\n", pkt->cmdString());
        }

        // Unless the requester explicitly said otherwise, generate an error
        // if the functional request failed
        if (!accessSucceeded && !pkt->suppressFuncError()) {
            fatal("Ruby functional %s failed for address %#x\n",
                  pkt->isWrite() ? "write" : "read", pkt->getAddr());
        }

        // turn packet around to go back to requester if response expected
        if (needsResponse) {
            pkt->setFunctionalResponseStatus(accessSucceeded);
        }

        DPRINTF(RubyPort, "Functional access %s!\n",
                accessSucceeded ?
"successful":"failed"); 428 } 429} 430 431void 432RubyPort::ruby_hit_callback(PacketPtr pkt) 433{ 434 DPRINTF(RubyPort, "Hit callback for %s 0x%x\n", pkt->cmdString(), 435 pkt->getAddr()); 436 437 // The packet was destined for memory and has not yet been turned 438 // into a response 439 assert(system->isMemAddr(pkt->getAddr())); 440 assert(pkt->isRequest()); 441 442 // First we must retrieve the request port from the sender State 443 RubyPort::SenderState *senderState = 444 safe_cast<RubyPort::SenderState *>(pkt->popSenderState()); 445 MemSlavePort *port = senderState->port; 446 assert(port != NULL); 447 delete senderState; 448 449 port->hitCallback(pkt); 450 451 trySendRetries(); 452} 453 454void 455RubyPort::trySendRetries() 456{ 457 // 458 // If we had to stall the MemSlavePorts, wake them up because the sequencer 459 // likely has free resources now. 460 // 461 if (!retryList.empty()) { 462 // Record the current list of ports to retry on a temporary list 463 // before calling sendRetryReq on those ports. sendRetryReq will cause 464 // an immediate retry, which may result in the ports being put back on 465 // the list. Therefore we want to clear the retryList before calling 466 // sendRetryReq. 467 std::vector<MemSlavePort *> curRetryList(retryList); 468 469 retryList.clear(); 470 471 for (auto i = curRetryList.begin(); i != curRetryList.end(); ++i) { 472 DPRINTF(RubyPort, 473 "Sequencer may now be free. SendRetry to port %s\n", 474 (*i)->name()); 475 (*i)->sendRetryReq(); 476 } 477 } 478} 479 480void 481RubyPort::testDrainComplete() 482{ 483 //If we weren't able to drain before, we might be able to now. 484 if (drainState() == DrainState::Draining) { 485 unsigned int drainCount = outstandingCount(); 486 DPRINTF(Drain, "Drain count: %u\n", drainCount); 487 if (drainCount == 0) { 488 DPRINTF(Drain, "RubyPort done draining, signaling drain done\n"); 489 signalDrainDone(); 490 } 491 } 492} 493 494DrainState 495RubyPort::drain() 496{ 497 if (isDeadlockEventScheduled()) { 498 descheduleDeadlockEvent(); 499 } 500 501 // 502 // If the RubyPort is not empty, then it needs to clear all outstanding 503 // requests before it should call signalDrainDone() 504 // 505 DPRINTF(Config, "outstanding count %d\n", outstandingCount()); 506 if (outstandingCount() > 0) { 507 DPRINTF(Drain, "RubyPort not drained\n"); 508 return DrainState::Draining; 509 } else { 510 return DrainState::Drained; 511 } 512} 513 514void 515RubyPort::MemSlavePort::hitCallback(PacketPtr pkt) 516{ 517 bool needsResponse = pkt->needsResponse(); 518 519 // Unless specified at configuraiton, all responses except failed SC 520 // and Flush operations access M5 physical memory. 521 bool accessPhysMem = access_backing_store; 522 523 if (pkt->isLLSC()) { 524 if (pkt->isWrite()) { 525 if (pkt->req->getExtraData() != 0) { 526 // 527 // Successful SC packets convert to normal writes 528 // 529 pkt->convertScToWrite(); 530 } else { 531 // 532 // Failed SC packets don't access physical memory and thus 533 // the RubyPort itself must convert it to a response. 534 // 535 accessPhysMem = false; 536 } 537 } else { 538 // 539 // All LL packets convert to normal loads so that M5 PhysMem does 540 // not lock the blocks. 
            //
            pkt->convertLlToRead();
        }
    }

    // Flush, acquire, release requests don't access physical memory
    if (pkt->isFlush() || pkt->cmd == MemCmd::MemFenceReq) {
        accessPhysMem = false;
    }

    if (pkt->req->isKernel()) {
        accessPhysMem = false;
        needsResponse = true;
    }

    DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse);

    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
    RubySystem *rs = ruby_port->m_ruby_system;
    if (accessPhysMem) {
        rs->getPhysMem()->access(pkt);
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        DPRINTF(RubyPort, "Sending packet back over port\n");
        // Send a response in the same cycle. There is no need to delay the
        // response because the response latency is already incurred in the
        // Ruby protocol.
        schedTimingResp(pkt, curTick());
    } else {
        delete pkt;
    }

    DPRINTF(RubyPort, "Hit callback done!\n");
}

AddrRangeList
RubyPort::PioSlavePort::getAddrRanges() const
{
    // at the moment the assumption is that the master does not care
    AddrRangeList ranges;
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
        ranges.splice(ranges.begin(),
                      ruby_port->master_ports[i]->getAddrRanges());
    }
    for (const auto M5_VAR_USED &r : ranges)
        DPRINTF(RubyPort, "%s\n", r.to_string());
    return ranges;
}

bool
RubyPort::MemSlavePort::isPhysMemAddress(Addr addr) const
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
    return ruby_port->system->isMemAddr(addr);
}

void
RubyPort::ruby_eviction_callback(Addr address)
{
    DPRINTF(RubyPort, "Sending invalidations.\n");
    // Allocate the invalidate request and packet on the stack, as it is
    // assumed they will not be modified or deleted by receivers.
    // TODO: should this really be using funcMasterId?
    Request request(address, RubySystem::getBlockSizeBytes(), 0,
                    Request::funcMasterId);
    // Use a single packet to signal all snooping ports of the invalidation.
    // This assumes that snooping ports do NOT modify the packet/request
    Packet pkt(&request, MemCmd::InvalidateReq);
    for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) {
        // check if the connected master port is snooping
        if ((*p)->isSnooping()) {
            // send as a snoop request
            (*p)->sendTimingSnoopReq(&pkt);
        }
    }
}

void
RubyPort::PioMasterPort::recvRangeChange()
{
    RubyPort &r = static_cast<RubyPort &>(owner);
    r.gotAddrRanges--;
    if (r.gotAddrRanges == 0 && FullSystem) {
        r.pioSlavePort.sendRangeChange();
    }
}
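
// The response-routing idiom used above (pushSenderState() before a request
// leaves a port, popSenderState() when the response comes back so it can be
// returned to the port it arrived on) is generic. The block below is a
// minimal, self-contained sketch of that idiom; it is guarded out of
// compilation, and the DemoPacket/DemoPort types are stand-ins invented for
// illustration only, not part of gem5's API.
#if 0
#include <cassert>
#include <iostream>
#include <stack>
#include <string>

struct DemoPort;

// A packet carries a stack of opaque sender states, one frame per hop that
// needs to see the response again on the way back.
struct DemoPacket
{
    std::stack<DemoPort *> senderStates;
    void pushSenderState(DemoPort *p) { senderStates.push(p); }
    DemoPort *popSenderState()
    {
        assert(!senderStates.empty());
        DemoPort *p = senderStates.top();
        senderStates.pop();
        return p;
    }
};

struct DemoPort
{
    std::string name;
    void sendResponse(DemoPacket &) { std::cout << "response -> " << name << "\n"; }
};

int main()
{
    DemoPort cpuSide{"cpu_side_port"};
    DemoPacket pkt;

    // Request path: remember which port the request came in on.
    pkt.pushSenderState(&cpuSide);

    // ... the request travels through the memory system ...

    // Response path: recover the originating port and hand it the response,
    // mirroring ruby_hit_callback() / MemMasterPort::recvTimingResp() above.
    DemoPort *origin = pkt.popSenderState();
    origin->sendResponse(pkt);
    return 0;
}
#endif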