RubyPort.cc revision 8232
/*
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config/the_isa.hh"
#if THE_ISA == X86_ISA
#include "arch/x86/insts/microldstop.hh"
#endif // X86_ISA
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/Ruby.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/RubyPort.hh"
#include "mem/physical.hh"

RubyPort::RubyPort(const Params *p)
    : MemObject(p)
{
    m_version = p->version;
    assert(m_version != -1);

    physmem = p->physmem;

    m_controller = NULL;
    m_mandatory_q_ptr = NULL;

    m_request_cnt = 0;
    pio_port = NULL;
    physMemPort = NULL;

    m_usingRubyTester = p->using_ruby_tester;
    access_phys_mem = p->access_phys_mem;
}

void
RubyPort::init()
{
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
}

Port *
RubyPort::getPort(const std::string &if_name, int idx)
{
    if (if_name == "port") {
        return new M5Port(csprintf("%s-port%d", name(), idx), this,
                          access_phys_mem);
    }

    if (if_name == "pio_port") {
        // ensure there is only one pio port
        assert(pio_port == NULL);

        pio_port = new PioPort(csprintf("%s-pio-port%d", name(), idx), this);

        return pio_port;
    }

    if (if_name == "physMemPort") {
        // RubyPort should only have one port to physical memory
        assert(physMemPort == NULL);

        physMemPort = new M5Port(csprintf("%s-physMemPort", name()), this,
                                 access_phys_mem);

        return physMemPort;
    }

    if (if_name == "functional") {
        // Calls for the functional port only want to access functional
        // memory. Therefore, pass these calls directly to physmem.
        assert(physmem != NULL);
        return physmem->getPort(if_name, idx);
    }

    return NULL;
}

RubyPort::PioPort::PioPort(const std::string &_name,
                           RubyPort *_port)
    : SimpleTimingPort(_name, _port)
{
    DPRINTF(RubyPort, "creating port from ruby sequencer to cpu %s\n", _name);
    ruby_port = _port;
}

RubyPort::M5Port::M5Port(const std::string &_name,
                         RubyPort *_port, bool _access_phys_mem)
    : SimpleTimingPort(_name, _port)
{
    DPRINTF(RubyPort, "creating port from ruby sequencer to cpu %s\n", _name);
    ruby_port = _port;
    _onRetryList = false;
    access_phys_mem = _access_phys_mem;
}

Tick
RubyPort::PioPort::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::PioPort::recvAtomic() not implemented!\n");
    return 0;
}

Tick
RubyPort::M5Port::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::M5Port::recvAtomic() not implemented!\n");
    return 0;
}

bool
RubyPort::PioPort::recvTiming(PacketPtr pkt)
{
    // In FS mode, ruby memory will receive pio responses from devices
    // and it must forward these responses back to the particular CPU.
    DPRINTF(RubyPort, "Pio response for address %#x\n", pkt->getAddr());

    assert(pkt->isResponse());

    // First we must retrieve the request port from the sender state
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->senderState);
    M5Port *port = senderState->port;
    assert(port != NULL);

    // pop the sender state from the packet
    pkt->senderState = senderState->saved;
    delete senderState;

    port->sendTiming(pkt);

    return true;
}

bool
RubyPort::M5Port::recvTiming(PacketPtr pkt)
{
    DPRINTF(RubyPort,
            "Timing access caught for address %#x\n", pkt->getAddr());

    // dsm: based on SimpleTimingPort::recvTiming(pkt);

    // The received packets should only be M5 requests, which should never
    // get nacked. There used to be code to handle nacks here, but
    // I'm pretty sure it didn't work correctly with the drain code,
    // so that would need to be fixed if we ever added it back.
    assert(pkt->isRequest());

    if (pkt->memInhibitAsserted()) {
        warn("memInhibitAsserted???");
        // snooper will supply based on copy of packet
        // still target's responsibility to delete packet
        delete pkt;
        return true;
    }

    // Save the port in the sender state object to be used later to
    // route the response
    pkt->senderState = new SenderState(this, pkt->senderState);

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(ruby_port->pio_port != NULL);
        DPRINTF(RubyPort,
                "Request for address %#x is assumed to be a pio request\n",
                pkt->getAddr());

        return ruby_port->pio_port->sendTiming(pkt);
    }

    // For DMA and CPU requests, translate them to ruby requests before
    // sending them to our assigned ruby port.
    RubyRequestType type = RubyRequestType_NULL;

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            DPRINTF(RubyPort, "Issuing SC\n");
            type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubyPort, "Issuing LL\n");
            assert(pkt->isRead());
            type = RubyRequestType_Load_Linked;
        }
    } else if (pkt->req->isLocked()) {
        if (pkt->isWrite()) {
            DPRINTF(RubyPort, "Issuing Locked RMW Write\n");
            type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubyPort, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            type = RubyRequestType_Locked_RMW_Read;
        }
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                type = RubyRequestType_IFETCH;
            } else {
#if THE_ISA == X86_ISA
                uint32_t flags = pkt->req->getFlags();
                bool storeCheck = flags &
                    (TheISA::StoreCheck << TheISA::FlagShift);
#else
                bool storeCheck = false;
#endif // X86_ISA
                if (storeCheck) {
                    type = RubyRequestType_RMW_Read;
                } else {
                    type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            type = RubyRequestType_ST;
        } else if (pkt->isFlush()) {
            type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RubyRequest ruby_request(pkt->getAddr(), pkt->getPtr<uint8_t>(true),
                             pkt->getSize(), pc, type,
                             RubyAccessMode_Supervisor, pkt);

    assert(ruby_request.m_PhysicalAddress.getOffset() + ruby_request.m_Size <=
           RubySystem::getBlockSizeBytes());

    // Submit the ruby request
    RequestStatus requestStatus = ruby_port->makeRequest(ruby_request);

    // If the request successfully issued, then we should return true.
    // Otherwise, we need to delete the SenderState we just created and
    // return false.
    if (requestStatus == RequestStatus_Issued) {
        DPRINTF(RubyPort, "Request %#x issued\n", pkt->getAddr());
        return true;
    }

    //
    // Unless one is using the ruby tester, record the stalled M5 port for
    // later retry when the sequencer becomes free.
    //
    if (!ruby_port->m_usingRubyTester) {
        ruby_port->addToRetryList(this);
    }

    DPRINTF(RubyPort,
            "Request for address %#x did not issue because %s\n",
            pkt->getAddr(), RequestStatus_to_string(requestStatus));

    SenderState* senderState = safe_cast<SenderState*>(pkt->senderState);
    pkt->senderState = senderState->saved;
    delete senderState;
    return false;
}

void
RubyPort::ruby_hit_callback(PacketPtr pkt)
{
    // Retrieve the request port from the sender state
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->senderState);
    M5Port *port = senderState->port;
    assert(port != NULL);

    // pop the sender state from the packet
    pkt->senderState = senderState->saved;
    delete senderState;

    port->hitCallback(pkt);

    //
    // If we had to stall the M5Ports, wake them up because the sequencer
    // likely has free resources now.
    //
    if (waitingOnSequencer) {
        //
        // Record the current list of ports to retry on a temporary list before
        // calling sendRetry on those ports. sendRetry will cause an
        // immediate retry, which may result in the ports being put back on the
        // list.
        // Therefore we want to clear the retryList before calling
        // sendRetry.
        //
        std::list<M5Port*> curRetryList(retryList);

        retryList.clear();
        waitingOnSequencer = false;

        for (std::list<M5Port*>::iterator i = curRetryList.begin();
             i != curRetryList.end(); ++i) {
            DPRINTF(RubyPort,
                    "Sequencer may now be free. SendRetry to port %s\n",
                    (*i)->name());
            (*i)->onRetryList(false);
            (*i)->sendRetry();
        }
    }
}

void
RubyPort::M5Port::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();

    //
    // Unless specified at configuration, all responses except failed SC
    // and Flush operations access M5 physical memory.
    //
    bool accessPhysMem = access_phys_mem;

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            if (pkt->req->getExtraData() != 0) {
                //
                // Successful SC packets convert to normal writes
                //
                pkt->convertScToWrite();
            } else {
                //
                // Failed SC packets don't access physical memory and thus
                // the RubyPort itself must convert it to a response.
                //
                accessPhysMem = false;
            }
        } else {
            //
            // All LL packets convert to normal loads so that M5 PhysMem does
            // not lock the blocks.
            //
            pkt->convertLlToRead();
        }
    }

    //
    // Flush requests don't access physical memory
    //
    if (pkt->isFlush()) {
        accessPhysMem = false;
    }

    DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse);

    if (accessPhysMem) {
        ruby_port->physMemPort->sendAtomic(pkt);
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        DPRINTF(RubyPort, "Sending packet back over port\n");
        sendTiming(pkt);
    } else {
        delete pkt;
    }
    DPRINTF(RubyPort, "Hit callback done!\n");
}

bool
RubyPort::M5Port::sendTiming(PacketPtr pkt)
{
    // minimum latency, must be > 0
    schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
    return true;
}

bool
RubyPort::PioPort::sendTiming(PacketPtr pkt)
{
    // minimum latency, must be > 0
    schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
    return true;
}

bool
RubyPort::M5Port::isPhysMemAddress(Addr addr)
{
    AddrRangeList physMemAddrList;
    bool snoop = false;
    ruby_port->physMemPort->getPeerAddressRanges(physMemAddrList, snoop);
    for (AddrRangeIter iter = physMemAddrList.begin();
         iter != physMemAddrList.end();
         iter++) {
        if (addr >= iter->start && addr <= iter->end) {
            DPRINTF(RubyPort, "Request found in %#llx - %#llx range\n",
                    iter->start, iter->end);
            return true;
        }
    }
    return false;
}

unsigned
RubyPort::M5Port::deviceBlockSize() const
{
    return (unsigned) RubySystem::getBlockSizeBytes();
}