// RubyPort.cc revision 7910:8a92b39be50e
/*
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */ 28 29#include "config/the_isa.hh" 30#if THE_ISA == X86_ISA 31#include "arch/x86/insts/microldstop.hh" 32#endif // X86_ISA 33#include "cpu/testers/rubytest/RubyTester.hh" 34#include "mem/physical.hh" 35#include "mem/ruby/slicc_interface/AbstractController.hh" 36#include "mem/ruby/system/RubyPort.hh" 37 38RubyPort::RubyPort(const Params *p) 39 : MemObject(p) 40{ 41 m_version = p->version; 42 assert(m_version != -1); 43 44 physmem = p->physmem; 45 46 m_controller = NULL; 47 m_mandatory_q_ptr = NULL; 48 49 m_request_cnt = 0; 50 pio_port = NULL; 51 physMemPort = NULL; 52 53 m_usingRubyTester = p->using_ruby_tester; 54} 55 56void 57RubyPort::init() 58{ 59 assert(m_controller != NULL); 60 m_mandatory_q_ptr = m_controller->getMandatoryQueue(); 61} 62 63Port * 64RubyPort::getPort(const std::string &if_name, int idx) 65{ 66 if (if_name == "port") { 67 return new M5Port(csprintf("%s-port%d", name(), idx), this); 68 } 69 70 if (if_name == "pio_port") { 71 // ensure there is only one pio port 72 assert(pio_port == NULL); 73 74 pio_port = new PioPort(csprintf("%s-pio-port%d", name(), idx), this); 75 76 return pio_port; 77 } 78 79 if (if_name == "physMemPort") { 80 // RubyPort should only have one port to physical memory 81 assert (physMemPort == NULL); 82 83 physMemPort = new M5Port(csprintf("%s-physMemPort", name()), this); 84 85 return physMemPort; 86 } 87 88 if (if_name == "functional") { 89 // Calls for the functional port only want to access 90 // functional memory. Therefore, directly pass these calls 91 // ports to physmem. 
92 assert(physmem != NULL); 93 return physmem->getPort(if_name, idx); 94 } 95 96 return NULL; 97} 98 99RubyPort::PioPort::PioPort(const std::string &_name, 100 RubyPort *_port) 101 : SimpleTimingPort(_name, _port) 102{ 103 DPRINTF(Ruby, "creating port to ruby sequencer to cpu %s\n", _name); 104 ruby_port = _port; 105} 106 107RubyPort::M5Port::M5Port(const std::string &_name, 108 RubyPort *_port) 109 : SimpleTimingPort(_name, _port) 110{ 111 DPRINTF(Ruby, "creating port from ruby sequcner to cpu %s\n", _name); 112 ruby_port = _port; 113 _onRetryList = false; 114} 115 116Tick 117RubyPort::PioPort::recvAtomic(PacketPtr pkt) 118{ 119 panic("RubyPort::PioPort::recvAtomic() not implemented!\n"); 120 return 0; 121} 122 123Tick 124RubyPort::M5Port::recvAtomic(PacketPtr pkt) 125{ 126 panic("RubyPort::M5Port::recvAtomic() not implemented!\n"); 127 return 0; 128} 129 130 131bool 132RubyPort::PioPort::recvTiming(PacketPtr pkt) 133{ 134 // In FS mode, ruby memory will receive pio responses from devices 135 // and it must forward these responses back to the particular CPU. 136 DPRINTF(MemoryAccess, "Pio response for address %#x\n", pkt->getAddr()); 137 138 assert(pkt->isResponse()); 139 140 // First we must retrieve the request port from the sender State 141 RubyPort::SenderState *senderState = 142 safe_cast<RubyPort::SenderState *>(pkt->senderState); 143 M5Port *port = senderState->port; 144 assert(port != NULL); 145 146 // pop the sender state from the packet 147 pkt->senderState = senderState->saved; 148 delete senderState; 149 150 port->sendTiming(pkt); 151 152 return true; 153} 154 155bool 156RubyPort::M5Port::recvTiming(PacketPtr pkt) 157{ 158 DPRINTF(MemoryAccess, 159 "Timing access caught for address %#x\n", pkt->getAddr()); 160 161 //dsm: based on SimpleTimingPort::recvTiming(pkt); 162 163 // The received packets should only be M5 requests, which should never 164 // get nacked. 
There used to be code to hanldle nacks here, but 165 // I'm pretty sure it didn't work correctly with the drain code, 166 // so that would need to be fixed if we ever added it back. 167 assert(pkt->isRequest()); 168 169 if (pkt->memInhibitAsserted()) { 170 warn("memInhibitAsserted???"); 171 // snooper will supply based on copy of packet 172 // still target's responsibility to delete packet 173 delete pkt; 174 return true; 175 } 176 177 // Save the port in the sender state object to be used later to 178 // route the response 179 pkt->senderState = new SenderState(this, pkt->senderState); 180 181 // Check for pio requests and directly send them to the dedicated 182 // pio port. 183 if (!isPhysMemAddress(pkt->getAddr())) { 184 assert(ruby_port->pio_port != NULL); 185 DPRINTF(MemoryAccess, 186 "Request for address 0x%#x is assumed to be a pio request\n", 187 pkt->getAddr()); 188 189 return ruby_port->pio_port->sendTiming(pkt); 190 } 191 192 // For DMA and CPU requests, translate them to ruby requests before 193 // sending them to our assigned ruby port. 
194 RubyRequestType type = RubyRequestType_NULL; 195 196 // If valid, copy the pc to the ruby request 197 Addr pc = 0; 198 if (pkt->req->hasPC()) { 199 pc = pkt->req->getPC(); 200 } 201 202 if (pkt->isLLSC()) { 203 if (pkt->isWrite()) { 204 DPRINTF(MemoryAccess, "Issuing SC\n"); 205 type = RubyRequestType_Store_Conditional; 206 } else { 207 DPRINTF(MemoryAccess, "Issuing LL\n"); 208 assert(pkt->isRead()); 209 type = RubyRequestType_Load_Linked; 210 } 211 } else if (pkt->req->isLocked()) { 212 if (pkt->isWrite()) { 213 DPRINTF(MemoryAccess, "Issuing Locked RMW Write\n"); 214 type = RubyRequestType_Locked_RMW_Write; 215 } else { 216 DPRINTF(MemoryAccess, "Issuing Locked RMW Read\n"); 217 assert(pkt->isRead()); 218 type = RubyRequestType_Locked_RMW_Read; 219 } 220 } else { 221 if (pkt->isRead()) { 222 if (pkt->req->isInstFetch()) { 223 type = RubyRequestType_IFETCH; 224 } else { 225#if THE_ISA == X86_ISA 226 uint32_t flags = pkt->req->getFlags(); 227 bool storeCheck = flags & 228 (TheISA::StoreCheck << TheISA::FlagShift); 229#else 230 bool storeCheck = false; 231#endif // X86_ISA 232 if (storeCheck) { 233 type = RubyRequestType_RMW_Read; 234 } else { 235 type = RubyRequestType_LD; 236 } 237 } 238 } else if (pkt->isWrite()) { 239 // 240 // Note: M5 packets do not differentiate ST from RMW_Write 241 // 242 type = RubyRequestType_ST; 243 } else { 244 panic("Unsupported ruby packet type\n"); 245 } 246 } 247 248 RubyRequest ruby_request(pkt->getAddr(), pkt->getPtr<uint8_t>(), 249 pkt->getSize(), pc, type, 250 RubyAccessMode_Supervisor, pkt); 251 252 assert(Address(ruby_request.paddr).getOffset() + ruby_request.len <= 253 RubySystem::getBlockSizeBytes()); 254 255 // Submit the ruby request 256 RequestStatus requestStatus = ruby_port->makeRequest(ruby_request); 257 258 // If the request successfully issued then we should return true. 259 // Otherwise, we need to delete the senderStatus we just created and return 260 // false. 
261 if (requestStatus == RequestStatus_Issued) { 262 DPRINTF(MemoryAccess, "Request %x issued\n", pkt->getAddr()); 263 return true; 264 } 265 266 // 267 // Unless one is using the ruby tester, record the stalled M5 port for 268 // later retry when the sequencer becomes free. 269 // 270 if (!ruby_port->m_usingRubyTester) { 271 ruby_port->addToRetryList(this); 272 } 273 274 DPRINTF(MemoryAccess, 275 "Request for address %#x did not issue because %s\n", 276 pkt->getAddr(), RequestStatus_to_string(requestStatus)); 277 278 SenderState* senderState = safe_cast<SenderState*>(pkt->senderState); 279 pkt->senderState = senderState->saved; 280 delete senderState; 281 return false; 282} 283 284void 285RubyPort::ruby_hit_callback(PacketPtr pkt) 286{ 287 // Retrieve the request port from the sender State 288 RubyPort::SenderState *senderState = 289 safe_cast<RubyPort::SenderState *>(pkt->senderState); 290 M5Port *port = senderState->port; 291 assert(port != NULL); 292 293 // pop the sender state from the packet 294 pkt->senderState = senderState->saved; 295 delete senderState; 296 297 port->hitCallback(pkt); 298 299 // 300 // If we had to stall the M5Ports, wake them up because the sequencer 301 // likely has free resources now. 302 // 303 if (waitingOnSequencer) { 304 for (std::list<M5Port*>::iterator i = retryList.begin(); 305 i != retryList.end(); ++i) { 306 (*i)->sendRetry(); 307 (*i)->onRetryList(false); 308 DPRINTF(MemoryAccess, 309 "Sequencer may now be free. 
SendRetry to port %s\n", 310 (*i)->name()); 311 } 312 retryList.clear(); 313 waitingOnSequencer = false; 314 } 315} 316 317void 318RubyPort::M5Port::hitCallback(PacketPtr pkt) 319{ 320 bool needsResponse = pkt->needsResponse(); 321 322 // 323 // All responses except failed SC operations access M5 physical memory 324 // 325 bool accessPhysMem = true; 326 327 if (pkt->isLLSC()) { 328 if (pkt->isWrite()) { 329 if (pkt->req->getExtraData() != 0) { 330 // 331 // Successful SC packets convert to normal writes 332 // 333 pkt->convertScToWrite(); 334 } else { 335 // 336 // Failed SC packets don't access physical memory and thus 337 // the RubyPort itself must convert it to a response. 338 // 339 accessPhysMem = false; 340 pkt->makeAtomicResponse(); 341 } 342 } else { 343 // 344 // All LL packets convert to normal loads so that M5 PhysMem does 345 // not lock the blocks. 346 // 347 pkt->convertLlToRead(); 348 } 349 } 350 DPRINTF(MemoryAccess, "Hit callback needs response %d\n", needsResponse); 351 352 if (accessPhysMem) { 353 ruby_port->physMemPort->sendAtomic(pkt); 354 } 355 356 // turn packet around to go back to requester if response expected 357 if (needsResponse) { 358 // sendAtomic() should already have turned packet into 359 // atomic response 360 assert(pkt->isResponse()); 361 DPRINTF(MemoryAccess, "Sending packet back over port\n"); 362 sendTiming(pkt); 363 } else { 364 delete pkt; 365 } 366 DPRINTF(MemoryAccess, "Hit callback done!\n"); 367} 368 369bool 370RubyPort::M5Port::sendTiming(PacketPtr pkt) 371{ 372 //minimum latency, must be > 0 373 schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock())); 374 return true; 375} 376 377bool 378RubyPort::PioPort::sendTiming(PacketPtr pkt) 379{ 380 //minimum latency, must be > 0 381 schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock())); 382 return true; 383} 384 385bool 386RubyPort::M5Port::isPhysMemAddress(Addr addr) 387{ 388 AddrRangeList physMemAddrList; 389 bool snoop = false; 390 
ruby_port->physMemPort->getPeerAddressRanges(physMemAddrList, snoop); 391 for (AddrRangeIter iter = physMemAddrList.begin(); 392 iter != physMemAddrList.end(); 393 iter++) { 394 if (addr >= iter->start && addr <= iter->end) { 395 DPRINTF(MemoryAccess, "Request found in %#llx - %#llx range\n", 396 iter->start, iter->end); 397 return true; 398 } 399 } 400 return false; 401} 402 403unsigned 404RubyPort::M5Port::deviceBlockSize() const 405{ 406 return (unsigned) RubySystem::getBlockSizeBytes(); 407} 408