RubyPort.cc revision 7909
/*
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config/the_isa.hh"
#if THE_ISA == X86_ISA
#include "arch/x86/insts/microldstop.hh"
#endif // X86_ISA
#include "cpu/testers/rubytest/RubyTester.hh"
#include "mem/physical.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/RubyPort.hh"

RubyPort::RubyPort(const Params *p)
    : MemObject(p)
{
    m_version = p->version;
    assert(m_version != -1);

    physmem = p->physmem;

    m_controller = NULL;
    m_mandatory_q_ptr = NULL;

    m_request_cnt = 0;
    pio_port = NULL;
    physMemPort = NULL;
}

void
RubyPort::init()
{
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
}

Port *
RubyPort::getPort(const std::string &if_name, int idx)
{
    if (if_name == "port") {
        return new M5Port(csprintf("%s-port%d", name(), idx), this);
    }

    if (if_name == "pio_port") {
        // ensure there is only one pio port
        assert(pio_port == NULL);

        pio_port = new PioPort(csprintf("%s-pio-port%d", name(), idx), this);

        return pio_port;
    }

    if (if_name == "physMemPort") {
        // RubyPort should only have one port to physical memory
        assert(physMemPort == NULL);

        physMemPort = new M5Port(csprintf("%s-physMemPort", name()), this);

        return physMemPort;
    }

    if (if_name == "functional") {
        // Calls for the functional port only want to access
        // functional memory. Therefore, directly pass these calls
        // along to physmem.
        assert(physmem != NULL);
        return physmem->getPort(if_name, idx);
    }

    return NULL;
}

RubyPort::PioPort::PioPort(const std::string &_name,
                           RubyPort *_port)
    : SimpleTimingPort(_name, _port)
{
    DPRINTF(Ruby, "creating port to ruby sequencer to cpu %s\n", _name);
    ruby_port = _port;
}

RubyPort::M5Port::M5Port(const std::string &_name,
                         RubyPort *_port)
    : SimpleTimingPort(_name, _port)
{
    DPRINTF(Ruby, "creating port from ruby sequencer to cpu %s\n", _name);
    ruby_port = _port;
}

Tick
RubyPort::PioPort::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::PioPort::recvAtomic() not implemented!\n");
    return 0;
}

Tick
RubyPort::M5Port::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::M5Port::recvAtomic() not implemented!\n");
    return 0;
}

bool
RubyPort::PioPort::recvTiming(PacketPtr pkt)
{
    // In FS mode, ruby memory will receive pio responses from devices
    // and it must forward these responses back to the particular CPU.
    DPRINTF(MemoryAccess, "Pio response for address %#x\n", pkt->getAddr());

    assert(pkt->isResponse());

    // First we must retrieve the request port from the sender state
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->senderState);
    M5Port *port = senderState->port;
    assert(port != NULL);

    // pop the sender state from the packet
    pkt->senderState = senderState->saved;
    delete senderState;

    port->sendTiming(pkt);

    return true;
}

bool
RubyPort::M5Port::recvTiming(PacketPtr pkt)
{
    DPRINTF(MemoryAccess,
            "Timing access caught for address %#x\n", pkt->getAddr());

    //dsm: based on SimpleTimingPort::recvTiming(pkt);

    // The received packets should only be M5 requests, which should never
    // get nacked. There used to be code to handle nacks here, but
    // I'm pretty sure it didn't work correctly with the drain code,
    // so that would need to be fixed if we ever added it back.
    assert(pkt->isRequest());

    if (pkt->memInhibitAsserted()) {
        warn("memInhibitAsserted???");
        // snooper will supply based on copy of packet
        // still target's responsibility to delete packet
        delete pkt;
        return true;
    }

    // Save the port in the sender state object to be used later to
    // route the response
    pkt->senderState = new SenderState(this, pkt->senderState);

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(ruby_port->pio_port != NULL);
        DPRINTF(MemoryAccess,
                "Request for address %#x is assumed to be a pio request\n",
                pkt->getAddr());

        return ruby_port->pio_port->sendTiming(pkt);
    }

    // For DMA and CPU requests, translate them to ruby requests before
    // sending them to our assigned ruby port.
    RubyRequestType type = RubyRequestType_NULL;

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            DPRINTF(MemoryAccess, "Issuing SC\n");
            type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(MemoryAccess, "Issuing LL\n");
            assert(pkt->isRead());
            type = RubyRequestType_Load_Linked;
        }
    } else if (pkt->req->isLocked()) {
        if (pkt->isWrite()) {
            DPRINTF(MemoryAccess, "Issuing Locked RMW Write\n");
            type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(MemoryAccess, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            type = RubyRequestType_Locked_RMW_Read;
        }
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                type = RubyRequestType_IFETCH;
            } else {
#if THE_ISA == X86_ISA
                uint32_t flags = pkt->req->getFlags();
                bool storeCheck = flags &
                        (TheISA::StoreCheck << TheISA::FlagShift);
#else
                bool storeCheck = false;
#endif // X86_ISA
                if (storeCheck) {
                    type = RubyRequestType_RMW_Read;
                } else {
                    type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            type = RubyRequestType_ST;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RubyRequest ruby_request(pkt->getAddr(), pkt->getPtr<uint8_t>(),
                             pkt->getSize(), pc, type,
                             RubyAccessMode_Supervisor, pkt);

    assert(Address(ruby_request.paddr).getOffset() + ruby_request.len <=
           RubySystem::getBlockSizeBytes());

    // Submit the ruby request
    RequestStatus requestStatus = ruby_port->makeRequest(ruby_request);

    // If the request successfully issued, then we should return true.
    // Otherwise, we need to delete the senderState we just created and
    // return false.
    if (requestStatus == RequestStatus_Issued) {
        return true;
    }

    DPRINTF(MemoryAccess,
            "Request for address %#x did not issue because %s\n",
            pkt->getAddr(), RequestStatus_to_string(requestStatus));

    SenderState* senderState = safe_cast<SenderState*>(pkt->senderState);
    pkt->senderState = senderState->saved;
    delete senderState;
    return false;
}

void
RubyPort::ruby_hit_callback(PacketPtr pkt)
{
    // Retrieve the request port from the sender state
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->senderState);
    M5Port *port = senderState->port;
    assert(port != NULL);

    // pop the sender state from the packet
    pkt->senderState = senderState->saved;
    delete senderState;

    port->hitCallback(pkt);
}

void
RubyPort::M5Port::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();

    //
    // All responses except failed SC operations access M5 physical memory
    //
    bool accessPhysMem = true;

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            if (pkt->req->getExtraData() != 0) {
                //
                // Successful SC packets convert to normal writes
                //
                pkt->convertScToWrite();
            } else {
                //
                // Failed SC packets don't access physical memory and thus
                // the RubyPort itself must convert it to a response.
                //
                accessPhysMem = false;
                pkt->makeAtomicResponse();
            }
        } else {
            //
            // All LL packets convert to normal loads so that M5 PhysMem does
            // not lock the blocks.
            //
            pkt->convertLlToRead();
        }
    }
    DPRINTF(MemoryAccess, "Hit callback needs response %d\n", needsResponse);

    if (accessPhysMem) {
        ruby_port->physMemPort->sendAtomic(pkt);
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // sendAtomic() should already have turned packet into
        // atomic response
        assert(pkt->isResponse());
        DPRINTF(MemoryAccess, "Sending packet back over port\n");
        sendTiming(pkt);
    } else {
        delete pkt;
    }
    DPRINTF(MemoryAccess, "Hit callback done!\n");
}

bool
RubyPort::M5Port::sendTiming(PacketPtr pkt)
{
    // minimum latency, must be > 0
    schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
    return true;
}

bool
RubyPort::PioPort::sendTiming(PacketPtr pkt)
{
    // minimum latency, must be > 0
    schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
    return true;
}

bool
RubyPort::M5Port::isPhysMemAddress(Addr addr)
{
    AddrRangeList physMemAddrList;
    bool snoop = false;
    ruby_port->physMemPort->getPeerAddressRanges(physMemAddrList, snoop);
    for (AddrRangeIter iter = physMemAddrList.begin();
         iter != physMemAddrList.end();
         iter++) {
        if (addr >= iter->start && addr <= iter->end) {
            DPRINTF(MemoryAccess, "Request found in %#llx - %#llx range\n",
                    iter->start, iter->end);
            return true;
        }
    }
    return false;
}

unsigned
RubyPort::M5Port::deviceBlockSize() const
{
    return (unsigned) RubySystem::getBlockSizeBytes();
}