template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(Default_Pri, AutoDelete),
      inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    if (!lsqPtr->isSwitchedOut()) {
        lsqPtr->writeback(inst, pkt);
    }

    if (pkt->senderState)
        delete pkt->senderState;

    delete pkt->req;
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description() const
{
    return "Store writeback";
}

template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    assert(!pkt->wasNacked());

    // If this is a split access, wait until all packets are received.
    if (TheISA::HasUnalignedMemAcc && !state->complete()) {
        delete pkt->req;
        delete pkt;
        return;
    }

    if (isSwitchedOut() || inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
    } else {
        if (!state->noWB) {
            if (!TheISA::HasUnalignedMemAcc || !state->isSplit ||
                !state->isLoad) {
                writeback(inst, pkt);
            } else {
                writeback(inst, state->mainPkt);
            }
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
        delete state->mainPkt->req;
        delete state->mainPkt;
    }
    delete state;
    delete pkt->req;
    delete pkt;
}

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false), storeInFlight(false), hasPendingPkt(false)
{
}

template<class Impl>
void
LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
                    LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
                    unsigned id)
{
    cpu = cpu_ptr;
    iewStage = iew_ptr;

    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n", id);

    switchedOut = false;

    cacheBlockMask = 0;

    lsq = lsq_ptr;

    lsqID = id;

    // Add 1 for the sentinel entry (they are circular queues).
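    // With the sentinel reserved, head == tail means empty and
    // (tail + 1) % entries == head means full, so a queue configured
    // with maxLQEntries can still hold maxLQEntries in-flight loads.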
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    depCheckShift = params->LSQDepCheckShift;
    checkLoads = params->LSQCheckLoads;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;
    cachePorts = params->cachePorts;

    retryPkt = NULL;
    memDepViolator = NULL;

    blockedLoadSeqNum = 0;
    needsTSO = params->needsTSO;
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread" + to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the "
              "instruction is squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an "
              "invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store "
              "forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the "
              "cache being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::setDcachePort(Port *dcache_port)
{
    dcachePort = dcache_port;
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::switchOut()
{
    switchedOut = true;
    for (int i = 0; i < loadQueue.size(); ++i) {
        assert(!loadQueue[i]);
        loadQueue[i] = NULL;
    }

    assert(storesToWB == 0);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    switchedOut = false;
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;

    // Just in case the memory system changed out from under us.
    cacheBlockMask = 0;
}

template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }
}
template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
            load_inst->pcState(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
            store_inst->pcState(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeEntries()
{
    unsigned free_lq_entries = LQEntries - loads;
    unsigned free_sq_entries = SQEntries - stores;

    // Both the LQ and SQ entries have an extra dummy entry to differentiate
    // empty/full conditions.  Subtract 1 from the free entries.
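    // For example, with maxLQEntries = 32, LQEntries is 33; with no
    // loads outstanding this returns 33 - 0 - 1 = 32 usable slots.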
    if (free_lq_entries < free_sq_entries) {
        return free_lq_entries - 1;
    } else {
        return free_sq_entries - 1;
    }
}

template <class Impl>
int
LSQUnit<Impl>::numLoadsReady()
{
    int load_idx = loadHead;
    int retval = 0;

    while (load_idx != loadTail) {
        assert(loadQueue[load_idx]);

        if (loadQueue[load_idx]->readyToIssue()) {
            ++retval;
        }

        // Advance to the next entry; without this the walk never
        // terminates.
        incrLdIdx(load_idx);
    }

    return retval;
}

template <class Impl>
void
LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
{
    int load_idx = loadHead;

    if (!cacheBlockMask) {
        assert(dcachePort);
        Addr bs = dcachePort->peerBlockSize();

        // Make sure we actually got a size.
        assert(bs != 0);

        cacheBlockMask = ~(bs - 1);
    }

    // If this is the only load in the LSQ we don't care.
    if (load_idx == loadTail)
        return;
    incrLdIdx(load_idx);

    DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());
    Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];

        if (!ld_inst->effAddrValid || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
                ld_inst->seqNum, load_addr, invalidate_addr);

        if (load_addr == invalidate_addr) {
            if (ld_inst->possibleLoadViolation) {
                DPRINTF(LSQUnit, "Conflicting load at addr %#x to "
                        "pktAddr:%#x [sn:%lli]\n",
                        ld_inst->physEffAddr, pkt->getAddr(),
                        ld_inst->seqNum);

                // Mark the load for re-execution.
                ld_inst->fault = new ReExec;
            } else {
                // If an older load checks this and it's true, then we
                // might have missed the snoop, in which case we need
                // to invalidate to be sure.
                ld_inst->hitExternalSnoop = true;
            }
        }
        incrLdIdx(load_idx);
    }
    return;
}

template <class Impl>
Fault
LSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst)
{
    Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
    Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;

    /** @todo In theory you only need to check an instruction that has
     * executed, but there isn't a good way in the pipeline at the moment
     * to check all instructions that will execute before the store writes
     * back.  Thus, like the implementation that came before it, we're
     * overly conservative.
     */
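    // Addresses are compared at a granularity of (1 << depCheckShift)
    // bytes; e.g., a shift of 4 treats any two accesses touching the
    // same 16-byte chunk as potentially dependent.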
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];
        if (!ld_inst->effAddrValid || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
        Addr ld_eff_addr2 =
            (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;

        if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
            if (inst->isLoad()) {
                // If this load is to the same block as an external snoop
                // invalidate that we've observed, then the load needs to be
                // squashed as it could have newer data.
                if (ld_inst->hitExternalSnoop) {
                    if (!memDepViolator ||
                        ld_inst->seqNum < memDepViolator->seqNum) {
                        DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
                                "and [sn:%lli] at address %#x\n",
                                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                        memDepViolator = ld_inst;

                        ++lsqMemOrderViolation;

                        return new GenericISA::M5PanicFault(
                            "Detected fault with inst [sn:%lli] and "
                            "[sn:%lli] at address %#x\n",
                            inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                    }
                }

                // Otherwise, mark the load as a possible load violation;
                // if we see a snoop before it commits, we need to squash it.
                ld_inst->possibleLoadViolation = true;
                DPRINTF(LSQUnit, "Found possible load violation at addr: %#x"
                        " between instructions [sn:%lli] and [sn:%lli]\n",
                        inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
            } else {
                // A load incorrectly passed this store.  If we already
                // track an older violator, keep it; otherwise record this
                // load and squash and refetch.
                if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
                    break;

                DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and "
                        "[sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                memDepViolator = ld_inst;

                ++lsqMemOrderViolation;

                return new GenericISA::M5PanicFault("Detected fault with "
                        "inst [sn:%lli] and [sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
            }
        }

        incrLdIdx(load_idx);
    }
    return NoFault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    using namespace TheISA;
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
            inst->pcState(), inst->seqNum);

    assert(!inst->isSquashed());

    load_fault = inst->initiateAcc();

    if (inst->isTranslationDelayed() &&
        load_fault == NoFault)
        return load_fault;

    // If the instruction faulted or predicated false, then we need to send it
    // along to commit without the instruction completing.
    if (load_fault != NoFault || inst->readPredicate() == false) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        // Mark it as executed unless it is an uncached load that
        // needs to hit the head of commit.
        if (inst->readPredicate() == false)
            inst->forwardOldRegs();
        DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
                inst->seqNum,
                (load_fault != NoFault ? "fault" : "predication"));
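        // An uncached load must not be marked executed until it can
        // access memory non-speculatively, i.e., once it is at the
        // head of commit.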
"fault" : "predication")); 609 if (!(inst->hasRequest() && inst->uncacheable()) || 610 inst->isAtCommit()) { 611 inst->setExecuted(); 612 } 613 iewStage->instToCommit(inst); 614 iewStage->activityThisCycle(); 615 } else if (!loadBlocked()) { 616 assert(inst->effAddrValid); 617 int load_idx = inst->lqIdx; 618 incrLdIdx(load_idx); 619 620 if (checkLoads) 621 return checkViolations(load_idx, inst); 622 } 623 624 return load_fault; 625} 626 627template <class Impl> 628Fault 629LSQUnit<Impl>::executeStore(DynInstPtr &store_inst) 630{ 631 using namespace TheISA; 632 // Make sure that a store exists. 633 assert(stores != 0); 634 635 int store_idx = store_inst->sqIdx; 636 637 DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n", 638 store_inst->pcState(), store_inst->seqNum); 639 640 assert(!store_inst->isSquashed()); 641 642 // Check the recently completed loads to see if any match this store's 643 // address. If so, then we have a memory ordering violation. 644 int load_idx = store_inst->lqIdx; 645 646 Fault store_fault = store_inst->initiateAcc(); 647 648 if (store_inst->isTranslationDelayed() && 649 store_fault == NoFault) 650 return store_fault; 651 652 if (store_inst->readPredicate() == false) 653 store_inst->forwardOldRegs(); 654 655 if (storeQueue[store_idx].size == 0) { 656 DPRINTF(LSQUnit,"Fault on Store PC %s, [sn:%lli], Size = 0\n", 657 store_inst->pcState(), store_inst->seqNum); 658 659 return store_fault; 660 } else if (store_inst->readPredicate() == false) { 661 DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n", 662 store_inst->seqNum); 663 return store_fault; 664 } 665 666 assert(store_fault == NoFault); 667 668 if (store_inst->isStoreConditional()) { 669 // Store conditionals need to set themselves as able to 670 // writeback if we haven't had a fault by here. 671 storeQueue[store_idx].canWB = true; 672 673 ++storesToWB; 674 } 675 676 return checkViolations(load_idx, store_inst); 677 678} 679 680template <class Impl> 681void 682LSQUnit<Impl>::commitLoad() 683{ 684 assert(loadQueue[loadHead]); 685 686 DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n", 687 loadQueue[loadHead]->pcState()); 688 689 loadQueue[loadHead] = NULL; 690 691 incrLdIdx(loadHead); 692 693 --loads; 694} 695 696template <class Impl> 697void 698LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst) 699{ 700 assert(loads == 0 || loadQueue[loadHead]); 701 702 while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) { 703 commitLoad(); 704 } 705} 706 707template <class Impl> 708void 709LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst) 710{ 711 assert(stores == 0 || storeQueue[storeHead].inst); 712 713 int store_idx = storeHead; 714 715 while (store_idx != storeTail) { 716 assert(storeQueue[store_idx].inst); 717 // Mark any stores that are now committed and have not yet 718 // been marked as able to write back. 719 if (!storeQueue[store_idx].canWB) { 720 if (storeQueue[store_idx].inst->seqNum > youngest_inst) { 721 break; 722 } 723 DPRINTF(LSQUnit, "Marking store as able to write back, PC " 724 "%s [sn:%lli]\n", 725 storeQueue[store_idx].inst->pcState(), 726 storeQueue[store_idx].inst->seqNum); 727 728 storeQueue[store_idx].canWB = true; 729 730 ++storesToWB; 731 } 732 733 incrStIdx(store_idx); 734 } 735} 736 737template <class Impl> 738void 739LSQUnit<Impl>::writebackPendingStore() 740{ 741 if (hasPendingPkt) { 742 assert(pendingPkt != NULL); 743 744 // If the cache is blocked, this will store the packet for retry. 
        if (sendStore(pendingPkt)) {
            storePostSend(pendingPkt);
        }
        pendingPkt = NULL;
        hasPendingPkt = false;
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    // First writeback the second packet from any split store that didn't
    // complete last cycle because there weren't enough cache ports available.
    if (TheISA::HasUnalignedMemAcc) {
        writebackPendingStore();
    }

    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           ((!needsTSO) || (!storeInFlight)) &&
           usedPorts < cachePorts) {

        if (isStoreBlocked || lsq->cacheBlocked()) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
            assert(storeQueue[storeWBIdx].sreqLow);
            assert(storeQueue[storeWBIdx].sreqHigh);
        }

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
        RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;

        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[64];

        memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());

        MemCmd command =
            req->isSwap() ? MemCmd::SwapReq :
            (req->isLLSC() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
        PacketPtr data_pkt;
        PacketPtr snd_data_pkt = NULL;

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;

        if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {

            // Build a single data packet if the store isn't split.
            data_pkt = new Packet(req, command, Packet::Broadcast);
            data_pkt->dataStatic(inst->memData);
            data_pkt->senderState = state;
        } else {
            // Create two packets if the store is split in two.
            data_pkt = new Packet(sreqLow, command, Packet::Broadcast);
            snd_data_pkt = new Packet(sreqHigh, command, Packet::Broadcast);

            data_pkt->dataStatic(inst->memData);
            snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());

            data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;

            // Can delete the main request now.
            delete req;
            req = sreqLow;
        }

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->pcState(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            assert(!storeQueue[storeWBIdx].isSplit);
            // Disable recording the result temporarily.  Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
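            // handleLockedWrite() consults the ISA's lock flag (set by
            // the matching load-locked) and reports whether this store
            // conditional is allowed to perform its write.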
            inst->recordResult = false;
            bool success = TheISA::handleLockedWrite(inst.get(), req);
            inst->recordResult = true;

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed.  "
                        "Instantly completing it.\n",
                        inst->seqNum);
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                cpu->schedule(wb, curTick() + 1);
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        bool split =
            TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;

        ThreadContext *thread = cpu->tcBase(lsqID);

        if (req->isMmappedIpr()) {
            assert(!inst->isStoreConditional());
            TheISA::handleIprWrite(thread, data_pkt);
            delete data_pkt;
            if (split) {
                assert(snd_data_pkt->req->isMmappedIpr());
                TheISA::handleIprWrite(thread, snd_data_pkt);
                delete snd_data_pkt;
                delete sreqLow;
                delete sreqHigh;
            }
            delete state;
            delete req;
            completeStore(storeWBIdx);
            incrStIdx(storeWBIdx);
        } else if (!sendStore(data_pkt)) {
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], "
                    "will retry later\n",
                    inst->seqNum);

            // Need to store the second packet, if split.
            if (split) {
                state->pktToSend = true;
                state->pendingPacket = snd_data_pkt;
            }
        } else {

            // If split, try to send the second packet too.
            if (split) {
                assert(snd_data_pkt);

                // Ensure there are enough ports to use.
                if (usedPorts < cachePorts) {
                    ++usedPorts;
                    if (sendStore(snd_data_pkt)) {
                        storePostSend(snd_data_pkt);
                    } else {
                        DPRINTF(IEW, "D-Cache became blocked when writing"
                                " [sn:%lli] second packet, will retry later\n",
                                inst->seqNum);
                    }
                } else {

                    // Store the packet for when there are free ports.
                    assert(pendingPkt == NULL);
                    pendingPkt = snd_data_pkt;
                    hasPendingPkt = true;
                }
            } else {

                // Not a split store.
                storePostSend(data_pkt);
            }
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n", mshrSeqNums.size());
    }
}*/

template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit, "Load Instruction PC %s squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->pcState(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
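        // The tail is rewound one entry at a time as each younger load
        // is squashed, which keeps the circular queue consistent at
        // every step but costs a write per squashed entry.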
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit, "Store Instruction PC %s squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->pcState(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen.  It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;
        if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
            delete storeQueue[store_idx].sreqLow;
            delete storeQueue[store_idx].sreqHigh;

            storeQueue[store_idx].sreqLow = NULL;
            storeQueue[store_idx].sreqHigh = NULL;
        }

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time.  This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();