/*
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#include "arch/generic/debugfaults.hh"
#include "arch/locked_mem.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/o3/lsq.hh"
#include "cpu/o3/lsq_unit.hh"
#include "debug/Activity.hh"
#include "debug/IEW.hh"
#include "debug/LSQUnit.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(Default_Pri, AutoDelete),
      inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    if (!lsqPtr->isSwitchedOut()) {
        lsqPtr->writeback(inst, pkt);
    }

    if (pkt->senderState)
        delete pkt->senderState;

    delete pkt->req;
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description() const
{
    return "Store writeback";
}

template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    // If this is a split access, wait until all packets are received.
    if (TheISA::HasUnalignedMemAcc && !state->complete()) {
        delete pkt->req;
        delete pkt;
        return;
    }

    if (isSwitchedOut() || inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
    } else {
        if (!state->noWB) {
            if (!TheISA::HasUnalignedMemAcc || !state->isSplit ||
                !state->isLoad) {
                writeback(inst, pkt);
            } else {
                writeback(inst, state->mainPkt);
            }
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
        delete state->mainPkt->req;
        delete state->mainPkt;
    }
    delete state;
    delete pkt->req;
    delete pkt;
}

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false), storeInFlight(false), hasPendingPkt(false)
{
}

template<class Impl>
void
LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
        LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
        unsigned id)
{
    cpu = cpu_ptr;
    iewStage = iew_ptr;

    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n", id);

    switchedOut = false;

    cacheBlockMask = 0;

    lsq = lsq_ptr;

    lsqID = id;

    // Add 1 for the sentinel entry (they are circular queues).
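    // With the sentinel, a queue sized for N instructions uses N + 1 slots:
    // head == tail means empty, while (tail + 1) % (N + 1) == head means
    // full.  For example, maxLQEntries = 32 gives LQEntries = 33, and the
    // assert in insertLoad() below rejects a 33rd in-flight load.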
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    depCheckShift = params->LSQDepCheckShift;
    checkLoads = params->LSQCheckLoads;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;
    cachePorts = params->cachePorts;

    retryPkt = NULL;
    memDepViolator = NULL;

    blockedLoadSeqNum = 0;
    needsTSO = params->needsTSO;
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread" + to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the "
              "instruction is squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an "
              "invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store "
              "forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the "
              "cache being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port)
{
    dcachePort = dcache_port;
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::switchOut()
{
    switchedOut = true;
    for (int i = 0; i < loadQueue.size(); ++i) {
        assert(!loadQueue[i]);
        loadQueue[i] = NULL;
    }

    assert(storesToWB == 0);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    switchedOut = false;
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;

    // Just in case the memory system changed out from under us
    cacheBlockMask = 0;
}

template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }
}
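// Note: resizeLQ() asserts that the load queue never shrinks below its
// current capacity, so its else branch only fires when the size is
// unchanged.  resizeSQ() below accepts a smaller size by lowering SQEntries
// alone, leaving the extra storeQueue slots allocated but unused.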
template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
            load_inst->pcState(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
            store_inst->pcState(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeEntries()
{
    unsigned free_lq_entries = LQEntries - loads;
    unsigned free_sq_entries = SQEntries - stores;

    // Both the LQ and SQ entries have an extra dummy entry to differentiate
    // empty/full conditions.  Subtract 1 from the free entries.
    if (free_lq_entries < free_sq_entries) {
        return free_lq_entries - 1;
    } else {
        return free_sq_entries - 1;
    }
}

template <class Impl>
void
LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
{
    int load_idx = loadHead;

    if (!cacheBlockMask) {
        assert(dcachePort);
        Addr bs = dcachePort->peerBlockSize();

        // Make sure we actually got a size
        assert(bs != 0);

        cacheBlockMask = ~(bs - 1);
    }
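    // The mask assumes a power-of-two block size: with bs = 64 the mask is
    // ~0x3f, so addr & cacheBlockMask yields the block-aligned address used
    // below to compare queued loads against the snooped block.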
    // If this is the only load in the LSQ we don't care
    if (load_idx == loadTail)
        return;
    incrLdIdx(load_idx);

    DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());
    Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];

        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
                ld_inst->seqNum, load_addr, invalidate_addr);

        if (load_addr == invalidate_addr) {
            if (ld_inst->possibleLoadViolation()) {
                DPRINTF(LSQUnit, "Conflicting load at addr %#x (snoop addr "
                        "%#x) [sn:%lli]\n",
                        ld_inst->physEffAddr, pkt->getAddr(), ld_inst->seqNum);

                // Mark the load for re-execution
                ld_inst->fault = new ReExec;
            } else {
                // If an older load checks this and it's true, then we
                // might have missed the snoop, in which case we need to
                // invalidate to be sure.
                ld_inst->hitExternalSnoop(true);
            }
        }
        incrLdIdx(load_idx);
    }
    return;
}

template <class Impl>
Fault
LSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst)
{
    Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
    Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;

    /** @todo in theory you only need to check an instruction that has
     * executed, however there isn't a good way in the pipeline at the moment
     * to check all instructions that will execute before the store writes
     * back.  Thus, like the implementation that came before it, we're overly
     * conservative.
     */
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];
        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
        Addr ld_eff_addr2 =
            (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;

        if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
            if (inst->isLoad()) {
                // If this load is to the same block as an external snoop
                // invalidate that we've observed then the load needs to be
                // squashed as it could have newer data
                if (ld_inst->hitExternalSnoop()) {
                    if (!memDepViolator ||
                        ld_inst->seqNum < memDepViolator->seqNum) {
                        DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
                                "and [sn:%lli] at address %#x\n",
                                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                        memDepViolator = ld_inst;

                        ++lsqMemOrderViolation;

                        return new GenericISA::M5PanicFault(
                            "Detected fault with inst [sn:%lli] and "
                            "[sn:%lli] at address %#x\n",
                            inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                    }
                }

                // Otherwise, mark the load as a possible load violation: if
                // we see a snoop before it's committed, we need to squash.
                ld_inst->possibleLoadViolation(true);
                DPRINTF(LSQUnit, "Found possible load violation at addr: %#x"
                        " between instructions [sn:%lli] and [sn:%lli]\n",
                        inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
            } else {
                // A load incorrectly passed this store.  If we already have
                // an older violator recorded, keep it; otherwise record
                // this load, then squash and refetch.
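                // Tracking the oldest violator matters because recovery
                // squashes from the violating instruction onward; a younger
                // violator would be squashed anyway when the older one is
                // handled.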
                if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
                    break;

                DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and "
                        "[sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                memDepViolator = ld_inst;

                ++lsqMemOrderViolation;

                return new GenericISA::M5PanicFault("Detected fault with "
                        "inst [sn:%lli] and [sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
            }
        }

        incrLdIdx(load_idx);
    }
    return NoFault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    using namespace TheISA;
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
            inst->pcState(), inst->seqNum);

    assert(!inst->isSquashed());

    load_fault = inst->initiateAcc();

    if (inst->isTranslationDelayed() &&
        load_fault == NoFault)
        return load_fault;

    // If the instruction faulted or predicated false, then we need to send it
    // along to commit without the instruction completing.
    if (load_fault != NoFault || inst->readPredicate() == false) {
        // Send this instruction to commit, and also make sure the iew stage
        // realizes there is activity.
        // Mark it as executed unless it is an uncached load that
        // needs to hit the head of commit.
        if (inst->readPredicate() == false)
            inst->forwardOldRegs();
        DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
                inst->seqNum,
                (load_fault != NoFault ? "fault" : "predication"));
        if (!(inst->hasRequest() && inst->uncacheable()) ||
            inst->isAtCommit()) {
            inst->setExecuted();
        }
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    } else if (!loadBlocked()) {
        assert(inst->effAddrValid());
        int load_idx = inst->lqIdx;
        incrLdIdx(load_idx);

        if (checkLoads)
            return checkViolations(load_idx, inst);
    }

    return load_fault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n",
            store_inst->pcState(), store_inst->seqNum);

    assert(!store_inst->isSquashed());

    // Check the recently completed loads to see if any match this store's
    // address.  If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    if (store_inst->isTranslationDelayed() &&
        store_fault == NoFault)
        return store_fault;

    if (store_inst->readPredicate() == false)
        store_inst->forwardOldRegs();

    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit, "Fault on Store PC %s, [sn:%lli], Size = 0\n",
                store_inst->pcState(), store_inst->seqNum);

        return store_fault;
    } else if (store_inst->readPredicate() == false) {
        DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
                store_inst->seqNum);
        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
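        // The success or failure of a store conditional is only resolved
        // later, at writeback time, when writebackStores() calls
        // TheISA::handleLockedWrite().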
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    return checkViolations(load_idx, store_inst);
}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n",
            loadQueue[loadHead]->pcState());

    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%s [sn:%lli]\n",
                    storeQueue[store_idx].inst->pcState(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackPendingStore()
{
    if (hasPendingPkt) {
        assert(pendingPkt != NULL);

        // If the cache is blocked, this will store the packet for retry.
        if (sendStore(pendingPkt)) {
            storePostSend(pendingPkt);
        }
        pendingPkt = NULL;
        hasPendingPkt = false;
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    // First writeback the second packet from any split store that didn't
    // complete last cycle because there weren't enough cache ports available.
    if (TheISA::HasUnalignedMemAcc) {
        writebackPendingStore();
    }

    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           ((!needsTSO) || (!storeInFlight)) &&
           usedPorts < cachePorts) {

        if (isStoreBlocked || lsq->cacheBlocked()) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
            assert(storeQueue[storeWBIdx].sreqLow);
            assert(storeQueue[storeWBIdx].sreqHigh);
        }

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
        RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;

        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[64];

        memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());

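        // Pick the memory command from the request's flags: atomic swaps
        // become SwapReq, LL/SC pairs become StoreCondReq, and everything
        // else is a plain WriteReq.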
        MemCmd command =
            req->isSwap() ? MemCmd::SwapReq :
            (req->isLLSC() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
        PacketPtr data_pkt;
        PacketPtr snd_data_pkt = NULL;

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;

        if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {

            // Build a single data packet if the store isn't split.
            data_pkt = new Packet(req, command);
            data_pkt->dataStatic(inst->memData);
            data_pkt->senderState = state;
        } else {
            // Create two packets if the store is split in two.
            data_pkt = new Packet(sreqLow, command);
            snd_data_pkt = new Packet(sreqHigh, command);

            data_pkt->dataStatic(inst->memData);
            snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());

            data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;

            // Can delete the main request now.
            delete req;
            req = sreqLow;
        }

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->pcState(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            assert(!storeQueue[storeWBIdx].isSplit);
            // Disable recording the result temporarily.  Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult(false);
            bool success = TheISA::handleLockedWrite(inst.get(), req);
            inst->recordResult(true);

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed.  "
                        "Instantly completing it.\n",
                        inst->seqNum);
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                cpu->schedule(wb, curTick() + 1);
                if (cpu->checker) {
                    // Make sure to set the LLSC data for verification
                    // if checker is loaded
                    inst->reqToVerify->setExtraData(0);
                    inst->completeAcc(data_pkt);
                }
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        bool split =
            TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;

        ThreadContext *thread = cpu->tcBase(lsqID);

        if (req->isMmappedIpr()) {
            assert(!inst->isStoreConditional());
            TheISA::handleIprWrite(thread, data_pkt);
            delete data_pkt;
            if (split) {
                assert(snd_data_pkt->req->isMmappedIpr());
                TheISA::handleIprWrite(thread, snd_data_pkt);
                delete snd_data_pkt;
                delete sreqLow;
                delete sreqHigh;
            }
            delete state;
            delete req;
            completeStore(storeWBIdx);
            incrStIdx(storeWBIdx);
        } else if (!sendStore(data_pkt)) {
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], "
                    "will retry later\n",
                    inst->seqNum);

            // Need to store the second packet, if split.
            if (split) {
                state->pktToSend = true;
                state->pendingPacket = snd_data_pkt;
            }
        } else {

            // If split, try to send the second packet too
            if (split) {
                assert(snd_data_pkt);

                // Ensure there are enough ports to use.
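                // Each packet sent in a cycle consumes one cache port, so a
                // split store needs two free ports to finish in one cycle;
                // otherwise its second half waits in pendingPkt until
                // writebackPendingStore() runs next cycle.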
                if (usedPorts < cachePorts) {
                    ++usedPorts;
                    if (sendStore(snd_data_pkt)) {
                        storePostSend(snd_data_pkt);
                    } else {
                        DPRINTF(IEW, "D-Cache became blocked when writing"
                                " [sn:%lli] second packet, will retry later\n",
                                inst->seqNum);
                    }
                } else {

                    // Store the packet for when there are free ports.
                    assert(pendingPkt == NULL);
                    pendingPkt = snd_data_pkt;
                    hasPendingPkt = true;
                }
            } else {

                // Not a split store.
                storePostSend(data_pkt);
            }
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
    }
}*/

template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit, "Load Instruction PC %s squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->pcState(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit, "Store Instruction PC %s squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->pcState(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen.  It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = false;

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;
        if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
            delete storeQueue[store_idx].sreqLow;
            delete storeQueue[store_idx].sreqHigh;

            storeQueue[store_idx].sreqLow = NULL;
            storeQueue[store_idx].sreqHigh = NULL;
        }

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time.  This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();

        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
    }

    if (needsTSO) {
        storeInFlight = true;
    }

    incrStIdx(storeWBIdx);
}

template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();

    // see if this load changed the PC
    iewStage->checkMisprediction(inst);
}

template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    if (needsTSO) {
        storeInFlight = false;
    }
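    // (Under TSO, clearing storeInFlight here re-enables writebackStores(),
    // whose loop condition ((!needsTSO) || (!storeInFlight)) allows at most
    // one store in flight at a time, preserving store ordering at memory.)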

    // Tell the checker we've completed this instruction.  Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
}

template <class Impl>
bool
LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
{
    if (!dcachePort->sendTimingReq(data_pkt)) {
        // Need to handle becoming blocked on a store.
        isStoreBlocked = true;
        ++lsqCacheBlocked;
        assert(retryPkt == NULL);
        retryPkt = data_pkt;
        lsq->setRetryTid(lsqID);
        return false;
    }
    return true;
}

template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Receiving retry: store blocked\n");
        assert(retryPkt != NULL);

        LSQSenderState *state =
            dynamic_cast<LSQSenderState *>(retryPkt->senderState);

        if (dcachePort->sendTimingReq(retryPkt)) {
            // Don't finish the store unless this is the last packet.
            if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
                state->pendingPacket == retryPkt) {
                state->pktToSend = false;
                storePostSend(retryPkt);
            }
            retryPkt = NULL;
            isStoreBlocked = false;
            lsq->setRetryTid(InvalidThreadID);

            // Send any outstanding packet.
            if (TheISA::HasUnalignedMemAcc && state->pktToSend) {
                assert(state->pendingPacket);
                if (sendStore(state->pendingPacket)) {
                    storePostSend(state->pendingPacket);
                }
            }
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
            lsq->setRetryTid(lsqID);
        }
    } else if (isLoadBlocked) {
        DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
                "no need to resend packet.\n");
    } else {
        DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
    }
}

template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx)
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx)
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx)
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx)
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

template <class Impl>
void
LSQUnit<Impl>::dumpInsts()
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        cprintf("%s ", loadQueue[load_idx]->pcState());

        incrLdIdx(load_idx);
    }
    cprintf("\n");

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        cprintf("%s ", storeQueue[store_idx].inst->pcState());

        incrStIdx(store_idx);
    }

    cprintf("\n");
}