// lsq_unit_impl.hh (revision 9527)
/*
 * Copyright (c) 2010-2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#include "arch/generic/debugfaults.hh"
#include "arch/locked_mem.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/o3/lsq.hh"
#include "cpu/o3/lsq_unit.hh"
#include "debug/Activity.hh"
#include "debug/IEW.hh"
#include "debug/LSQUnit.hh"
#include "debug/O3PipeView.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(Default_Pri, AutoDelete),
      inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    assert(!lsqPtr->cpu->switchedOut());

    lsqPtr->writeback(inst, pkt);

    if (pkt->senderState)
        delete pkt->senderState;

    delete pkt->req;
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description() const
{
    return "Store writeback";
}

template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    // If this is a split access, wait until all packets are received.
    if (TheISA::HasUnalignedMemAcc && !state->complete()) {
        delete pkt->req;
        delete pkt;
        return;
    }

    assert(!cpu->switchedOut());
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
    } else {
        if (!state->noWB) {
            if (!TheISA::HasUnalignedMemAcc || !state->isSplit ||
                !state->isLoad) {
                writeback(inst, pkt);
            } else {
                writeback(inst, state->mainPkt);
            }
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
        delete state->mainPkt->req;
        delete state->mainPkt;
    }
    delete state;
    delete pkt->req;
    delete pkt;
}

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false), storeInFlight(false), hasPendingPkt(false)
{
}

template<class Impl>
void
LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
        LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
        unsigned id)
{
    cpu = cpu_ptr;
    iewStage = iew_ptr;

    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n", id);

    lsq = lsq_ptr;

    lsqID = id;

    // Add 1 for the sentinel entry (they are circular queues).
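    // (With one slot always left unused, head == tail can unambiguously mean
    // "empty" while the queues remain simple circular buffers; numFreeEntries()
    // below subtracts the sentinel back out when reporting capacity.)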
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    depCheckShift = params->LSQDepCheckShift;
    checkLoads = params->LSQCheckLoads;
    cachePorts = params->cachePorts;
    needsTSO = params->needsTSO;

    resetState();
}


template<class Impl>
void
LSQUnit<Impl>::resetState()
{
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    retryPkt = NULL;
    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;

    cacheBlockMask = 0;
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread" + to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the instruction is squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port)
{
    dcachePort = dcache_port;
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::drainSanityCheck() const
{
    for (int i = 0; i < loadQueue.size(); ++i)
        assert(!loadQueue[i]);

    assert(storesToWB == 0);
    assert(!retryPkt);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    resetState();
}

template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }

}

template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
            load_inst->pcState(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
            store_inst->pcState(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeEntries()
{
    unsigned free_lq_entries = LQEntries - loads;
    unsigned free_sq_entries = SQEntries - stores;

    // Both the LQ and SQ entries have an extra dummy entry to differentiate
    // empty/full conditions. Subtract 1 from the free entries.
    if (free_lq_entries < free_sq_entries) {
        return free_lq_entries - 1;
    } else {
        return free_sq_entries - 1;
    }
}

template <class Impl>
void
LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
{
    int load_idx = loadHead;

    if (!cacheBlockMask) {
        assert(dcachePort);
        Addr bs = dcachePort->peerBlockSize();

        // Make sure we actually got a size
        assert(bs != 0);

        cacheBlockMask = ~(bs - 1);
    }

    // Unlock the cpu-local monitor when the CPU sees a snoop to a locked
    // address. The CPU can speculatively execute a LL operation after a
    // pending SC operation in the pipeline, and that can leave the locked
    // monitor in the cache the CPU is connected to looking valid when it
    // really shouldn't be.
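    // handleLockedSnoop() clears a thread's local load-locked monitor when the
    // snooped block matches it, so a later store-conditional will fail;
    // noSquashFromTC is raised around the call so the thread-context update
    // made here does not itself trigger a squash.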
    for (int x = 0; x < cpu->numActiveThreads(); x++) {
        ThreadContext *tc = cpu->getContext(x);
        bool no_squash = cpu->thread[x]->noSquashFromTC;
        cpu->thread[x]->noSquashFromTC = true;
        TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask);
        cpu->thread[x]->noSquashFromTC = no_squash;
    }

    // If this is the only load in the LSQ we don't care
    if (load_idx == loadTail)
        return;
    incrLdIdx(load_idx);

    DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());
    Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];

        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
                ld_inst->seqNum, load_addr, invalidate_addr);

        if (load_addr == invalidate_addr) {
            if (ld_inst->possibleLoadViolation()) {
                DPRINTF(LSQUnit, "Conflicting load at addr %#x, pktAddr %#x "
                        "[sn:%lli]\n",
                        ld_inst->physEffAddr, pkt->getAddr(), ld_inst->seqNum);

                // Mark the load for re-execution
                ld_inst->fault = new ReExec;
            } else {
                // If an older load checks this and it's true
                // then we might have missed the snoop
                // in which case we need to invalidate to be sure
                ld_inst->hitExternalSnoop(true);
            }
        }
        incrLdIdx(load_idx);
    }
    return;
}

template <class Impl>
Fault
LSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst)
{
    Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
    Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;

    /** @todo in theory you only need to check an instruction that has executed,
     * however there isn't a good way in the pipeline at the moment to check
     * all instructions that will execute before the store writes back. Thus,
     * like the implementation that came before it, we're overly conservative.
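     *
     * Note that addresses are compared at the granularity set by
     * LSQDepCheckShift: each access is reduced to the inclusive range
     * [effAddr >> depCheckShift, (effAddr + effSize - 1) >> depCheckShift],
     * and two accesses are treated as conflicting whenever those ranges
     * overlap.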
     */
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];
        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
        Addr ld_eff_addr2 =
            (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;

        if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
            if (inst->isLoad()) {
                // If this load is to the same block as an external snoop
                // invalidate that we've observed then the load needs to be
                // squashed as it could have newer data
                if (ld_inst->hitExternalSnoop()) {
                    if (!memDepViolator ||
                        ld_inst->seqNum < memDepViolator->seqNum) {
                        DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
                                "and [sn:%lli] at address %#x\n",
                                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                        memDepViolator = ld_inst;

                        ++lsqMemOrderViolation;

                        return new GenericISA::M5PanicFault(
                            "Detected fault with inst [sn:%lli] and "
                            "[sn:%lli] at address %#x\n",
                            inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                    }
                }

                // Otherwise, mark the load as a possible load violation,
                // and if we see a snoop before it's committed we need to
                // squash it.
                ld_inst->possibleLoadViolation(true);
                DPRINTF(LSQUnit, "Found possible load violation at addr: %#x"
                        " between instructions [sn:%lli] and [sn:%lli]\n",
                        inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
            } else {
                // A load/store incorrectly passed this store.
                // Check if we already have a violator; if this one is
                // older, record it and squash and refetch.
                if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
                    break;

                DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and "
                        "[sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                memDepViolator = ld_inst;

                ++lsqMemOrderViolation;

                return new GenericISA::M5PanicFault("Detected fault with "
                        "inst [sn:%lli] and [sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
            }
        }

        incrLdIdx(load_idx);
    }
    return NoFault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    using namespace TheISA;
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
            inst->pcState(), inst->seqNum);

    assert(!inst->isSquashed());

    load_fault = inst->initiateAcc();

    if (inst->isTranslationDelayed() &&
        load_fault == NoFault)
        return load_fault;

    // If the instruction faulted or predicated false, then we need to send it
    // along to commit without the instruction completing.
    if (load_fault != NoFault || inst->readPredicate() == false) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        // Mark it as executed unless it is an uncached load that
        // needs to hit the head of commit.
        if (inst->readPredicate() == false)
            inst->forwardOldRegs();
        DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
                inst->seqNum,
                (load_fault != NoFault ? "fault" : "predication"));
"fault" : "predication")); 592 if (!(inst->hasRequest() && inst->uncacheable()) || 593 inst->isAtCommit()) { 594 inst->setExecuted(); 595 } 596 iewStage->instToCommit(inst); 597 iewStage->activityThisCycle(); 598 } else if (!loadBlocked()) { 599 assert(inst->effAddrValid()); 600 int load_idx = inst->lqIdx; 601 incrLdIdx(load_idx); 602 603 if (checkLoads) 604 return checkViolations(load_idx, inst); 605 } 606 607 return load_fault; 608} 609 610template <class Impl> 611Fault 612LSQUnit<Impl>::executeStore(DynInstPtr &store_inst) 613{ 614 using namespace TheISA; 615 // Make sure that a store exists. 616 assert(stores != 0); 617 618 int store_idx = store_inst->sqIdx; 619 620 DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n", 621 store_inst->pcState(), store_inst->seqNum); 622 623 assert(!store_inst->isSquashed()); 624 625 // Check the recently completed loads to see if any match this store's 626 // address. If so, then we have a memory ordering violation. 627 int load_idx = store_inst->lqIdx; 628 629 Fault store_fault = store_inst->initiateAcc(); 630 631 if (store_inst->isTranslationDelayed() && 632 store_fault == NoFault) 633 return store_fault; 634 635 if (store_inst->readPredicate() == false) 636 store_inst->forwardOldRegs(); 637 638 if (storeQueue[store_idx].size == 0) { 639 DPRINTF(LSQUnit,"Fault on Store PC %s, [sn:%lli], Size = 0\n", 640 store_inst->pcState(), store_inst->seqNum); 641 642 return store_fault; 643 } else if (store_inst->readPredicate() == false) { 644 DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n", 645 store_inst->seqNum); 646 return store_fault; 647 } 648 649 assert(store_fault == NoFault); 650 651 if (store_inst->isStoreConditional()) { 652 // Store conditionals need to set themselves as able to 653 // writeback if we haven't had a fault by here. 654 storeQueue[store_idx].canWB = true; 655 656 ++storesToWB; 657 } 658 659 return checkViolations(load_idx, store_inst); 660 661} 662 663template <class Impl> 664void 665LSQUnit<Impl>::commitLoad() 666{ 667 assert(loadQueue[loadHead]); 668 669 DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n", 670 loadQueue[loadHead]->pcState()); 671 672 loadQueue[loadHead] = NULL; 673 674 incrLdIdx(loadHead); 675 676 --loads; 677} 678 679template <class Impl> 680void 681LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst) 682{ 683 assert(loads == 0 || loadQueue[loadHead]); 684 685 while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) { 686 commitLoad(); 687 } 688} 689 690template <class Impl> 691void 692LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst) 693{ 694 assert(stores == 0 || storeQueue[storeHead].inst); 695 696 int store_idx = storeHead; 697 698 while (store_idx != storeTail) { 699 assert(storeQueue[store_idx].inst); 700 // Mark any stores that are now committed and have not yet 701 // been marked as able to write back. 702 if (!storeQueue[store_idx].canWB) { 703 if (storeQueue[store_idx].inst->seqNum > youngest_inst) { 704 break; 705 } 706 DPRINTF(LSQUnit, "Marking store as able to write back, PC " 707 "%s [sn:%lli]\n", 708 storeQueue[store_idx].inst->pcState(), 709 storeQueue[store_idx].inst->seqNum); 710 711 storeQueue[store_idx].canWB = true; 712 713 ++storesToWB; 714 } 715 716 incrStIdx(store_idx); 717 } 718} 719 720template <class Impl> 721void 722LSQUnit<Impl>::writebackPendingStore() 723{ 724 if (hasPendingPkt) { 725 assert(pendingPkt != NULL); 726 727 // If the cache is blocked, this will store the packet for retry. 
        if (sendStore(pendingPkt)) {
            storePostSend(pendingPkt);
        }
        pendingPkt = NULL;
        hasPendingPkt = false;
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    // First writeback the second packet from any split store that didn't
    // complete last cycle because there weren't enough cache ports available.
    if (TheISA::HasUnalignedMemAcc) {
        writebackPendingStore();
    }

    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           ((!needsTSO) || (!storeInFlight)) &&
           usedPorts < cachePorts) {

        if (isStoreBlocked || lsq->cacheBlocked()) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
            assert(storeQueue[storeWBIdx].sreqLow);
            assert(storeQueue[storeWBIdx].sreqHigh);
        }

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
        RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;

        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[64];

        memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());

        MemCmd command =
            req->isSwap() ? MemCmd::SwapReq :
            (req->isLLSC() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
        PacketPtr data_pkt;
        PacketPtr snd_data_pkt = NULL;

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;

        if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {

            // Build a single data packet if the store isn't split.
            data_pkt = new Packet(req, command);
            data_pkt->dataStatic(inst->memData);
            data_pkt->senderState = state;
        } else {
            // Create two packets if the store is split in two.
            data_pkt = new Packet(sreqLow, command);
            snd_data_pkt = new Packet(sreqHigh, command);

            data_pkt->dataStatic(inst->memData);
            snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());

            data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;

            // Can delete the main request now.
            delete req;
            req = sreqLow;
        }

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->pcState(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            assert(!storeQueue[storeWBIdx].isSplit);
            // Disable recording the result temporarily. Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult(false);
            bool success = TheISA::handleLockedWrite(inst.get(), req);
            inst->recordResult(true);

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. "
                        "Instantly completing it.\n",
                        inst->seqNum);
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                cpu->schedule(wb, curTick() + 1);
                if (cpu->checker) {
                    // Make sure to set the LLSC data for verification
                    // if checker is loaded
                    inst->reqToVerify->setExtraData(0);
                    inst->completeAcc(data_pkt);
                }
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        bool split =
            TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;

        ThreadContext *thread = cpu->tcBase(lsqID);

        if (req->isMmappedIpr()) {
            assert(!inst->isStoreConditional());
            TheISA::handleIprWrite(thread, data_pkt);
            delete data_pkt;
            if (split) {
                assert(snd_data_pkt->req->isMmappedIpr());
                TheISA::handleIprWrite(thread, snd_data_pkt);
                delete snd_data_pkt;
                delete sreqLow;
                delete sreqHigh;
            }
            delete state;
            delete req;
            completeStore(storeWBIdx);
            incrStIdx(storeWBIdx);
        } else if (!sendStore(data_pkt)) {
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], "
                    "will retry later\n",
                    inst->seqNum);

            // Need to store the second packet, if split.
            if (split) {
                state->pktToSend = true;
                state->pendingPacket = snd_data_pkt;
            }
        } else {

            // If split, try to send the second packet too
            if (split) {
                assert(snd_data_pkt);

                // Ensure there are enough ports to use.
                if (usedPorts < cachePorts) {
                    ++usedPorts;
                    if (sendStore(snd_data_pkt)) {
                        storePostSend(snd_data_pkt);
                    } else {
                        DPRINTF(IEW, "D-Cache became blocked when writing"
                                " [sn:%lli] second packet, will retry later\n",
                                inst->seqNum);
                    }
                } else {

                    // Store the packet for when there are free ports.
                    assert(pendingPkt == NULL);
                    pendingPkt = snd_data_pkt;
                    hasPendingPkt = true;
                }
            } else {

                // Not a split store.
                storePostSend(data_pkt);
            }
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n", mshrSeqNums.size());
    }
}*/

template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit, "Load Instruction PC %s squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->pcState(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit, "Store Instruction PC %s squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->pcState(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen. It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        // Must delete request now that it wasn't handed off to
        // memory. This is quite ugly. @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;
        if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
            delete storeQueue[store_idx].sreqLow;
            delete storeQueue[store_idx].sreqHigh;

            storeQueue[store_idx].sreqLow = NULL;
            storeQueue[store_idx].sreqHigh = NULL;
        }

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();

        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
    }

    if (needsTSO) {
        storeInFlight = true;
    }

    incrStIdx(storeWBIdx);
}

template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();

    // see if this load changed the PC
    iewStage->checkMisprediction(inst);
}

template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

#if TRACING_ON
    if (DTRACE(O3PipeView)) {
        storeQueue[store_idx].inst->storeTick =
            curTick() - storeQueue[store_idx].inst->fetchTick;
    }
#endif

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    if (needsTSO) {
        storeInFlight = false;
    }

    // Tell the checker we've completed this instruction. Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
}

template <class Impl>
bool
LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
{
    if (!dcachePort->sendTimingReq(data_pkt)) {
        // Need to handle becoming blocked on a store.
        isStoreBlocked = true;
        ++lsqCacheBlocked;
        assert(retryPkt == NULL);
        retryPkt = data_pkt;
        lsq->setRetryTid(lsqID);
        return false;
    }
    return true;
}

template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Receiving retry: store blocked\n");
        assert(retryPkt != NULL);

        LSQSenderState *state =
            dynamic_cast<LSQSenderState *>(retryPkt->senderState);

        if (dcachePort->sendTimingReq(retryPkt)) {
            // Don't finish the store unless this is the last packet.
            if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
                state->pendingPacket == retryPkt) {
                state->pktToSend = false;
                storePostSend(retryPkt);
            }
            retryPkt = NULL;
            isStoreBlocked = false;
            lsq->setRetryTid(InvalidThreadID);

            // Send any outstanding packet.
            if (TheISA::HasUnalignedMemAcc && state->pktToSend) {
                assert(state->pendingPacket);
                if (sendStore(state->pendingPacket)) {
                    storePostSend(state->pendingPacket);
                }
            }
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
            lsq->setRetryTid(lsqID);
        }
    } else if (isLoadBlocked) {
        DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
                "no need to resend packet.\n");
    } else {
        DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
    }
}

template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx) const
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx) const
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx) const
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx) const
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

template <class Impl>
void
LSQUnit<Impl>::dumpInsts() const
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        const DynInstPtr &inst(loadQueue[load_idx]);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrLdIdx(load_idx);
    }
    cprintf("\n");

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        const DynInstPtr &inst(storeQueue[store_idx].inst);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrStIdx(store_idx);
    }

    cprintf("\n");
}