lsq_unit_impl.hh revision 2927
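Per-thread load/store queue unit for the O3 CPU model: loads and stores are buffered in circular queues, committed stores are written back to the data cache, and each executed store is checked against younger loads for memory-order violations.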
/*
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#include "config/use_checker.hh"

#include "cpu/o3/lsq.hh"
#include "cpu/o3/lsq_unit.hh"
#include "base/str.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

#if USE_CHECKER
#include "cpu/checker/cpu.hh"
#endif

template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(&mainEventQueue), inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
    this->setFlags(Event::AutoDelete);
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    if (!lsqPtr->isSwitchedOut()) {
        lsqPtr->writeback(inst, pkt);
    }
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description()
{
    return "Store writeback event";
}

template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli]\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli]\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    if (isSwitchedOut() || inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        delete state;
        delete pkt;
        return;
    } else {
        if (!state->noWB) {
            writeback(inst, pkt);
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    delete state;
    delete pkt;
}

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false)
{
}

template<class Impl>
void
LSQUnit<Impl>::init(Params *params, LSQ *lsq_ptr, unsigned maxLQEntries,
                    unsigned maxSQEntries, unsigned id)
{
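    // The load and store queues are circular buffers: head == tail means
    // empty, and one sentinel slot is reserved so that a full queue
    // (tail + 1 == head) can be told apart from an empty one.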
    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n", id);

    switchedOut = false;

    lsq = lsq_ptr;

    lsqID = id;

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;
    cachePorts = params->cachePorts;

    memDepViolator = NULL;

    blockedLoadSeqNum = 0;
}

template<class Impl>
void
LSQUnit<Impl>::setCPU(O3CPU *cpu_ptr)
{
    cpu = cpu_ptr;

#if USE_CHECKER
    if (cpu->checker) {
        cpu->checker->setDcachePort(dcachePort);
    }
#endif
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread." + to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the instruction is squashed");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::switchOut()
{
    switchedOut = true;
    for (int i = 0; i < loadQueue.size(); ++i)
        loadQueue[i] = NULL;

    assert(storesToWB == 0);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    switchedOut = false;
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;
}

template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }
}

template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
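    // Growing the queue physically appends entries; shrinking it only
    // lowers the logical limit (SQEntries) and leaves the storage alone.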
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %#x, idx:%i [sn:%lli]\n",
            load_inst->readPC(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %#x, idx:%i [sn:%lli]\n",
            store_inst->readPC(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeEntries()
{
    unsigned free_lq_entries = LQEntries - loads;
    unsigned free_sq_entries = SQEntries - stores;

    // Both the LQ and SQ entries have an extra dummy entry to differentiate
    // empty/full conditions.  Subtract 1 from the free entries.
    if (free_lq_entries < free_sq_entries) {
        return free_lq_entries - 1;
    } else {
        return free_sq_entries - 1;
    }
}

template <class Impl>
int
LSQUnit<Impl>::numLoadsReady()
{
    int load_idx = loadHead;
    int retval = 0;

    while (load_idx != loadTail) {
        assert(loadQueue[load_idx]);

        if (loadQueue[load_idx]->readyToIssue()) {
            ++retval;
        }

        // Move on to the next load in the queue.
        incrLdIdx(load_idx);
    }

    return retval;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %#x, [sn:%lli]\n",
            inst->readPC(), inst->seqNum);

    load_fault = inst->initiateAcc();

    // If the instruction faulted, then we need to send it along to commit
    // without the instruction completing.
    if (load_fault != NoFault) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    }

    return load_fault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %#x [sn:%lli]\n",
            store_inst->readPC(), store_inst->seqNum);

    // Check the recently completed loads to see if any match this store's
    // address.  If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit, "Fault on Store PC %#x, [sn:%lli],Size = 0\n",
                store_inst->readPC(), store_inst->seqNum);

        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    if (!memDepViolator) {
        while (load_idx != loadTail) {
            // Really only need to check loads that have actually executed.
            // It's safe to check all loads because effAddr is set to
            // InvalAddr when the dyn inst is created.

            // @todo: For now this is extra conservative, detecting a
            // violation if the addresses match assuming all accesses
            // are quad word accesses.

            // @todo: Fix this, magic number being used here
            if ((loadQueue[load_idx]->effAddr >> 8) ==
                (store_inst->effAddr >> 8)) {
                // A load incorrectly passed this store.  Squash and refetch.
                // For now return a fault to show that it was unsuccessful.
                memDepViolator = loadQueue[load_idx];

                return genMachineCheckFault();
            }

            incrLdIdx(load_idx);
        }

        // If we've reached this point, there was no violation.
        memDepViolator = NULL;
    }

    return store_fault;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %#x\n",
            loadQueue[loadHead]->readPC());

    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%#x [sn:%lli]\n",
                    storeQueue[store_idx].inst->readPC(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           usedPorts < cachePorts) {

        if (isStoreBlocked || lsq->cacheBlocked()) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[64];
        memcpy(inst->memData, (uint8_t *)&storeQueue[storeWBIdx].data,
               req->getSize());

        PacketPtr data_pkt = new Packet(req, Packet::WriteReq,
                                        Packet::Broadcast);
        data_pkt->dataStatic(inst->memData);

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;
        data_pkt->senderState = state;

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%#x "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, storeQueue[storeWBIdx].inst->readPC(),
                req->getPaddr(), *(inst->memData),
                storeQueue[storeWBIdx].inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (req->getFlags() & LOCKED) {
            if (req->getFlags() & UNCACHEABLE) {
                req->setScResult(2);
            } else {
                if (cpu->lockFlag) {
                    req->setScResult(1);
                } else {
                    req->setScResult(0);
                    // Hack: Instantly complete this store.
                    completeDataAccess(data_pkt);
                    incrStIdx(storeWBIdx);
                    continue;
                }
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        if (!dcachePort->sendTiming(data_pkt)) {
            // Need to handle becoming blocked on a store.
            isStoreBlocked = true;
            ++lsqCacheBlocked;
            assert(retryPkt == NULL);
            retryPkt = data_pkt;
        } else {
            storePostSend(data_pkt);
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n", mshrSeqNums.size());
    }
}*/

template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
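    // Walk both queues from the tail, removing every instruction younger
    // than squashed_num.  Stores already marked canWB have committed and
    // are left in place.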
659 "(Loads:%i Stores:%i)\n", squashed_num, loads, stores); 660 661 int load_idx = loadTail; 662 decrLdIdx(load_idx); 663 664 while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) { 665 DPRINTF(LSQUnit,"Load Instruction PC %#x squashed, " 666 "[sn:%lli]\n", 667 loadQueue[load_idx]->readPC(), 668 loadQueue[load_idx]->seqNum); 669 670 if (isStalled() && load_idx == stallingLoadIdx) { 671 stalled = false; 672 stallingStoreIsn = 0; 673 stallingLoadIdx = 0; 674 } 675 676 // Clear the smart pointer to make sure it is decremented. 677 loadQueue[load_idx]->setSquashed(); 678 loadQueue[load_idx] = NULL; 679 --loads; 680 681 // Inefficient! 682 loadTail = load_idx; 683 684 decrLdIdx(load_idx); 685 ++lsqSquashedLoads; 686 } 687 688 if (isLoadBlocked) { 689 if (squashed_num < blockedLoadSeqNum) { 690 isLoadBlocked = false; 691 loadBlockedHandled = false; 692 blockedLoadSeqNum = 0; 693 } 694 } 695 696 int store_idx = storeTail; 697 decrStIdx(store_idx); 698 699 while (stores != 0 && 700 storeQueue[store_idx].inst->seqNum > squashed_num) { 701 // Instructions marked as can WB are already committed. 702 if (storeQueue[store_idx].canWB) { 703 break; 704 } 705 706 DPRINTF(LSQUnit,"Store Instruction PC %#x squashed, " 707 "idx:%i [sn:%lli]\n", 708 storeQueue[store_idx].inst->readPC(), 709 store_idx, storeQueue[store_idx].inst->seqNum); 710 711 // I don't think this can happen. It should have been cleared 712 // by the stalling load. 713 if (isStalled() && 714 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) { 715 panic("Is stalled should have been cleared by stalling load!\n"); 716 stalled = false; 717 stallingStoreIsn = 0; 718 } 719 720 // Clear the smart pointer to make sure it is decremented. 721 storeQueue[store_idx].inst->setSquashed(); 722 storeQueue[store_idx].inst = NULL; 723 storeQueue[store_idx].canWB = 0; 724 725 storeQueue[store_idx].req = NULL; 726 --stores; 727 728 // Inefficient! 729 storeTail = store_idx; 730 731 decrStIdx(store_idx); 732 ++lsqSquashedStores; 733 } 734} 735 736template <class Impl> 737void 738LSQUnit<Impl>::storePostSend(Packet *pkt) 739{ 740 if (isStalled() && 741 storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) { 742 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] " 743 "load idx:%i\n", 744 stallingStoreIsn, stallingLoadIdx); 745 stalled = false; 746 stallingStoreIsn = 0; 747 iewStage->replayMemInst(loadQueue[stallingLoadIdx]); 748 } 749 750 if (!storeQueue[storeWBIdx].inst->isStoreConditional()) { 751 // The store is basically completed at this time. This 752 // only works so long as the checker doesn't try to 753 // verify the value in memory for stores. 754 storeQueue[storeWBIdx].inst->setCompleted(); 755#if USE_CHECKER 756 if (cpu->checker) { 757 cpu->checker->verify(storeQueue[storeWBIdx].inst); 758 } 759#endif 760 } 761 762 if (pkt->result != Packet::Success) { 763 DPRINTF(LSQUnit,"D-Cache Write Miss on idx:%i!\n", 764 storeWBIdx); 765 766 DPRINTF(Activity, "Active st accessing mem miss [sn:%lli]\n", 767 storeQueue[storeWBIdx].inst->seqNum); 768 769 //mshrSeqNums.push_back(storeQueue[storeWBIdx].inst->seqNum); 770 771 //DPRINTF(LSQUnit, "Added MSHR. count = %i\n",mshrSeqNums.size()); 772 773 // @todo: Increment stat here. 
    } else {
        DPRINTF(LSQUnit, "D-Cache: Write Hit on idx:%i !\n",
                storeWBIdx);

        DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n",
                storeQueue[storeWBIdx].inst->seqNum);
    }

    incrStIdx(storeWBIdx);
}

template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();
}

template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    // Tell the checker we've completed this instruction.  Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
#if USE_CHECKER
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
#endif
}

template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        assert(retryPkt != NULL);

        if (dcachePort->sendTiming(retryPkt)) {
            storePostSend(retryPkt);
            retryPkt = NULL;
            isStoreBlocked = false;
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
            lsq->setRetryTid(lsqID);
        }
    } else if (isLoadBlocked) {
        DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
                "no need to resend packet.\n");
    } else {
        DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
    }
}

template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx)
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx)
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx)
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx)
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

template <class Impl>
void
LSQUnit<Impl>::dumpInsts()
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        cprintf("%#x ", loadQueue[load_idx]->readPC());

        incrLdIdx(load_idx);
    }

    // End the load queue line before starting the store queue dump.
    cprintf("\n");

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        cprintf("%#x ", storeQueue[store_idx].inst->readPC());

        incrStIdx(store_idx);
    }

    cprintf("\n");
}