lsq_unit_impl.hh revision 3126
/*
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 * 28 * Authors: Kevin Lim 29 * Korey Sewell 30 */ 31 32#include "config/use_checker.hh" 33 34#include "cpu/o3/lsq.hh" 35#include "cpu/o3/lsq_unit.hh" 36#include "base/str.hh" 37#include "mem/packet.hh" 38#include "mem/request.hh" 39 40#if USE_CHECKER 41#include "cpu/checker/cpu.hh" 42#endif 43 44template<class Impl> 45LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt, 46 LSQUnit *lsq_ptr) 47 : Event(&mainEventQueue), inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr) 48{ 49 this->setFlags(Event::AutoDelete); 50} 51 52template<class Impl> 53void 54LSQUnit<Impl>::WritebackEvent::process() 55{ 56 if (!lsqPtr->isSwitchedOut()) { 57 lsqPtr->writeback(inst, pkt); 58 } 59 delete pkt; 60} 61 62template<class Impl> 63const char * 64LSQUnit<Impl>::WritebackEvent::description() 65{ 66 return "Store writeback event"; 67} 68 69template<class Impl> 70void 71LSQUnit<Impl>::completeDataAccess(PacketPtr pkt) 72{ 73 LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState); 74 DynInstPtr inst = state->inst; 75 DPRINTF(IEW, "Writeback event [sn:%lli]\n", inst->seqNum); 76 DPRINTF(Activity, "Activity: Writeback event [sn:%lli]\n", inst->seqNum); 77 78 //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum); 79 80 if (isSwitchedOut() || inst->isSquashed()) { 81 iewStage->decrWb(inst->seqNum); 82 delete state; 83 delete pkt; 84 return; 85 } else { 86 if (!state->noWB) { 87 writeback(inst, pkt); 88 } 89 90 if (inst->isStore()) { 91 completeStore(state->idx); 92 } 93 } 94 95 delete state; 96 delete pkt; 97} 98 99template <class Impl> 100LSQUnit<Impl>::LSQUnit() 101 : loads(0), stores(0), storesToWB(0), stalled(false), 102 isStoreBlocked(false), isLoadBlocked(false), 103 loadBlockedHandled(false) 104{ 105} 106 107template<class Impl> 108void 109LSQUnit<Impl>::init(Params *params, LSQ *lsq_ptr, unsigned maxLQEntries, 110 unsigned maxSQEntries, unsigned id) 111{ 112 DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id); 113 114 switchedOut = 
false; 115 116 lsq = lsq_ptr; 117 118 lsqID = id; 119 120 // Add 1 for the sentinel entry (they are circular queues). 121 LQEntries = maxLQEntries + 1; 122 SQEntries = maxSQEntries + 1; 123 124 loadQueue.resize(LQEntries); 125 storeQueue.resize(SQEntries); 126 127 loadHead = loadTail = 0; 128 129 storeHead = storeWBIdx = storeTail = 0; 130 131 usedPorts = 0; 132 cachePorts = params->cachePorts; 133 134 memDepViolator = NULL; 135 136 blockedLoadSeqNum = 0; 137} 138 139template<class Impl> 140void 141LSQUnit<Impl>::setCPU(O3CPU *cpu_ptr) 142{ 143 cpu = cpu_ptr; 144 145#if USE_CHECKER 146 if (cpu->checker) { 147 cpu->checker->setDcachePort(dcachePort); 148 } 149#endif 150} 151 152template<class Impl> 153std::string 154LSQUnit<Impl>::name() const 155{ 156 if (Impl::MaxThreads == 1) { 157 return iewStage->name() + ".lsq"; 158 } else { 159 return iewStage->name() + ".lsq.thread." + to_string(lsqID); 160 } 161} 162 163template<class Impl> 164void 165LSQUnit<Impl>::regStats() 166{ 167 lsqForwLoads 168 .name(name() + ".forwLoads") 169 .desc("Number of loads that had data forwarded from stores"); 170 171 invAddrLoads 172 .name(name() + ".invAddrLoads") 173 .desc("Number of loads ignored due to an invalid address"); 174 175 lsqSquashedLoads 176 .name(name() + ".squashedLoads") 177 .desc("Number of loads squashed"); 178 179 lsqIgnoredResponses 180 .name(name() + ".ignoredResponses") 181 .desc("Number of memory responses ignored because the instruction is squashed"); 182 183 lsqMemOrderViolation 184 .name(name() + ".memOrderViolation") 185 .desc("Number of memory ordering violations"); 186 187 lsqSquashedStores 188 .name(name() + ".squashedStores") 189 .desc("Number of stores squashed"); 190 191 invAddrSwpfs 192 .name(name() + ".invAddrSwpfs") 193 .desc("Number of software prefetches ignored due to an invalid address"); 194 195 lsqBlockedLoads 196 .name(name() + ".blockedLoads") 197 .desc("Number of blocked loads due to partial load-store forwarding"); 198 199 
lsqRescheduledLoads 200 .name(name() + ".rescheduledLoads") 201 .desc("Number of loads that were rescheduled"); 202 203 lsqCacheBlocked 204 .name(name() + ".cacheBlocked") 205 .desc("Number of times an access to memory failed due to the cache being blocked"); 206} 207 208template<class Impl> 209void 210LSQUnit<Impl>::clearLQ() 211{ 212 loadQueue.clear(); 213} 214 215template<class Impl> 216void 217LSQUnit<Impl>::clearSQ() 218{ 219 storeQueue.clear(); 220} 221 222template<class Impl> 223void 224LSQUnit<Impl>::switchOut() 225{ 226 switchedOut = true; 227 for (int i = 0; i < loadQueue.size(); ++i) { 228 assert(!loadQueue[i]); 229 loadQueue[i] = NULL; 230 } 231 232 assert(storesToWB == 0); 233} 234 235template<class Impl> 236void 237LSQUnit<Impl>::takeOverFrom() 238{ 239 switchedOut = false; 240 loads = stores = storesToWB = 0; 241 242 loadHead = loadTail = 0; 243 244 storeHead = storeWBIdx = storeTail = 0; 245 246 usedPorts = 0; 247 248 memDepViolator = NULL; 249 250 blockedLoadSeqNum = 0; 251 252 stalled = false; 253 isLoadBlocked = false; 254 loadBlockedHandled = false; 255} 256 257template<class Impl> 258void 259LSQUnit<Impl>::resizeLQ(unsigned size) 260{ 261 unsigned size_plus_sentinel = size + 1; 262 assert(size_plus_sentinel >= LQEntries); 263 264 if (size_plus_sentinel > LQEntries) { 265 while (size_plus_sentinel > loadQueue.size()) { 266 DynInstPtr dummy; 267 loadQueue.push_back(dummy); 268 LQEntries++; 269 } 270 } else { 271 LQEntries = size_plus_sentinel; 272 } 273 274} 275 276template<class Impl> 277void 278LSQUnit<Impl>::resizeSQ(unsigned size) 279{ 280 unsigned size_plus_sentinel = size + 1; 281 if (size_plus_sentinel > SQEntries) { 282 while (size_plus_sentinel > storeQueue.size()) { 283 SQEntry dummy; 284 storeQueue.push_back(dummy); 285 SQEntries++; 286 } 287 } else { 288 SQEntries = size_plus_sentinel; 289 } 290} 291 292template <class Impl> 293void 294LSQUnit<Impl>::insert(DynInstPtr &inst) 295{ 296 assert(inst->isMemRef()); 297 298 
assert(inst->isLoad() || inst->isStore()); 299 300 if (inst->isLoad()) { 301 insertLoad(inst); 302 } else { 303 insertStore(inst); 304 } 305 306 inst->setInLSQ(); 307} 308 309template <class Impl> 310void 311LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst) 312{ 313 assert((loadTail + 1) % LQEntries != loadHead); 314 assert(loads < LQEntries); 315 316 DPRINTF(LSQUnit, "Inserting load PC %#x, idx:%i [sn:%lli]\n", 317 load_inst->readPC(), loadTail, load_inst->seqNum); 318 319 load_inst->lqIdx = loadTail; 320 321 if (stores == 0) { 322 load_inst->sqIdx = -1; 323 } else { 324 load_inst->sqIdx = storeTail; 325 } 326 327 loadQueue[loadTail] = load_inst; 328 329 incrLdIdx(loadTail); 330 331 ++loads; 332} 333 334template <class Impl> 335void 336LSQUnit<Impl>::insertStore(DynInstPtr &store_inst) 337{ 338 // Make sure it is not full before inserting an instruction. 339 assert((storeTail + 1) % SQEntries != storeHead); 340 assert(stores < SQEntries); 341 342 DPRINTF(LSQUnit, "Inserting store PC %#x, idx:%i [sn:%lli]\n", 343 store_inst->readPC(), storeTail, store_inst->seqNum); 344 345 store_inst->sqIdx = storeTail; 346 store_inst->lqIdx = loadTail; 347 348 storeQueue[storeTail] = SQEntry(store_inst); 349 350 incrStIdx(storeTail); 351 352 ++stores; 353} 354 355template <class Impl> 356typename Impl::DynInstPtr 357LSQUnit<Impl>::getMemDepViolator() 358{ 359 DynInstPtr temp = memDepViolator; 360 361 memDepViolator = NULL; 362 363 return temp; 364} 365 366template <class Impl> 367unsigned 368LSQUnit<Impl>::numFreeEntries() 369{ 370 unsigned free_lq_entries = LQEntries - loads; 371 unsigned free_sq_entries = SQEntries - stores; 372 373 // Both the LQ and SQ entries have an extra dummy entry to differentiate 374 // empty/full conditions. Subtract 1 from the free entries. 
375 if (free_lq_entries < free_sq_entries) { 376 return free_lq_entries - 1; 377 } else { 378 return free_sq_entries - 1; 379 } 380} 381 382template <class Impl> 383int 384LSQUnit<Impl>::numLoadsReady() 385{ 386 int load_idx = loadHead; 387 int retval = 0; 388 389 while (load_idx != loadTail) { 390 assert(loadQueue[load_idx]); 391 392 if (loadQueue[load_idx]->readyToIssue()) { 393 ++retval; 394 } 395 } 396 397 return retval; 398} 399 400template <class Impl> 401Fault 402LSQUnit<Impl>::executeLoad(DynInstPtr &inst) 403{ 404 // Execute a specific load. 405 Fault load_fault = NoFault; 406 407 DPRINTF(LSQUnit, "Executing load PC %#x, [sn:%lli]\n", 408 inst->readPC(),inst->seqNum); 409 410 load_fault = inst->initiateAcc(); 411 412 // If the instruction faulted, then we need to send it along to commit 413 // without the instruction completing. 414 if (load_fault != NoFault) { 415 // Send this instruction to commit, also make sure iew stage 416 // realizes there is activity. 417 // Mark it as executed unless it is an uncached load that 418 // needs to hit the head of commit. 419 if (!(inst->req->getFlags() & UNCACHEABLE) || inst->isAtCommit()) { 420 inst->setExecuted(); 421 } 422 iewStage->instToCommit(inst); 423 iewStage->activityThisCycle(); 424 } 425 426 return load_fault; 427} 428 429template <class Impl> 430Fault 431LSQUnit<Impl>::executeStore(DynInstPtr &store_inst) 432{ 433 using namespace TheISA; 434 // Make sure that a store exists. 435 assert(stores != 0); 436 437 int store_idx = store_inst->sqIdx; 438 439 DPRINTF(LSQUnit, "Executing store PC %#x [sn:%lli]\n", 440 store_inst->readPC(), store_inst->seqNum); 441 442 // Check the recently completed loads to see if any match this store's 443 // address. If so, then we have a memory ordering violation. 
444 int load_idx = store_inst->lqIdx; 445 446 Fault store_fault = store_inst->initiateAcc(); 447 448 if (storeQueue[store_idx].size == 0) { 449 DPRINTF(LSQUnit,"Fault on Store PC %#x, [sn:%lli],Size = 0\n", 450 store_inst->readPC(),store_inst->seqNum); 451 452 return store_fault; 453 } 454 455 assert(store_fault == NoFault); 456 457 if (store_inst->isStoreConditional()) { 458 // Store conditionals need to set themselves as able to 459 // writeback if we haven't had a fault by here. 460 storeQueue[store_idx].canWB = true; 461 462 ++storesToWB; 463 } 464 465 if (!memDepViolator) { 466 while (load_idx != loadTail) { 467 // Really only need to check loads that have actually executed 468 // It's safe to check all loads because effAddr is set to 469 // InvalAddr when the dyn inst is created. 470 471 // @todo: For now this is extra conservative, detecting a 472 // violation if the addresses match assuming all accesses 473 // are quad word accesses. 474 475 // @todo: Fix this, magic number being used here 476 if ((loadQueue[load_idx]->effAddr >> 8) == 477 (store_inst->effAddr >> 8)) { 478 // A load incorrectly passed this store. Squash and refetch. 479 // For now return a fault to show that it was unsuccessful. 480 memDepViolator = loadQueue[load_idx]; 481 ++lsqMemOrderViolation; 482 483 return genMachineCheckFault(); 484 } 485 486 incrLdIdx(load_idx); 487 } 488 489 // If we've reached this point, there was no violation. 
490 memDepViolator = NULL; 491 } 492 493 return store_fault; 494} 495 496template <class Impl> 497void 498LSQUnit<Impl>::commitLoad() 499{ 500 assert(loadQueue[loadHead]); 501 502 DPRINTF(LSQUnit, "Committing head load instruction, PC %#x\n", 503 loadQueue[loadHead]->readPC()); 504 505 loadQueue[loadHead] = NULL; 506 507 incrLdIdx(loadHead); 508 509 --loads; 510} 511 512template <class Impl> 513void 514LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst) 515{ 516 assert(loads == 0 || loadQueue[loadHead]); 517 518 while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) { 519 commitLoad(); 520 } 521} 522 523template <class Impl> 524void 525LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst) 526{ 527 assert(stores == 0 || storeQueue[storeHead].inst); 528 529 int store_idx = storeHead; 530 531 while (store_idx != storeTail) { 532 assert(storeQueue[store_idx].inst); 533 // Mark any stores that are now committed and have not yet 534 // been marked as able to write back. 535 if (!storeQueue[store_idx].canWB) { 536 if (storeQueue[store_idx].inst->seqNum > youngest_inst) { 537 break; 538 } 539 DPRINTF(LSQUnit, "Marking store as able to write back, PC " 540 "%#x [sn:%lli]\n", 541 storeQueue[store_idx].inst->readPC(), 542 storeQueue[store_idx].inst->seqNum); 543 544 storeQueue[store_idx].canWB = true; 545 546 ++storesToWB; 547 } 548 549 incrStIdx(store_idx); 550 } 551} 552 553template <class Impl> 554void 555LSQUnit<Impl>::writebackStores() 556{ 557 while (storesToWB > 0 && 558 storeWBIdx != storeTail && 559 storeQueue[storeWBIdx].inst && 560 storeQueue[storeWBIdx].canWB && 561 usedPorts < cachePorts) { 562 563 if (isStoreBlocked || lsq->cacheBlocked()) { 564 DPRINTF(LSQUnit, "Unable to write back any more stores, cache" 565 " is blocked!\n"); 566 break; 567 } 568 569 // Store didn't write any data so no need to write it back to 570 // memory. 
571 if (storeQueue[storeWBIdx].size == 0) { 572 completeStore(storeWBIdx); 573 574 incrStIdx(storeWBIdx); 575 576 continue; 577 } 578 579 ++usedPorts; 580 581 if (storeQueue[storeWBIdx].inst->isDataPrefetch()) { 582 incrStIdx(storeWBIdx); 583 584 continue; 585 } 586 587 assert(storeQueue[storeWBIdx].req); 588 assert(!storeQueue[storeWBIdx].committed); 589 590 DynInstPtr inst = storeQueue[storeWBIdx].inst; 591 592 Request *req = storeQueue[storeWBIdx].req; 593 storeQueue[storeWBIdx].committed = true; 594 595 assert(!inst->memData); 596 inst->memData = new uint8_t[64]; 597 memcpy(inst->memData, (uint8_t *)&storeQueue[storeWBIdx].data, 598 req->getSize()); 599 600 PacketPtr data_pkt = new Packet(req, Packet::WriteReq, Packet::Broadcast); 601 data_pkt->dataStatic(inst->memData); 602 603 LSQSenderState *state = new LSQSenderState; 604 state->isLoad = false; 605 state->idx = storeWBIdx; 606 state->inst = inst; 607 data_pkt->senderState = state; 608 609 DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%#x " 610 "to Addr:%#x, data:%#x [sn:%lli]\n", 611 storeWBIdx, storeQueue[storeWBIdx].inst->readPC(), 612 req->getPaddr(), *(inst->memData), 613 storeQueue[storeWBIdx].inst->seqNum); 614 615 // @todo: Remove this SC hack once the memory system handles it. 616 if (req->getFlags() & LOCKED) { 617 if (req->getFlags() & UNCACHEABLE) { 618 req->setScResult(2); 619 } else { 620 if (cpu->lockFlag) { 621 req->setScResult(1); 622 } else { 623 req->setScResult(0); 624 // Hack: Instantly complete this store. 625 completeDataAccess(data_pkt); 626 incrStIdx(storeWBIdx); 627 continue; 628 } 629 } 630 } else { 631 // Non-store conditionals do not need a writeback. 632 state->noWB = true; 633 } 634 635 if (!dcachePort->sendTiming(data_pkt)) { 636 // Need to handle becoming blocked on a store. 
637 isStoreBlocked = true; 638 ++lsqCacheBlocked; 639 assert(retryPkt == NULL); 640 retryPkt = data_pkt; 641 lsq->setRetryTid(lsqID); 642 } else { 643 storePostSend(data_pkt); 644 } 645 } 646 647 // Not sure this should set it to 0. 648 usedPorts = 0; 649 650 assert(stores >= 0 && storesToWB >= 0); 651} 652 653/*template <class Impl> 654void 655LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum) 656{ 657 list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(), 658 mshrSeqNums.end(), 659 seqNum); 660 661 if (mshr_it != mshrSeqNums.end()) { 662 mshrSeqNums.erase(mshr_it); 663 DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size()); 664 } 665}*/ 666 667template <class Impl> 668void 669LSQUnit<Impl>::squash(const InstSeqNum &squashed_num) 670{ 671 DPRINTF(LSQUnit, "Squashing until [sn:%lli]!" 672 "(Loads:%i Stores:%i)\n", squashed_num, loads, stores); 673 674 int load_idx = loadTail; 675 decrLdIdx(load_idx); 676 677 while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) { 678 DPRINTF(LSQUnit,"Load Instruction PC %#x squashed, " 679 "[sn:%lli]\n", 680 loadQueue[load_idx]->readPC(), 681 loadQueue[load_idx]->seqNum); 682 683 if (isStalled() && load_idx == stallingLoadIdx) { 684 stalled = false; 685 stallingStoreIsn = 0; 686 stallingLoadIdx = 0; 687 } 688 689 // Clear the smart pointer to make sure it is decremented. 690 loadQueue[load_idx]->setSquashed(); 691 loadQueue[load_idx] = NULL; 692 --loads; 693 694 // Inefficient! 695 loadTail = load_idx; 696 697 decrLdIdx(load_idx); 698 ++lsqSquashedLoads; 699 } 700 701 if (isLoadBlocked) { 702 if (squashed_num < blockedLoadSeqNum) { 703 isLoadBlocked = false; 704 loadBlockedHandled = false; 705 blockedLoadSeqNum = 0; 706 } 707 } 708 709 int store_idx = storeTail; 710 decrStIdx(store_idx); 711 712 while (stores != 0 && 713 storeQueue[store_idx].inst->seqNum > squashed_num) { 714 // Instructions marked as can WB are already committed. 
715 if (storeQueue[store_idx].canWB) { 716 break; 717 } 718 719 DPRINTF(LSQUnit,"Store Instruction PC %#x squashed, " 720 "idx:%i [sn:%lli]\n", 721 storeQueue[store_idx].inst->readPC(), 722 store_idx, storeQueue[store_idx].inst->seqNum); 723 724 // I don't think this can happen. It should have been cleared 725 // by the stalling load. 726 if (isStalled() && 727 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) { 728 panic("Is stalled should have been cleared by stalling load!\n"); 729 stalled = false; 730 stallingStoreIsn = 0; 731 } 732 733 // Clear the smart pointer to make sure it is decremented. 734 storeQueue[store_idx].inst->setSquashed(); 735 storeQueue[store_idx].inst = NULL; 736 storeQueue[store_idx].canWB = 0; 737 738 storeQueue[store_idx].req = NULL; 739 --stores; 740 741 // Inefficient! 742 storeTail = store_idx; 743 744 decrStIdx(store_idx); 745 ++lsqSquashedStores; 746 } 747} 748 749template <class Impl> 750void 751LSQUnit<Impl>::storePostSend(Packet *pkt) 752{ 753 if (isStalled() && 754 storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) { 755 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] " 756 "load idx:%i\n", 757 stallingStoreIsn, stallingLoadIdx); 758 stalled = false; 759 stallingStoreIsn = 0; 760 iewStage->replayMemInst(loadQueue[stallingLoadIdx]); 761 } 762 763 if (!storeQueue[storeWBIdx].inst->isStoreConditional()) { 764 // The store is basically completed at this time. This 765 // only works so long as the checker doesn't try to 766 // verify the value in memory for stores. 
767 storeQueue[storeWBIdx].inst->setCompleted(); 768#if USE_CHECKER 769 if (cpu->checker) { 770 cpu->checker->verify(storeQueue[storeWBIdx].inst); 771 } 772#endif 773 } 774 775 if (pkt->result != Packet::Success) { 776 DPRINTF(LSQUnit,"D-Cache Write Miss on idx:%i!\n", 777 storeWBIdx); 778 779 DPRINTF(Activity, "Active st accessing mem miss [sn:%lli]\n", 780 storeQueue[storeWBIdx].inst->seqNum); 781 782 //mshrSeqNums.push_back(storeQueue[storeWBIdx].inst->seqNum); 783 784 //DPRINTF(LSQUnit, "Added MSHR. count = %i\n",mshrSeqNums.size()); 785 786 // @todo: Increment stat here. 787 } else { 788 DPRINTF(LSQUnit,"D-Cache: Write Hit on idx:%i !\n", 789 storeWBIdx); 790 791 DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n", 792 storeQueue[storeWBIdx].inst->seqNum); 793 } 794 795 incrStIdx(storeWBIdx); 796} 797 798template <class Impl> 799void 800LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt) 801{ 802 iewStage->wakeCPU(); 803 804 // Squashed instructions do not need to complete their access. 805 if (inst->isSquashed()) { 806 iewStage->decrWb(inst->seqNum); 807 assert(!inst->isStore()); 808 ++lsqIgnoredResponses; 809 return; 810 } 811 812 if (!inst->isExecuted()) { 813 inst->setExecuted(); 814 815 // Complete access to copy data to proper place. 816 inst->completeAcc(pkt); 817 } 818 819 // Need to insert instruction into queue to commit 820 iewStage->instToCommit(inst); 821 822 iewStage->activityThisCycle(); 823} 824 825template <class Impl> 826void 827LSQUnit<Impl>::completeStore(int store_idx) 828{ 829 assert(storeQueue[store_idx].inst); 830 storeQueue[store_idx].completed = true; 831 --storesToWB; 832 // A bit conservative because a store completion may not free up entries, 833 // but hopefully avoids two store completions in one cycle from making 834 // the CPU tick twice. 
835 cpu->wakeCPU(); 836 cpu->activityThisCycle(); 837 838 if (store_idx == storeHead) { 839 do { 840 incrStIdx(storeHead); 841 842 --stores; 843 } while (storeQueue[storeHead].completed && 844 storeHead != storeTail); 845 846 iewStage->updateLSQNextCycle = true; 847 } 848 849 DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head " 850 "idx:%i\n", 851 storeQueue[store_idx].inst->seqNum, store_idx, storeHead); 852 853 if (isStalled() && 854 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) { 855 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] " 856 "load idx:%i\n", 857 stallingStoreIsn, stallingLoadIdx); 858 stalled = false; 859 stallingStoreIsn = 0; 860 iewStage->replayMemInst(loadQueue[stallingLoadIdx]); 861 } 862 863 storeQueue[store_idx].inst->setCompleted(); 864 865 // Tell the checker we've completed this instruction. Some stores 866 // may get reported twice to the checker, but the checker can 867 // handle that case. 868#if USE_CHECKER 869 if (cpu->checker) { 870 cpu->checker->verify(storeQueue[store_idx].inst); 871 } 872#endif 873} 874 875template <class Impl> 876void 877LSQUnit<Impl>::recvRetry() 878{ 879 if (isStoreBlocked) { 880 assert(retryPkt != NULL); 881 882 if (dcachePort->sendTiming(retryPkt)) { 883 storePostSend(retryPkt); 884 retryPkt = NULL; 885 isStoreBlocked = false; 886 lsq->setRetryTid(-1); 887 } else { 888 // Still blocked! 
889 ++lsqCacheBlocked; 890 lsq->setRetryTid(lsqID); 891 } 892 } else if (isLoadBlocked) { 893 DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, " 894 "no need to resend packet.\n"); 895 } else { 896 DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n"); 897 } 898} 899 900template <class Impl> 901inline void 902LSQUnit<Impl>::incrStIdx(int &store_idx) 903{ 904 if (++store_idx >= SQEntries) 905 store_idx = 0; 906} 907 908template <class Impl> 909inline void 910LSQUnit<Impl>::decrStIdx(int &store_idx) 911{ 912 if (--store_idx < 0) 913 store_idx += SQEntries; 914} 915 916template <class Impl> 917inline void 918LSQUnit<Impl>::incrLdIdx(int &load_idx) 919{ 920 if (++load_idx >= LQEntries) 921 load_idx = 0; 922} 923 924template <class Impl> 925inline void 926LSQUnit<Impl>::decrLdIdx(int &load_idx) 927{ 928 if (--load_idx < 0) 929 load_idx += LQEntries; 930} 931 932template <class Impl> 933void 934LSQUnit<Impl>::dumpInsts() 935{ 936 cprintf("Load store queue: Dumping instructions.\n"); 937 cprintf("Load queue size: %i\n", loads); 938 cprintf("Load queue: "); 939 940 int load_idx = loadHead; 941 942 while (load_idx != loadTail && loadQueue[load_idx]) { 943 cprintf("%#x ", loadQueue[load_idx]->readPC()); 944 945 incrLdIdx(load_idx); 946 } 947 948 cprintf("Store queue size: %i\n", stores); 949 cprintf("Store queue: "); 950 951 int store_idx = storeHead; 952 953 while (store_idx != storeTail && storeQueue[store_idx].inst) { 954 cprintf("%#x ", storeQueue[store_idx].inst->readPC()); 955 956 incrStIdx(store_idx); 957 } 958 959 cprintf("\n"); 960} 961