1/* 2 * Copyright (c) 2004-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// Deferred-writeback event: completes a memory access from the main event
// queue rather than from a cache response (used e.g. for failed store
// conditionals that are completed instantly, without a memory round trip).
template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(&mainEventQueue), inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
    // The event deletes itself after process() runs.
    this->setFlags(Event::AutoDelete);
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    // Skip the writeback entirely if the CPU was switched out in the
    // meantime; the LSQ state it refers to is no longer live.
    if (!lsqPtr->isSwitchedOut()) {
        lsqPtr->writeback(inst, pkt);
    }

    // This event owns the packet, its sender state, and its request:
    // free all three here.
    if (pkt->senderState)
        delete pkt->senderState;

    delete pkt->req;
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description()
{
    return "Store writeback";
}

// Entry point for dcache responses: writes load data back (unless the
// sender state says no writeback is needed), completes store-queue
// entries, and frees the per-access bookkeeping (state, request, packet).
template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;

    DPRINTF(IEW, "Writeback event [sn:%lli]\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli]\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    if (isSwitchedOut() || inst->isSquashed()) {
        // Stale response: just release the writeback-port slot that was
        // reserved for this instruction.
        iewStage->decrWb(inst->seqNum);
    } else {
        if (!state->noWB) {
            writeback(inst, pkt);
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    delete state;
    delete pkt->req;
    delete pkt;
}

// Default constructor zeroes the counters/flags; real configuration
// (queue sizes, CPU/IEW pointers) happens later in init().
template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false)
{
}
maxSQEntries, unsigned id) 115{ 116 cpu = cpu_ptr; 117 iewStage = iew_ptr; 118 119 DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id); 120 121 switchedOut = false; 122 123 lsq = lsq_ptr; 124 125 lsqID = id; 126 127 // Add 1 for the sentinel entry (they are circular queues). 128 LQEntries = maxLQEntries + 1; 129 SQEntries = maxSQEntries + 1; 130 131 loadQueue.resize(LQEntries); 132 storeQueue.resize(SQEntries); 133 134 loadHead = loadTail = 0; 135 136 storeHead = storeWBIdx = storeTail = 0; 137 138 usedPorts = 0; 139 cachePorts = params->cachePorts; 140 141 retryPkt = NULL; 142 memDepViolator = NULL; 143 144 blockedLoadSeqNum = 0; 145} 146 147template<class Impl> 148std::string 149LSQUnit<Impl>::name() const 150{ 151 if (Impl::MaxThreads == 1) { 152 return iewStage->name() + ".lsq"; 153 } else { 154 return iewStage->name() + ".lsq.thread." + to_string(lsqID); 155 } 156} 157 158template<class Impl> 159void 160LSQUnit<Impl>::regStats() 161{ 162 lsqForwLoads 163 .name(name() + ".forwLoads") 164 .desc("Number of loads that had data forwarded from stores"); 165 166 invAddrLoads 167 .name(name() + ".invAddrLoads") 168 .desc("Number of loads ignored due to an invalid address"); 169 170 lsqSquashedLoads 171 .name(name() + ".squashedLoads") 172 .desc("Number of loads squashed"); 173 174 lsqIgnoredResponses 175 .name(name() + ".ignoredResponses") 176 .desc("Number of memory responses ignored because the instruction is squashed"); 177 178 lsqMemOrderViolation 179 .name(name() + ".memOrderViolation") 180 .desc("Number of memory ordering violations"); 181 182 lsqSquashedStores 183 .name(name() + ".squashedStores") 184 .desc("Number of stores squashed"); 185 186 invAddrSwpfs 187 .name(name() + ".invAddrSwpfs") 188 .desc("Number of software prefetches ignored due to an invalid address"); 189 190 lsqBlockedLoads 191 .name(name() + ".blockedLoads") 192 .desc("Number of blocked loads due to partial load-store forwarding"); 193 194 lsqRescheduledLoads 195 .name(name() + 
".rescheduledLoads") 196 .desc("Number of loads that were rescheduled"); 197 198 lsqCacheBlocked 199 .name(name() + ".cacheBlocked") 200 .desc("Number of times an access to memory failed due to the cache being blocked"); 201} 202 203template<class Impl> 204void 205LSQUnit<Impl>::setDcachePort(Port *dcache_port) 206{ 207 dcachePort = dcache_port; 208 209#if USE_CHECKER 210 if (cpu->checker) { 211 cpu->checker->setDcachePort(dcachePort); 212 } 213#endif 214} 215 216template<class Impl> 217void 218LSQUnit<Impl>::clearLQ() 219{ 220 loadQueue.clear(); 221} 222 223template<class Impl> 224void 225LSQUnit<Impl>::clearSQ() 226{ 227 storeQueue.clear(); 228} 229 230template<class Impl> 231void 232LSQUnit<Impl>::switchOut() 233{ 234 switchedOut = true; 235 for (int i = 0; i < loadQueue.size(); ++i) { 236 assert(!loadQueue[i]); 237 loadQueue[i] = NULL; 238 } 239 240 assert(storesToWB == 0); 241} 242 243template<class Impl> 244void 245LSQUnit<Impl>::takeOverFrom() 246{ 247 switchedOut = false; 248 loads = stores = storesToWB = 0; 249 250 loadHead = loadTail = 0; 251 252 storeHead = storeWBIdx = storeTail = 0; 253 254 usedPorts = 0; 255 256 memDepViolator = NULL; 257 258 blockedLoadSeqNum = 0; 259 260 stalled = false; 261 isLoadBlocked = false; 262 loadBlockedHandled = false; 263} 264 265template<class Impl> 266void 267LSQUnit<Impl>::resizeLQ(unsigned size) 268{ 269 unsigned size_plus_sentinel = size + 1; 270 assert(size_plus_sentinel >= LQEntries); 271 272 if (size_plus_sentinel > LQEntries) { 273 while (size_plus_sentinel > loadQueue.size()) { 274 DynInstPtr dummy; 275 loadQueue.push_back(dummy); 276 LQEntries++; 277 } 278 } else { 279 LQEntries = size_plus_sentinel; 280 } 281 282} 283 284template<class Impl> 285void 286LSQUnit<Impl>::resizeSQ(unsigned size) 287{ 288 unsigned size_plus_sentinel = size + 1; 289 if (size_plus_sentinel > SQEntries) { 290 while (size_plus_sentinel > storeQueue.size()) { 291 SQEntry dummy; 292 storeQueue.push_back(dummy); 293 SQEntries++; 294 } 295 
// Dispatch a memory instruction into the proper queue (LQ or SQ) and mark
// it as resident in the LSQ.
template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

// Append a load at the LQ tail.  Also records in the instruction the SQ
// index it must check against for store-to-load forwarding: the current
// store tail, or -1 when there are no stores in flight.
template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    // Circular queue with a sentinel slot: tail+1 == head means full.
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %#x, idx:%i [sn:%lli]\n",
            load_inst->readPC(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

// Append a store at the SQ tail, cross-linking it with the current LQ
// tail so younger loads can be checked for ordering violations.
template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %#x, idx:%i [sn:%lli]\n",
            store_inst->readPC(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

// Return the recorded memory-order violator and clear it, so each
// violation is reported to the caller exactly once.
template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}
383 if (free_lq_entries < free_sq_entries) { 384 return free_lq_entries - 1; 385 } else { 386 return free_sq_entries - 1; 387 } 388} 389 390template <class Impl> 391int 392LSQUnit<Impl>::numLoadsReady() 393{ 394 int load_idx = loadHead; 395 int retval = 0; 396 397 while (load_idx != loadTail) { 398 assert(loadQueue[load_idx]); 399 400 if (loadQueue[load_idx]->readyToIssue()) { 401 ++retval; 402 } 403 } 404 405 return retval; 406} 407 408template <class Impl> 409Fault 410LSQUnit<Impl>::executeLoad(DynInstPtr &inst) 411{ 412 using namespace TheISA; 413 // Execute a specific load. 414 Fault load_fault = NoFault; 415 416 DPRINTF(LSQUnit, "Executing load PC %#x, [sn:%lli]\n", 417 inst->readPC(),inst->seqNum); 418 419 assert(!inst->isSquashed()); 420 421 load_fault = inst->initiateAcc(); 422 423 // If the instruction faulted, then we need to send it along to commit 424 // without the instruction completing. 425 if (load_fault != NoFault) { 426 // Send this instruction to commit, also make sure iew stage 427 // realizes there is activity. 428 // Mark it as executed unless it is an uncached load that 429 // needs to hit the head of commit. 430 if (!(inst->hasRequest() && inst->uncacheable()) || 431 inst->isAtCommit()) { 432 inst->setExecuted(); 433 } 434 iewStage->instToCommit(inst); 435 iewStage->activityThisCycle(); 436 } else if (!loadBlocked()) { 437 assert(inst->effAddrValid); 438 int load_idx = inst->lqIdx; 439 incrLdIdx(load_idx); 440 while (load_idx != loadTail) { 441 // Really only need to check loads that have actually executed 442 443 // @todo: For now this is extra conservative, detecting a 444 // violation if the addresses match assuming all accesses 445 // are quad word accesses. 446 447 // @todo: Fix this, magic number being used here 448 if (loadQueue[load_idx]->effAddrValid && 449 (loadQueue[load_idx]->effAddr >> 8) == 450 (inst->effAddr >> 8)) { 451 // A load incorrectly passed this load. Squash and refetch. 
// Execute a store: initiate its access (address translation / data
// capture) and scan younger loads for memory-ordering violations.
// @return the fault from initiateAcc(), or a machine-check fault when a
//         younger load incorrectly executed past this store.
template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %#x [sn:%lli]\n",
            store_inst->readPC(), store_inst->seqNum);

    assert(!store_inst->isSquashed());

    // Check the recently completed loads to see if any match this store's
    // address. If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    if (storeQueue[store_idx].size == 0) {
        // Zero-size store: initiateAcc() never wrote data (likely a
        // fault during translation) — nothing more to do here.
        DPRINTF(LSQUnit,"Fault on Store PC %#x, [sn:%lli],Size = 0\n",
                store_inst->readPC(),store_inst->seqNum);

        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    assert(store_inst->effAddrValid);
    // Scan all loads younger than this store (from its recorded lqIdx to
    // the LQ tail) for an address overlap.
    while (load_idx != loadTail) {
        // Really only need to check loads that have actually executed
        // It's safe to check all loads because effAddr is set to
        // InvalAddr when the dyn inst is created.

        // @todo: For now this is extra conservative, detecting a
        // violation if the addresses match assuming all accesses
        // are quad word accesses.

        // @todo: Fix this, magic number being used here
        if (loadQueue[load_idx]->effAddrValid &&
            (loadQueue[load_idx]->effAddr >> 8) ==
            (store_inst->effAddr >> 8)) {
            // A load incorrectly passed this store. Squash and refetch.
            // For now return a fault to show that it was unsuccessful.
            DynInstPtr violator = loadQueue[load_idx];
            if (!memDepViolator ||
                (violator->seqNum < memDepViolator->seqNum)) {
                memDepViolator = violator;
            } else {
                // An older violator is already recorded; since the scan
                // proceeds oldest-to-youngest, nothing younger matters.
                break;
            }

            ++lsqMemOrderViolation;

            return genMachineCheckFault();
        }

        incrLdIdx(load_idx);
    }

    return store_fault;
}

// Retire the load at the queue head, freeing its slot.
template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %#x\n",
            loadQueue[loadHead]->readPC());

    // Clear the smart pointer so the instruction's refcount drops.
    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

// Retire, in order, every load with a sequence number at or below
// youngest_inst (the youngest instruction commit has retired).
template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}
// Write committed stores out to memory, oldest first, until we run out of
// writeback-able stores, cache ports, or the cache blocks.  Handles the
// special cases of zero-size stores, data prefetches, and failed store
// conditionals (which complete instantly via a WritebackEvent).
template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           usedPorts < cachePorts) {

        if (isStoreBlocked || lsq->cacheBlocked()) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        // Prefetches consume a port but send no packet here.
        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        storeQueue[storeWBIdx].committed = true;

        // NOTE(review): fixed 64-byte buffer — assumes req->getSize()
        // never exceeds 64 (one cache line); confirm against the ISA's
        // maximum store size.
        assert(!inst->memData);
        inst->memData = new uint8_t[64];

        memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());

        // Pick the command: swap, store-conditional, or plain write.
        MemCmd command =
            req->isSwap() ? MemCmd::SwapReq :
            (req->isLocked() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
        PacketPtr data_pkt = new Packet(req, command,
                                       Packet::Broadcast);
        data_pkt->dataStatic(inst->memData);

        // Sender state travels with the packet so completeDataAccess()
        // can find the SQ entry when the response returns.
        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;
        data_pkt->senderState = state;

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%#x "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->readPC(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            // Disable recording the result temporarily. Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult = false;
            bool success = TheISA::handleLockedWrite(inst.get(), req);
            inst->recordResult = true;

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. "
                        "Instantly completing it.\n",
                        inst->seqNum);
                // The WritebackEvent takes ownership of data_pkt (and
                // frees req/state), so no send happens for this entry.
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                wb->schedule(curTick + 1);
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        if (!dcachePort->sendTiming(data_pkt)) {
            // Need to handle becoming blocked on a store.
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], will"
                    "retry later\n",
                    inst->seqNum);
            // Stash the packet; recvRetry() will resend it.
            isStoreBlocked = true;
            ++lsqCacheBlocked;
            assert(retryPkt == NULL);
            retryPkt = data_pkt;
            lsq->setRetryTid(lsqID);
        } else {
            storePostSend(data_pkt);
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}
// Post-send bookkeeping for a store packet that was accepted by the
// dcache port: clears any load stalled on this store, marks non-SC
// stores completed immediately (their response needs no writeback), and
// advances the writeback index.  `pkt` is unused here; ownership already
// passed to the memory system.
template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();
#if USE_CHECKER
        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
#endif
    }

    incrStIdx(storeWBIdx);
}

// Finish an access: copy returned data into the instruction (first
// response only), mark it executed, and forward it to commit.  Squashed
// instructions just release their writeback slot.
template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();
}

// Mark the SQ entry at store_idx completed.  Stores may complete out of
// order, so the head pointer only advances past runs of completed
// entries; also clears any load stalled on this store and notifies the
// checker CPU.
template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        // Pop the head and keep popping while the following entries have
        // also already completed (out-of-order completions behind us).
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    // Tell the checker we've completed this instruction. Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
#if USE_CHECKER
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
#endif
}
| 1/* 2 * Copyright (c) 2004-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 * 28 * Authors: Kevin Lim 29 * Korey Sewell 30 */ 31 32#include "arch/locked_mem.hh" 33#include "config/use_checker.hh" 34 35#include "cpu/o3/lsq.hh" 36#include "cpu/o3/lsq_unit.hh" 37#include "base/str.hh" 38#include "mem/packet.hh" 39#include "mem/request.hh" 40 41#if USE_CHECKER 42#include "cpu/checker/cpu.hh" 43#endif 44 45template<class Impl> 46LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt, 47 LSQUnit *lsq_ptr) 48 : Event(&mainEventQueue), inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr) 49{ 50 this->setFlags(Event::AutoDelete); 51} 52 53template<class Impl> 54void 55LSQUnit<Impl>::WritebackEvent::process() 56{ 57 if (!lsqPtr->isSwitchedOut()) { 58 lsqPtr->writeback(inst, pkt); 59 } 60 61 if (pkt->senderState) 62 delete pkt->senderState; 63 64 delete pkt->req; 65 delete pkt; 66} 67 68template<class Impl> 69const char * 70LSQUnit<Impl>::WritebackEvent::description() 71{ 72 return "Store writeback"; 73} 74 75template<class Impl> 76void 77LSQUnit<Impl>::completeDataAccess(PacketPtr pkt) 78{ 79 LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState); 80 DynInstPtr inst = state->inst; 81 DPRINTF(IEW, "Writeback event [sn:%lli]\n", inst->seqNum); 82 DPRINTF(Activity, "Activity: Writeback event [sn:%lli]\n", inst->seqNum); 83 84 //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum); 85 86 if (isSwitchedOut() || inst->isSquashed()) { 87 iewStage->decrWb(inst->seqNum); 88 } else { 89 if (!state->noWB) { 90 writeback(inst, pkt); 91 } 92 93 if (inst->isStore()) { 94 completeStore(state->idx); 95 } 96 } 97 98 delete state; 99 delete pkt->req; 100 delete pkt; 101} 102 103template <class Impl> 104LSQUnit<Impl>::LSQUnit() 105 : loads(0), stores(0), storesToWB(0), stalled(false), 106 isStoreBlocked(false), isLoadBlocked(false), 107 loadBlockedHandled(false) 108{ 109} 110 111template<class Impl> 112void 113LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, Params *params, LSQ *lsq_ptr, 114 unsigned maxLQEntries, unsigned 
maxSQEntries, unsigned id) 115{ 116 cpu = cpu_ptr; 117 iewStage = iew_ptr; 118 119 DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id); 120 121 switchedOut = false; 122 123 lsq = lsq_ptr; 124 125 lsqID = id; 126 127 // Add 1 for the sentinel entry (they are circular queues). 128 LQEntries = maxLQEntries + 1; 129 SQEntries = maxSQEntries + 1; 130 131 loadQueue.resize(LQEntries); 132 storeQueue.resize(SQEntries); 133 134 loadHead = loadTail = 0; 135 136 storeHead = storeWBIdx = storeTail = 0; 137 138 usedPorts = 0; 139 cachePorts = params->cachePorts; 140 141 retryPkt = NULL; 142 memDepViolator = NULL; 143 144 blockedLoadSeqNum = 0; 145} 146 147template<class Impl> 148std::string 149LSQUnit<Impl>::name() const 150{ 151 if (Impl::MaxThreads == 1) { 152 return iewStage->name() + ".lsq"; 153 } else { 154 return iewStage->name() + ".lsq.thread." + to_string(lsqID); 155 } 156} 157 158template<class Impl> 159void 160LSQUnit<Impl>::regStats() 161{ 162 lsqForwLoads 163 .name(name() + ".forwLoads") 164 .desc("Number of loads that had data forwarded from stores"); 165 166 invAddrLoads 167 .name(name() + ".invAddrLoads") 168 .desc("Number of loads ignored due to an invalid address"); 169 170 lsqSquashedLoads 171 .name(name() + ".squashedLoads") 172 .desc("Number of loads squashed"); 173 174 lsqIgnoredResponses 175 .name(name() + ".ignoredResponses") 176 .desc("Number of memory responses ignored because the instruction is squashed"); 177 178 lsqMemOrderViolation 179 .name(name() + ".memOrderViolation") 180 .desc("Number of memory ordering violations"); 181 182 lsqSquashedStores 183 .name(name() + ".squashedStores") 184 .desc("Number of stores squashed"); 185 186 invAddrSwpfs 187 .name(name() + ".invAddrSwpfs") 188 .desc("Number of software prefetches ignored due to an invalid address"); 189 190 lsqBlockedLoads 191 .name(name() + ".blockedLoads") 192 .desc("Number of blocked loads due to partial load-store forwarding"); 193 194 lsqRescheduledLoads 195 .name(name() + 
".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache being blocked");
}

// Hook this LSQ unit up to the data cache port.  When a checker CPU is
// compiled in and enabled, the checker shares the same dcache port.
template<class Impl>
void
LSQUnit<Impl>::setDcachePort(Port *dcache_port)
{
    dcachePort = dcache_port;

#if USE_CHECKER
    if (cpu->checker) {
        cpu->checker->setDcachePort(dcachePort);
    }
#endif
}

// Drop every entry in the load queue.
template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

// Drop every entry in the store queue.
template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

// Switch this LSQ unit out.  The caller is expected to have drained the
// unit first: every LQ slot must already be empty (the assert checks this;
// the NULL store is then redundant but harmless) and no stores may still
// be waiting to write back.
template<class Impl>
void
LSQUnit<Impl>::switchOut()
{
    switchedOut = true;
    for (int i = 0; i < loadQueue.size(); ++i) {
        assert(!loadQueue[i]);
        loadQueue[i] = NULL;
    }

    assert(storesToWB == 0);
}

// Re-activate the unit after a CPU switch: reset all counts, head/tail
// indices, and blocking/stall state to a pristine empty-queue state.
template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    switchedOut = false;
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;
}

// Resize the load queue to 'size' usable entries.  One extra sentinel slot
// is kept so a full queue can be told apart from an empty one.  Growing
// appends empty slots; shrinking only lowers the logical limit (the
// underlying vector is not shrunk).
template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }

}

// Resize the store queue; same sentinel-slot scheme as resizeLQ above.
template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }
}

// Insert a memory instruction into the proper queue (LQ or SQ) and mark
// it as resident in the LSQ.
template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

// Insert a load at the LQ tail.  Records the youngest store index so the
// load later knows which stores precede it (-1 if the SQ is empty).
template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    // Queue must not be full (tail+1 wrapping onto head means full).
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %#x, idx:%i [sn:%lli]\n",
            load_inst->readPC(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

// Insert a store at the SQ tail, remembering the current LQ tail so later
// loads can be checked against it for ordering violations.
template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %#x, idx:%i [sn:%lli]\n",
            store_inst->readPC(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

// Return the recorded memory-order violator (if any) and clear it, so the
// violation is only reported once.
template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

// Report free entries as the minimum of LQ and SQ headroom; the caller
// treats the two queues as a single resource.
template <class Impl>
unsigned
LSQUnit<Impl>::numFreeEntries()
{
    unsigned free_lq_entries = LQEntries - loads;
    unsigned free_sq_entries = SQEntries - stores;

    // Both the LQ and SQ entries have an extra dummy entry to differentiate
    // empty/full conditions.  Subtract 1 from the free entries.
383 if (free_lq_entries < free_sq_entries) { 384 return free_lq_entries - 1; 385 } else { 386 return free_sq_entries - 1; 387 } 388} 389 390template <class Impl> 391int 392LSQUnit<Impl>::numLoadsReady() 393{ 394 int load_idx = loadHead; 395 int retval = 0; 396 397 while (load_idx != loadTail) { 398 assert(loadQueue[load_idx]); 399 400 if (loadQueue[load_idx]->readyToIssue()) { 401 ++retval; 402 } 403 } 404 405 return retval; 406} 407 408template <class Impl> 409Fault 410LSQUnit<Impl>::executeLoad(DynInstPtr &inst) 411{ 412 using namespace TheISA; 413 // Execute a specific load. 414 Fault load_fault = NoFault; 415 416 DPRINTF(LSQUnit, "Executing load PC %#x, [sn:%lli]\n", 417 inst->readPC(),inst->seqNum); 418 419 assert(!inst->isSquashed()); 420 421 load_fault = inst->initiateAcc(); 422 423 // If the instruction faulted, then we need to send it along to commit 424 // without the instruction completing. 425 if (load_fault != NoFault) { 426 // Send this instruction to commit, also make sure iew stage 427 // realizes there is activity. 428 // Mark it as executed unless it is an uncached load that 429 // needs to hit the head of commit. 430 if (!(inst->hasRequest() && inst->uncacheable()) || 431 inst->isAtCommit()) { 432 inst->setExecuted(); 433 } 434 iewStage->instToCommit(inst); 435 iewStage->activityThisCycle(); 436 } else if (!loadBlocked()) { 437 assert(inst->effAddrValid); 438 int load_idx = inst->lqIdx; 439 incrLdIdx(load_idx); 440 while (load_idx != loadTail) { 441 // Really only need to check loads that have actually executed 442 443 // @todo: For now this is extra conservative, detecting a 444 // violation if the addresses match assuming all accesses 445 // are quad word accesses. 446 447 // @todo: Fix this, magic number being used here 448 if (loadQueue[load_idx]->effAddrValid && 449 (loadQueue[load_idx]->effAddr >> 8) == 450 (inst->effAddr >> 8)) { 451 // A load incorrectly passed this load. Squash and refetch. 
                // For now return a fault to show that it was unsuccessful.
                DynInstPtr violator = loadQueue[load_idx];
                // Keep only the oldest violator; if an older one is already
                // recorded, the rest of the scan is redundant.
                if (!memDepViolator ||
                    (violator->seqNum < memDepViolator->seqNum)) {
                    memDepViolator = violator;
                } else {
                    break;
                }

                ++lsqMemOrderViolation;

                return genMachineCheckFault();
            }

            incrLdIdx(load_idx);
        }
    }

    return load_fault;
}

// Execute (initiate the access for) a store: compute its address/data via
// initiateAcc(), then scan all loads younger than the store for a memory
// ordering violation (a load that already executed with an overlapping
// address).  Returns the fault from initiateAcc, or a machine check fault
// when a violation is found.
template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %#x [sn:%lli]\n",
            store_inst->readPC(), store_inst->seqNum);

    assert(!store_inst->isSquashed());

    // Check the recently completed loads to see if any match this store's
    // address.  If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    // Size 0 means initiateAcc did not set up a store (e.g. it faulted
    // before writing data); nothing further to check here.
    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit,"Fault on Store PC %#x, [sn:%lli],Size = 0\n",
                store_inst->readPC(),store_inst->seqNum);

        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    assert(store_inst->effAddrValid);
    while (load_idx != loadTail) {
        // Really only need to check loads that have actually executed
        // It's safe to check all loads because effAddr is set to
        // InvalAddr when the dyn inst is created.

        // @todo: For now this is extra conservative, detecting a
        // violation if the addresses match assuming all accesses
        // are quad word accesses.

        // @todo: Fix this, magic number being used here
        if (loadQueue[load_idx]->effAddrValid &&
            (loadQueue[load_idx]->effAddr >> 8) ==
            (store_inst->effAddr >> 8)) {
            // A load incorrectly passed this store.  Squash and refetch.
            // For now return a fault to show that it was unsuccessful.
            DynInstPtr violator = loadQueue[load_idx];
            // Track only the oldest violating load.
            if (!memDepViolator ||
                (violator->seqNum < memDepViolator->seqNum)) {
                memDepViolator = violator;
            } else {
                break;
            }

            ++lsqMemOrderViolation;

            return genMachineCheckFault();
        }

        incrLdIdx(load_idx);
    }

    return store_fault;
}

// Commit the load at the head of the LQ: release its slot and advance
// the head pointer.
template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %#x\n",
            loadQueue[loadHead]->readPC());

    // Clear the smart pointer so the instruction's refcount drops.
    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

// Commit, in order, every load at least as old as youngest_inst.
template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

// Mark every store at least as old as youngest_inst as able to write back.
// Unlike loads, committed stores stay in the SQ until their writeback
// completes, so this only flips canWB rather than freeing entries.
template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            // Stores are walked oldest-first, so the first one younger
            // than youngest_inst ends the scan.
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%#x [sn:%lli]\n",
                    storeQueue[store_idx].inst->readPC(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

// Write back as many committed stores as possible this cycle, bounded by
// the number of cache ports and stopped early if the cache blocks.  Builds
// a packet per store, handles the store-conditional fast-fail path, and
// retries later (via retryPkt) if the dcache port refuses the send.
template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           usedPorts < cachePorts) {

        if (isStoreBlocked || lsq->cacheBlocked()) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        // Prefetches consume a port but send no writeback data.
        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        storeQueue[storeWBIdx].committed = true;

        // Copy the store's data into a buffer owned by the instruction;
        // the packet below references this buffer without copying.
        // NOTE(review): 64 here looks like an assumed max store size /
        // cache line size — confirm against the memory system config.
        assert(!inst->memData);
        inst->memData = new uint8_t[64];

        memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());

        // Pick the memory command: swap, store-conditional, or plain write.
        MemCmd command =
            req->isSwap() ? MemCmd::SwapReq :
            (req->isLocked() ?
MemCmd::StoreCondReq : MemCmd::WriteReq);
        PacketPtr data_pkt = new Packet(req, command,
                                        Packet::Broadcast);
        data_pkt->dataStatic(inst->memData);

        // Sender state lets completeDataAccess() find this SQ entry when
        // the response comes back.
        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;
        data_pkt->senderState = state;

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%#x "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->readPC(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            // Disable recording the result temporarily.  Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult = false;
            bool success = TheISA::handleLockedWrite(inst.get(), req);
            inst->recordResult = true;

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed.  "
                        "Instantly completing it.\n",
                        inst->seqNum);
                // Schedule a writeback next tick so the SC result is
                // still delivered to the instruction.
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                wb->schedule(curTick + 1);
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        if (!dcachePort->sendTiming(data_pkt)) {
            // Need to handle becoming blocked on a store.
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], will"
                    "retry later\n",
                    inst->seqNum);
            isStoreBlocked = true;
            ++lsqCacheBlocked;
            // Stash the packet; recvRetry() re-sends it when the cache
            // unblocks.
            assert(retryPkt == NULL);
            retryPkt = data_pkt;
            lsq->setRetryTid(lsqID);
        } else {
            storePostSend(data_pkt);
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
    }
}*/

// Squash every instruction younger than squashed_num, walking each queue
// backwards from the tail.  Also clears blocked-load and memory-order
// violator state made stale by the squash.  Stores that already committed
// (canWB) are never squashed.
template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]!"
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit,"Load Instruction PC %#x squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->readPC(),
                loadQueue[load_idx]->seqNum);

        // If the stalling load is being squashed, the stall is over.
        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    // Forget the blocked load if it is younger than the squash point.
    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    // A recorded violator younger than the squash point is no longer valid.
    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit,"Store Instruction PC %#x squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->readPC(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen.  It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

// Bookkeeping after a store packet was successfully handed to the dcache:
// release any load stalled on this store, mark non-SC stores completed
// (and verified by the checker), and advance the writeback index.
template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();
#if USE_CHECKER
        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
#endif
    }

    incrStIdx(storeWBIdx);
}

// Deliver a memory response to an instruction: complete the access,
// mark it executed, and forward it to commit.  Squashed instructions
// (loads only — squashed stores never reach here, per the assert) just
// release their writeback slot.
template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();
}

// Finish the store at store_idx: mark it completed, retire any contiguous
// run of completed stores at the SQ head, release a load stalled on this
// store, and report the store to the checker CPU.
template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    // Stores can complete out of order; only pop entries once the head
    // itself is done, sweeping forward over any completed run.
    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    // Tell the checker we've completed this instruction.  Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
#if USE_CHECKER
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
#endif
}

// Retry handler invoked when the dcache unblocks after a failed send.
template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
|