1/* 2 * Copyright (c) 2010 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2004-2005 The Regents of The University of Michigan 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Kevin Lim 41 * Korey Sewell 42 */ 43 44#include "arch/locked_mem.hh" 45#include "config/the_isa.hh" 46#include "config/use_checker.hh" 47#include "cpu/o3/lsq.hh" 48#include "cpu/o3/lsq_unit.hh" 49#include "base/str.hh" 50#include "mem/packet.hh" 51#include "mem/request.hh" 52 53#if USE_CHECKER 54#include "cpu/checker/cpu.hh" 55#endif 56 57template<class Impl> 58LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt, 59 LSQUnit *lsq_ptr) 60 : inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr) 61{ 62 this->setFlags(Event::AutoDelete); 63} 64 65template<class Impl> 66void 67LSQUnit<Impl>::WritebackEvent::process() 68{ 69 if (!lsqPtr->isSwitchedOut()) { 70 lsqPtr->writeback(inst, pkt); 71 } 72 73 if (pkt->senderState) 74 delete pkt->senderState; 75 76 delete pkt->req; 77 delete pkt; 78} 79 80template<class Impl> 81const char * 82LSQUnit<Impl>::WritebackEvent::description() const 83{ 84 return "Store writeback"; 85} 86 87template<class Impl> 88void 89LSQUnit<Impl>::completeDataAccess(PacketPtr pkt) 90{ 91 LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState); 92 DynInstPtr inst = state->inst; 93 DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum); 94 DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum); 95 96 //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum); 97 98 assert(!pkt->wasNacked()); 99 100 // If this is a split access, wait until all packets are received. 101 if (TheISA::HasUnalignedMemAcc && !state->complete()) { 102 delete pkt->req; 103 delete pkt; 104 return; 105 } 106 107 if (isSwitchedOut() || inst->isSquashed()) { 108 iewStage->decrWb(inst->seqNum); 109 } else { 110 if (!state->noWB) { 111 if (!TheISA::HasUnalignedMemAcc || !state->isSplit || 112 !state->isLoad) { 113 writeback(inst, pkt); 114 } else { 115 writeback(inst, state->mainPkt); 116 } 117 } 118 119 if (inst->isStore()) { 120 completeStore(state->idx); 121 } 122 } 123 124 if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) { 125 delete state->mainPkt->req; 126 delete state->mainPkt; 127 } 128 delete state; 129 delete pkt->req; 130 delete pkt; 131} 132 133template <class Impl> 134LSQUnit<Impl>::LSQUnit() 135 : loads(0), stores(0), storesToWB(0), stalled(false), 136 isStoreBlocked(false), isLoadBlocked(false), 137 loadBlockedHandled(false), hasPendingPkt(false) 138{ 139} 140 141template<class Impl> 142void 143LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params, 144 LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries, 145 unsigned id) 146{ 147 cpu = cpu_ptr; 148 iewStage = iew_ptr; 149 150 DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id); 151 152 switchedOut = false; 153 154 lsq = lsq_ptr; 155 156 lsqID = id; 157 158 // Add 1 for the sentinel entry (they are circular queues). 159 LQEntries = maxLQEntries + 1; 160 SQEntries = maxSQEntries + 1; 161 162 loadQueue.resize(LQEntries); 163 storeQueue.resize(SQEntries); 164 165 loadHead = loadTail = 0; 166 167 storeHead = storeWBIdx = storeTail = 0; 168 169 usedPorts = 0; 170 cachePorts = params->cachePorts; 171 172 retryPkt = NULL; 173 memDepViolator = NULL; 174 175 blockedLoadSeqNum = 0; 176} 177 178template<class Impl> 179std::string 180LSQUnit<Impl>::name() const 181{ 182 if (Impl::MaxThreads == 1) { 183 return iewStage->name() + ".lsq"; 184 } else { 185 return iewStage->name() + ".lsq.thread." + to_string(lsqID); 186 } 187} 188 189template<class Impl> 190void 191LSQUnit<Impl>::regStats() 192{ 193 lsqForwLoads 194 .name(name() + ".forwLoads") 195 .desc("Number of loads that had data forwarded from stores"); 196 197 invAddrLoads 198 .name(name() + ".invAddrLoads") 199 .desc("Number of loads ignored due to an invalid address"); 200 201 lsqSquashedLoads 202 .name(name() + ".squashedLoads") 203 .desc("Number of loads squashed"); 204 205 lsqIgnoredResponses 206 .name(name() + ".ignoredResponses") 207 .desc("Number of memory responses ignored because the instruction is squashed"); 208 209 lsqMemOrderViolation 210 .name(name() + ".memOrderViolation") 211 .desc("Number of memory ordering violations"); 212 213 lsqSquashedStores 214 .name(name() + ".squashedStores") 215 .desc("Number of stores squashed"); 216 217 invAddrSwpfs 218 .name(name() + ".invAddrSwpfs") 219 .desc("Number of software prefetches ignored due to an invalid address"); 220 221 lsqBlockedLoads 222 .name(name() + ".blockedLoads") 223 .desc("Number of blocked loads due to partial load-store forwarding"); 224 225 lsqRescheduledLoads 226 .name(name() + ".rescheduledLoads") 227 .desc("Number of loads that were rescheduled"); 228 229 lsqCacheBlocked 230 .name(name() + ".cacheBlocked") 231 .desc("Number of times an access to memory failed due to the cache being blocked"); 232} 233 234template<class Impl> 235void 236LSQUnit<Impl>::setDcachePort(Port *dcache_port) 237{ 238 dcachePort = dcache_port; 239 240#if USE_CHECKER 241 if (cpu->checker) { 242 cpu->checker->setDcachePort(dcachePort); 243 } 244#endif 245} 246 247template<class Impl> 248void 249LSQUnit<Impl>::clearLQ() 250{ 251 loadQueue.clear(); 252} 253 254template<class Impl> 255void 256LSQUnit<Impl>::clearSQ() 257{ 258 storeQueue.clear(); 259} 260 261template<class Impl> 262void 263LSQUnit<Impl>::switchOut() 264{ 265 switchedOut = true; 266 for (int i = 0; i < loadQueue.size(); ++i) { 267 assert(!loadQueue[i]); 268 loadQueue[i] = NULL; 269 } 270 271 assert(storesToWB == 0); 272} 273 274template<class Impl> 275void 276LSQUnit<Impl>::takeOverFrom() 277{ 278 switchedOut = false; 279 loads = stores = storesToWB = 0; 280 281 loadHead = loadTail = 0; 282 283 storeHead = storeWBIdx = storeTail = 0; 284 285 usedPorts = 0; 286 287 memDepViolator = NULL; 288 289 blockedLoadSeqNum = 0; 290 291 stalled = false; 292 isLoadBlocked = false; 293 loadBlockedHandled = false; 294} 295 296template<class Impl> 297void 298LSQUnit<Impl>::resizeLQ(unsigned size) 299{ 300 unsigned size_plus_sentinel = size + 1; 301 assert(size_plus_sentinel >= LQEntries); 302 303 if (size_plus_sentinel > LQEntries) { 304 while (size_plus_sentinel > loadQueue.size()) { 305 DynInstPtr dummy; 306 loadQueue.push_back(dummy); 307 LQEntries++; 308 } 309 } else { 310 LQEntries = size_plus_sentinel; 311 } 312 313} 314 315template<class Impl> 316void 317LSQUnit<Impl>::resizeSQ(unsigned size) 318{ 319 unsigned size_plus_sentinel = size + 1; 320 if (size_plus_sentinel > SQEntries) { 321 while (size_plus_sentinel > storeQueue.size()) { 322 SQEntry dummy; 323 storeQueue.push_back(dummy); 324 SQEntries++; 325 } 326 } else { 327 SQEntries = size_plus_sentinel; 328 } 329} 330 331template <class Impl> 332void 333LSQUnit<Impl>::insert(DynInstPtr &inst) 334{ 335 assert(inst->isMemRef()); 336 337 assert(inst->isLoad() || inst->isStore()); 338 339 if (inst->isLoad()) { 340 insertLoad(inst); 341 } else { 342 insertStore(inst); 343 } 344 345 inst->setInLSQ(); 346} 347 348template <class Impl> 349void 350LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst) 351{ 352 assert((loadTail + 1) % LQEntries != loadHead); 353 assert(loads < LQEntries); 354 355 DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n", 356 load_inst->pcState(), loadTail, load_inst->seqNum); 357 358 load_inst->lqIdx = loadTail; 359 360 if (stores == 0) { 361 load_inst->sqIdx = -1; 362 } else { 363 load_inst->sqIdx = storeTail; 364 } 365 366 loadQueue[loadTail] = load_inst; 367 368 incrLdIdx(loadTail); 369 370 ++loads; 371} 372 373template <class Impl> 374void 375LSQUnit<Impl>::insertStore(DynInstPtr &store_inst) 376{ 377 // Make sure it is not full before inserting an instruction. 378 assert((storeTail + 1) % SQEntries != storeHead); 379 assert(stores < SQEntries); 380 381 DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n", 382 store_inst->pcState(), storeTail, store_inst->seqNum); 383 384 store_inst->sqIdx = storeTail; 385 store_inst->lqIdx = loadTail; 386 387 storeQueue[storeTail] = SQEntry(store_inst); 388 389 incrStIdx(storeTail); 390 391 ++stores; 392} 393 394template <class Impl> 395typename Impl::DynInstPtr 396LSQUnit<Impl>::getMemDepViolator() 397{ 398 DynInstPtr temp = memDepViolator; 399 400 memDepViolator = NULL; 401 402 return temp; 403} 404 405template <class Impl> 406unsigned 407LSQUnit<Impl>::numFreeEntries() 408{ 409 unsigned free_lq_entries = LQEntries - loads; 410 unsigned free_sq_entries = SQEntries - stores; 411 412 // Both the LQ and SQ entries have an extra dummy entry to differentiate 413 // empty/full conditions. Subtract 1 from the free entries. 414 if (free_lq_entries < free_sq_entries) { 415 return free_lq_entries - 1; 416 } else { 417 return free_sq_entries - 1; 418 } 419} 420 421template <class Impl> 422int 423LSQUnit<Impl>::numLoadsReady() 424{ 425 int load_idx = loadHead; 426 int retval = 0; 427 428 while (load_idx != loadTail) { 429 assert(loadQueue[load_idx]); 430 431 if (loadQueue[load_idx]->readyToIssue()) { 432 ++retval; 433 } 434 } 435 436 return retval; 437} 438 439template <class Impl> 440Fault 441LSQUnit<Impl>::executeLoad(DynInstPtr &inst) 442{ 443 using namespace TheISA; 444 // Execute a specific load. 445 Fault load_fault = NoFault; 446 447 DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n", 448 inst->pcState(), inst->seqNum); 449 450 assert(!inst->isSquashed()); 451 452 load_fault = inst->initiateAcc(); 453 454 if (inst->isTranslationDelayed() && 455 load_fault == NoFault) 456 return load_fault; 457 458 // If the instruction faulted or predicated false, then we need to send it 459 // along to commit without the instruction completing. 460 if (load_fault != NoFault || inst->readPredicate() == false) { 461 // Send this instruction to commit, also make sure iew stage 462 // realizes there is activity. 463 // Mark it as executed unless it is an uncached load that 464 // needs to hit the head of commit. 465 if (inst->readPredicate() == false) 466 inst->forwardOldRegs(); 467 DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n", 468 inst->seqNum, 469 (load_fault != NoFault ? "fault" : "predication")); 470 if (!(inst->hasRequest() && inst->uncacheable()) || 471 inst->isAtCommit()) { 472 inst->setExecuted(); 473 } 474 iewStage->instToCommit(inst); 475 iewStage->activityThisCycle(); 476 } else if (!loadBlocked()) { 477 assert(inst->effAddrValid); 478 int load_idx = inst->lqIdx; 479 incrLdIdx(load_idx); 480 while (load_idx != loadTail) { 481 // Really only need to check loads that have actually executed 482 483 // @todo: For now this is extra conservative, detecting a 484 // violation if the addresses match assuming all accesses 485 // are quad word accesses. 486 487 // @todo: Fix this, magic number being used here 488 489 // @todo: Uncachable load is not executed until it reaches 490 // the head of the ROB. Once this if checks only the executed 491 // loads(as noted above), this check can be removed 492 if (loadQueue[load_idx]->effAddrValid && 493 ((loadQueue[load_idx]->effAddr >> 8) 494 == (inst->effAddr >> 8)) && 495 !loadQueue[load_idx]->uncacheable()) { 496 // A load incorrectly passed this load. Squash and refetch. 497 // For now return a fault to show that it was unsuccessful. 498 DynInstPtr violator = loadQueue[load_idx]; 499 if (!memDepViolator || 500 (violator->seqNum < memDepViolator->seqNum)) { 501 memDepViolator = violator; 502 } else { 503 break; 504 } 505 506 ++lsqMemOrderViolation; 507 508 return genMachineCheckFault(); 509 } 510 511 incrLdIdx(load_idx); 512 } 513 } 514 515 return load_fault; 516} 517 518template <class Impl> 519Fault 520LSQUnit<Impl>::executeStore(DynInstPtr &store_inst) 521{ 522 using namespace TheISA; 523 // Make sure that a store exists. 524 assert(stores != 0); 525 526 int store_idx = store_inst->sqIdx; 527 528 DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n", 529 store_inst->pcState(), store_inst->seqNum); 530 531 assert(!store_inst->isSquashed()); 532 533 // Check the recently completed loads to see if any match this store's 534 // address. If so, then we have a memory ordering violation. 535 int load_idx = store_inst->lqIdx; 536 537 Fault store_fault = store_inst->initiateAcc(); 538 539 if (store_inst->isTranslationDelayed() && 540 store_fault == NoFault) 541 return store_fault; 542 543 if (store_inst->readPredicate() == false) 544 store_inst->forwardOldRegs(); 545 546 if (storeQueue[store_idx].size == 0) { 547 DPRINTF(LSQUnit,"Fault on Store PC %s, [sn:%lli], Size = 0\n", 548 store_inst->pcState(), store_inst->seqNum); 549 550 return store_fault; 551 } else if (store_inst->readPredicate() == false) { 552 DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n", 553 store_inst->seqNum); 554 return store_fault; 555 } 556 557 assert(store_fault == NoFault); 558 559 if (store_inst->isStoreConditional()) { 560 // Store conditionals need to set themselves as able to 561 // writeback if we haven't had a fault by here. 562 storeQueue[store_idx].canWB = true; 563 564 ++storesToWB; 565 } 566 567 assert(store_inst->effAddrValid); 568 while (load_idx != loadTail) { 569 // Really only need to check loads that have actually executed 570 // It's safe to check all loads because effAddr is set to 571 // InvalAddr when the dyn inst is created. 572 573 // @todo: For now this is extra conservative, detecting a 574 // violation if the addresses match assuming all accesses 575 // are quad word accesses. 576 577 // @todo: Fix this, magic number being used here 578 579 // @todo: Uncachable load is not executed until it reaches 580 // the head of the ROB. Once this if checks only the executed 581 // loads(as noted above), this check can be removed 582 if (loadQueue[load_idx]->effAddrValid && 583 ((loadQueue[load_idx]->effAddr >> 8) 584 == (store_inst->effAddr >> 8)) && 585 !loadQueue[load_idx]->uncacheable()) { 586 // A load incorrectly passed this store. Squash and refetch. 587 // For now return a fault to show that it was unsuccessful. 588 DynInstPtr violator = loadQueue[load_idx]; 589 if (!memDepViolator || 590 (violator->seqNum < memDepViolator->seqNum)) { 591 memDepViolator = violator; 592 } else { 593 break; 594 } 595 596 ++lsqMemOrderViolation; 597 598 return genMachineCheckFault(); 599 } 600 601 incrLdIdx(load_idx); 602 } 603 604 return store_fault; 605} 606 607template <class Impl> 608void 609LSQUnit<Impl>::commitLoad() 610{ 611 assert(loadQueue[loadHead]); 612 613 DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n", 614 loadQueue[loadHead]->pcState()); 615 616 loadQueue[loadHead] = NULL; 617 618 incrLdIdx(loadHead); 619 620 --loads; 621} 622 623template <class Impl> 624void 625LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst) 626{ 627 assert(loads == 0 || loadQueue[loadHead]); 628 629 while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) { 630 commitLoad(); 631 } 632} 633 634template <class Impl> 635void 636LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst) 637{ 638 assert(stores == 0 || storeQueue[storeHead].inst); 639 640 int store_idx = storeHead; 641 642 while (store_idx != storeTail) { 643 assert(storeQueue[store_idx].inst); 644 // Mark any stores that are now committed and have not yet 645 // been marked as able to write back. 646 if (!storeQueue[store_idx].canWB) { 647 if (storeQueue[store_idx].inst->seqNum > youngest_inst) { 648 break; 649 } 650 DPRINTF(LSQUnit, "Marking store as able to write back, PC " 651 "%s [sn:%lli]\n", 652 storeQueue[store_idx].inst->pcState(), 653 storeQueue[store_idx].inst->seqNum); 654 655 storeQueue[store_idx].canWB = true; 656 657 ++storesToWB; 658 } 659 660 incrStIdx(store_idx); 661 } 662} 663 664template <class Impl> 665void 666LSQUnit<Impl>::writebackPendingStore() 667{ 668 if (hasPendingPkt) { 669 assert(pendingPkt != NULL); 670 671 // If the cache is blocked, this will store the packet for retry. 672 if (sendStore(pendingPkt)) { 673 storePostSend(pendingPkt); 674 } 675 pendingPkt = NULL; 676 hasPendingPkt = false; 677 } 678} 679 680template <class Impl> 681void 682LSQUnit<Impl>::writebackStores() 683{ 684 // First writeback the second packet from any split store that didn't 685 // complete last cycle because there weren't enough cache ports available. 686 if (TheISA::HasUnalignedMemAcc) { 687 writebackPendingStore(); 688 } 689 690 while (storesToWB > 0 && 691 storeWBIdx != storeTail && 692 storeQueue[storeWBIdx].inst && 693 storeQueue[storeWBIdx].canWB && 694 usedPorts < cachePorts) { 695 696 if (isStoreBlocked || lsq->cacheBlocked()) { 697 DPRINTF(LSQUnit, "Unable to write back any more stores, cache" 698 " is blocked!\n"); 699 break; 700 } 701 702 // Store didn't write any data so no need to write it back to 703 // memory. 704 if (storeQueue[storeWBIdx].size == 0) { 705 completeStore(storeWBIdx); 706 707 incrStIdx(storeWBIdx); 708 709 continue; 710 } 711 712 ++usedPorts; 713 714 if (storeQueue[storeWBIdx].inst->isDataPrefetch()) { 715 incrStIdx(storeWBIdx); 716 717 continue; 718 } 719 720 assert(storeQueue[storeWBIdx].req); 721 assert(!storeQueue[storeWBIdx].committed); 722 723 if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) { 724 assert(storeQueue[storeWBIdx].sreqLow); 725 assert(storeQueue[storeWBIdx].sreqHigh); 726 } 727 728 DynInstPtr inst = storeQueue[storeWBIdx].inst; 729 730 Request *req = storeQueue[storeWBIdx].req; 731 storeQueue[storeWBIdx].committed = true; 732 733 assert(!inst->memData); 734 inst->memData = new uint8_t[64]; 735 736 memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize()); 737 738 MemCmd command = 739 req->isSwap() ? MemCmd::SwapReq : 740 (req->isLLSC() ? MemCmd::StoreCondReq : MemCmd::WriteReq); 741 PacketPtr data_pkt; 742 PacketPtr snd_data_pkt = NULL; 743 744 LSQSenderState *state = new LSQSenderState; 745 state->isLoad = false; 746 state->idx = storeWBIdx; 747 state->inst = inst; 748 749 if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) { 750 751 // Build a single data packet if the store isn't split. 752 data_pkt = new Packet(req, command, Packet::Broadcast); 753 data_pkt->dataStatic(inst->memData); 754 data_pkt->senderState = state; 755 } else { 756 RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow; 757 RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh; 758 759 // Create two packets if the store is split in two. 760 data_pkt = new Packet(sreqLow, command, Packet::Broadcast); 761 snd_data_pkt = new Packet(sreqHigh, command, Packet::Broadcast); 762 763 data_pkt->dataStatic(inst->memData); 764 snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize()); 765 766 data_pkt->senderState = state; 767 snd_data_pkt->senderState = state; 768 769 state->isSplit = true; 770 state->outstanding = 2; 771 772 // Can delete the main request now. 773 delete req; 774 req = sreqLow; 775 } 776 777 DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s " 778 "to Addr:%#x, data:%#x [sn:%lli]\n", 779 storeWBIdx, inst->pcState(), 780 req->getPaddr(), (int)*(inst->memData), 781 inst->seqNum); 782 783 // @todo: Remove this SC hack once the memory system handles it. 784 if (inst->isStoreConditional()) { 785 assert(!storeQueue[storeWBIdx].isSplit); 786 // Disable recording the result temporarily. Writing to 787 // misc regs normally updates the result, but this is not 788 // the desired behavior when handling store conditionals. 789 inst->recordResult = false; 790 bool success = TheISA::handleLockedWrite(inst.get(), req); 791 inst->recordResult = true; 792 793 if (!success) { 794 // Instantly complete this store. 795 DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. " 796 "Instantly completing it.\n", 797 inst->seqNum); 798 WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this); 799 cpu->schedule(wb, curTick() + 1); 800 completeStore(storeWBIdx); 801 incrStIdx(storeWBIdx); 802 continue; 803 } 804 } else { 805 // Non-store conditionals do not need a writeback. 806 state->noWB = true; 807 } 808 809 if (!sendStore(data_pkt)) { 810 DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], will" 811 "retry later\n", 812 inst->seqNum); 813 814 // Need to store the second packet, if split. 815 if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) { 816 state->pktToSend = true; 817 state->pendingPacket = snd_data_pkt; 818 } 819 } else { 820 821 // If split, try to send the second packet too 822 if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) { 823 assert(snd_data_pkt); 824 825 // Ensure there are enough ports to use. 826 if (usedPorts < cachePorts) { 827 ++usedPorts; 828 if (sendStore(snd_data_pkt)) { 829 storePostSend(snd_data_pkt); 830 } else { 831 DPRINTF(IEW, "D-Cache became blocked when writing" 832 " [sn:%lli] second packet, will retry later\n", 833 inst->seqNum); 834 } 835 } else { 836 837 // Store the packet for when there's free ports. 838 assert(pendingPkt == NULL); 839 pendingPkt = snd_data_pkt; 840 hasPendingPkt = true; 841 } 842 } else { 843 844 // Not a split store. 845 storePostSend(data_pkt); 846 } 847 } 848 } 849 850 // Not sure this should set it to 0. 851 usedPorts = 0; 852 853 assert(stores >= 0 && storesToWB >= 0); 854} 855 856/*template <class Impl> 857void 858LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum) 859{ 860 list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(), 861 mshrSeqNums.end(), 862 seqNum); 863 864 if (mshr_it != mshrSeqNums.end()) { 865 mshrSeqNums.erase(mshr_it); 866 DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size()); 867 } 868}*/ 869 870template <class Impl> 871void 872LSQUnit<Impl>::squash(const InstSeqNum &squashed_num) 873{ 874 DPRINTF(LSQUnit, "Squashing until [sn:%lli]!" 875 "(Loads:%i Stores:%i)\n", squashed_num, loads, stores); 876 877 int load_idx = loadTail; 878 decrLdIdx(load_idx); 879 880 while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) { 881 DPRINTF(LSQUnit,"Load Instruction PC %s squashed, " 882 "[sn:%lli]\n", 883 loadQueue[load_idx]->pcState(), 884 loadQueue[load_idx]->seqNum); 885 886 if (isStalled() && load_idx == stallingLoadIdx) { 887 stalled = false; 888 stallingStoreIsn = 0; 889 stallingLoadIdx = 0; 890 } 891 892 // Clear the smart pointer to make sure it is decremented. 893 loadQueue[load_idx]->setSquashed(); 894 loadQueue[load_idx] = NULL; 895 --loads; 896 897 // Inefficient! 898 loadTail = load_idx; 899 900 decrLdIdx(load_idx); 901 ++lsqSquashedLoads; 902 } 903 904 if (isLoadBlocked) { 905 if (squashed_num < blockedLoadSeqNum) { 906 isLoadBlocked = false; 907 loadBlockedHandled = false; 908 blockedLoadSeqNum = 0; 909 } 910 } 911 912 if (memDepViolator && squashed_num < memDepViolator->seqNum) { 913 memDepViolator = NULL; 914 } 915 916 int store_idx = storeTail; 917 decrStIdx(store_idx); 918 919 while (stores != 0 && 920 storeQueue[store_idx].inst->seqNum > squashed_num) { 921 // Instructions marked as can WB are already committed. 922 if (storeQueue[store_idx].canWB) { 923 break; 924 } 925 926 DPRINTF(LSQUnit,"Store Instruction PC %s squashed, " 927 "idx:%i [sn:%lli]\n", 928 storeQueue[store_idx].inst->pcState(), 929 store_idx, storeQueue[store_idx].inst->seqNum); 930 931 // I don't think this can happen. It should have been cleared 932 // by the stalling load. 933 if (isStalled() && 934 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) { 935 panic("Is stalled should have been cleared by stalling load!\n"); 936 stalled = false; 937 stallingStoreIsn = 0; 938 } 939 940 // Clear the smart pointer to make sure it is decremented. 941 storeQueue[store_idx].inst->setSquashed(); 942 storeQueue[store_idx].inst = NULL; 943 storeQueue[store_idx].canWB = 0; 944 945 // Must delete request now that it wasn't handed off to 946 // memory. This is quite ugly. @todo: Figure out the proper 947 // place to really handle request deletes. 948 delete storeQueue[store_idx].req; 949 if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) { 950 delete storeQueue[store_idx].sreqLow; 951 delete storeQueue[store_idx].sreqHigh; 952 953 storeQueue[store_idx].sreqLow = NULL; 954 storeQueue[store_idx].sreqHigh = NULL; 955 } 956 957 storeQueue[store_idx].req = NULL; 958 --stores; 959 960 // Inefficient! 961 storeTail = store_idx; 962 963 decrStIdx(store_idx); 964 ++lsqSquashedStores; 965 } 966} 967 968template <class Impl> 969void 970LSQUnit<Impl>::storePostSend(PacketPtr pkt) 971{ 972 if (isStalled() && 973 storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) { 974 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] " 975 "load idx:%i\n", 976 stallingStoreIsn, stallingLoadIdx); 977 stalled = false; 978 stallingStoreIsn = 0; 979 iewStage->replayMemInst(loadQueue[stallingLoadIdx]); 980 } 981 982 if (!storeQueue[storeWBIdx].inst->isStoreConditional()) { 983 // The store is basically completed at this time. This 984 // only works so long as the checker doesn't try to 985 // verify the value in memory for stores. 986 storeQueue[storeWBIdx].inst->setCompleted(); 987#if USE_CHECKER 988 if (cpu->checker) { 989 cpu->checker->verify(storeQueue[storeWBIdx].inst); 990 } 991#endif 992 } 993 994 incrStIdx(storeWBIdx); 995} 996 997template <class Impl> 998void 999LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt) 1000{ 1001 iewStage->wakeCPU(); 1002 1003 // Squashed instructions do not need to complete their access. 1004 if (inst->isSquashed()) { 1005 iewStage->decrWb(inst->seqNum); 1006 assert(!inst->isStore()); 1007 ++lsqIgnoredResponses; 1008 return; 1009 } 1010 1011 if (!inst->isExecuted()) { 1012 inst->setExecuted(); 1013 1014 // Complete access to copy data to proper place. 1015 inst->completeAcc(pkt); 1016 } 1017 1018 // Need to insert instruction into queue to commit 1019 iewStage->instToCommit(inst); 1020 1021 iewStage->activityThisCycle(); 1022 1023 // see if this load changed the PC 1024 iewStage->checkMisprediction(inst); 1025} 1026 1027template <class Impl> 1028void 1029LSQUnit<Impl>::completeStore(int store_idx) 1030{ 1031 assert(storeQueue[store_idx].inst); 1032 storeQueue[store_idx].completed = true; 1033 --storesToWB; 1034 // A bit conservative because a store completion may not free up entries, 1035 // but hopefully avoids two store completions in one cycle from making 1036 // the CPU tick twice. 1037 cpu->wakeCPU(); 1038 cpu->activityThisCycle(); 1039 1040 if (store_idx == storeHead) { 1041 do { 1042 incrStIdx(storeHead); 1043 1044 --stores; 1045 } while (storeQueue[storeHead].completed && 1046 storeHead != storeTail); 1047 1048 iewStage->updateLSQNextCycle = true; 1049 } 1050 1051 DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head " 1052 "idx:%i\n", 1053 storeQueue[store_idx].inst->seqNum, store_idx, storeHead); 1054 1055 if (isStalled() && 1056 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) { 1057 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] " 1058 "load idx:%i\n", 1059 stallingStoreIsn, stallingLoadIdx); 1060 stalled = false; 1061 stallingStoreIsn = 0; 1062 iewStage->replayMemInst(loadQueue[stallingLoadIdx]); 1063 } 1064 1065 storeQueue[store_idx].inst->setCompleted(); 1066 1067 // Tell the checker we've completed this instruction. Some stores 1068 // may get reported twice to the checker, but the checker can 1069 // handle that case. 1070#if USE_CHECKER 1071 if (cpu->checker) { 1072 cpu->checker->verify(storeQueue[store_idx].inst); 1073 } 1074#endif 1075} 1076 1077template <class Impl> 1078bool 1079LSQUnit<Impl>::sendStore(PacketPtr data_pkt) 1080{ 1081 if (!dcachePort->sendTiming(data_pkt)) { 1082 // Need to handle becoming blocked on a store. 1083 isStoreBlocked = true; 1084 ++lsqCacheBlocked; 1085 assert(retryPkt == NULL); 1086 retryPkt = data_pkt; 1087 lsq->setRetryTid(lsqID); 1088 return false; 1089 } 1090 return true; 1091} 1092 1093template <class Impl> 1094void 1095LSQUnit<Impl>::recvRetry() 1096{ 1097 if (isStoreBlocked) { 1098 DPRINTF(LSQUnit, "Receiving retry: store blocked\n"); 1099 assert(retryPkt != NULL); 1100 1101 if (dcachePort->sendTiming(retryPkt)) { 1102 LSQSenderState *state = 1103 dynamic_cast<LSQSenderState *>(retryPkt->senderState); 1104 1105 // Don't finish the store unless this is the last packet.
| 1/* 2 * Copyright (c) 2010 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2004-2005 The Regents of The University of Michigan 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Kevin Lim 41 * Korey Sewell 42 */ 43 44#include "arch/locked_mem.hh" 45#include "config/the_isa.hh" 46#include "config/use_checker.hh" 47#include "cpu/o3/lsq.hh" 48#include "cpu/o3/lsq_unit.hh" 49#include "base/str.hh" 50#include "mem/packet.hh" 51#include "mem/request.hh" 52 53#if USE_CHECKER 54#include "cpu/checker/cpu.hh" 55#endif 56 57template<class Impl> 58LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt, 59 LSQUnit *lsq_ptr) 60 : inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr) 61{ 62 this->setFlags(Event::AutoDelete); 63} 64 65template<class Impl> 66void 67LSQUnit<Impl>::WritebackEvent::process() 68{ 69 if (!lsqPtr->isSwitchedOut()) { 70 lsqPtr->writeback(inst, pkt); 71 } 72 73 if (pkt->senderState) 74 delete pkt->senderState; 75 76 delete pkt->req; 77 delete pkt; 78} 79 80template<class Impl> 81const char * 82LSQUnit<Impl>::WritebackEvent::description() const 83{ 84 return "Store writeback"; 85} 86 87template<class Impl> 88void 89LSQUnit<Impl>::completeDataAccess(PacketPtr pkt) 90{ 91 LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState); 92 DynInstPtr inst = state->inst; 93 DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum); 94 DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum); 95 96 //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum); 97 98 assert(!pkt->wasNacked()); 99 100 // If this is a split access, wait until all packets are received. 101 if (TheISA::HasUnalignedMemAcc && !state->complete()) { 102 delete pkt->req; 103 delete pkt; 104 return; 105 } 106 107 if (isSwitchedOut() || inst->isSquashed()) { 108 iewStage->decrWb(inst->seqNum); 109 } else { 110 if (!state->noWB) { 111 if (!TheISA::HasUnalignedMemAcc || !state->isSplit || 112 !state->isLoad) { 113 writeback(inst, pkt); 114 } else { 115 writeback(inst, state->mainPkt); 116 } 117 } 118 119 if (inst->isStore()) { 120 completeStore(state->idx); 121 } 122 } 123 124 if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) { 125 delete state->mainPkt->req; 126 delete state->mainPkt; 127 } 128 delete state; 129 delete pkt->req; 130 delete pkt; 131} 132 133template <class Impl> 134LSQUnit<Impl>::LSQUnit() 135 : loads(0), stores(0), storesToWB(0), stalled(false), 136 isStoreBlocked(false), isLoadBlocked(false), 137 loadBlockedHandled(false), hasPendingPkt(false) 138{ 139} 140 141template<class Impl> 142void 143LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params, 144 LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries, 145 unsigned id) 146{ 147 cpu = cpu_ptr; 148 iewStage = iew_ptr; 149 150 DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id); 151 152 switchedOut = false; 153 154 lsq = lsq_ptr; 155 156 lsqID = id; 157 158 // Add 1 for the sentinel entry (they are circular queues). 159 LQEntries = maxLQEntries + 1; 160 SQEntries = maxSQEntries + 1; 161 162 loadQueue.resize(LQEntries); 163 storeQueue.resize(SQEntries); 164 165 loadHead = loadTail = 0; 166 167 storeHead = storeWBIdx = storeTail = 0; 168 169 usedPorts = 0; 170 cachePorts = params->cachePorts; 171 172 retryPkt = NULL; 173 memDepViolator = NULL; 174 175 blockedLoadSeqNum = 0; 176} 177 178template<class Impl> 179std::string 180LSQUnit<Impl>::name() const 181{ 182 if (Impl::MaxThreads == 1) { 183 return iewStage->name() + ".lsq"; 184 } else { 185 return iewStage->name() + ".lsq.thread." + to_string(lsqID); 186 } 187} 188 189template<class Impl> 190void 191LSQUnit<Impl>::regStats() 192{ 193 lsqForwLoads 194 .name(name() + ".forwLoads") 195 .desc("Number of loads that had data forwarded from stores"); 196 197 invAddrLoads 198 .name(name() + ".invAddrLoads") 199 .desc("Number of loads ignored due to an invalid address"); 200 201 lsqSquashedLoads 202 .name(name() + ".squashedLoads") 203 .desc("Number of loads squashed"); 204 205 lsqIgnoredResponses 206 .name(name() + ".ignoredResponses") 207 .desc("Number of memory responses ignored because the instruction is squashed"); 208 209 lsqMemOrderViolation 210 .name(name() + ".memOrderViolation") 211 .desc("Number of memory ordering violations"); 212 213 lsqSquashedStores 214 .name(name() + ".squashedStores") 215 .desc("Number of stores squashed"); 216 217 invAddrSwpfs 218 .name(name() + ".invAddrSwpfs") 219 .desc("Number of software prefetches ignored due to an invalid address"); 220 221 lsqBlockedLoads 222 .name(name() + ".blockedLoads") 223 .desc("Number of blocked loads due to partial load-store forwarding"); 224 225 lsqRescheduledLoads 226 .name(name() + ".rescheduledLoads") 227 .desc("Number of loads that were rescheduled"); 228 229 lsqCacheBlocked 230 .name(name() + ".cacheBlocked") 231 .desc("Number of times an access to memory failed due to the cache being blocked"); 232} 233 234template<class Impl> 235void 236LSQUnit<Impl>::setDcachePort(Port *dcache_port) 237{ 238 dcachePort = dcache_port; 239 240#if USE_CHECKER 241 if (cpu->checker) { 242 cpu->checker->setDcachePort(dcachePort); 243 } 244#endif 245} 246 247template<class Impl> 248void 249LSQUnit<Impl>::clearLQ() 250{ 251 loadQueue.clear(); 252} 253 254template<class Impl> 255void 256LSQUnit<Impl>::clearSQ() 257{ 258 storeQueue.clear(); 259} 260 261template<class Impl> 262void 263LSQUnit<Impl>::switchOut() 264{ 265 switchedOut = true; 266 for (int i = 0; i < loadQueue.size(); ++i) { 267 assert(!loadQueue[i]); 268 loadQueue[i] = NULL; 269 } 270 271 assert(storesToWB == 0); 272} 273 274template<class Impl> 275void 276LSQUnit<Impl>::takeOverFrom() 277{ 278 switchedOut = false; 279 loads = stores = storesToWB = 0; 280 281 loadHead = loadTail = 0; 282 283 storeHead = storeWBIdx = storeTail = 0; 284 285 usedPorts = 0; 286 287 memDepViolator = NULL; 288 289 blockedLoadSeqNum = 0; 290 291 stalled = false; 292 isLoadBlocked = false; 293 loadBlockedHandled = false; 294} 295 296template<class Impl> 297void 298LSQUnit<Impl>::resizeLQ(unsigned size) 299{ 300 unsigned size_plus_sentinel = size + 1; 301 assert(size_plus_sentinel >= LQEntries); 302 303 if (size_plus_sentinel > LQEntries) { 304 while (size_plus_sentinel > loadQueue.size()) { 305 DynInstPtr dummy; 306 loadQueue.push_back(dummy); 307 LQEntries++; 308 } 309 } else { 310 LQEntries = size_plus_sentinel; 311 } 312 313} 314 315template<class Impl> 316void 317LSQUnit<Impl>::resizeSQ(unsigned size) 318{ 319 unsigned size_plus_sentinel = size + 1; 320 if (size_plus_sentinel > SQEntries) { 321 while (size_plus_sentinel > storeQueue.size()) { 322 SQEntry dummy; 323 storeQueue.push_back(dummy); 324 SQEntries++; 325 } 326 } else { 327 SQEntries = size_plus_sentinel; 328 } 329} 330 331template <class Impl> 332void 333LSQUnit<Impl>::insert(DynInstPtr &inst) 334{ 335 assert(inst->isMemRef()); 336 337 assert(inst->isLoad() || inst->isStore()); 338 339 if (inst->isLoad()) { 340 insertLoad(inst); 341 } else { 342 insertStore(inst); 343 } 344 345 inst->setInLSQ(); 346} 347 348template <class Impl> 349void 350LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst) 351{ 352 assert((loadTail + 1) % LQEntries != loadHead); 353 assert(loads < LQEntries); 354 355 DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n", 356 load_inst->pcState(), loadTail, load_inst->seqNum); 357 358 load_inst->lqIdx = loadTail; 359 360 if (stores == 0) { 361 load_inst->sqIdx = -1; 362 } else { 363 load_inst->sqIdx = storeTail; 364 } 365 366 loadQueue[loadTail] = load_inst; 367 368 incrLdIdx(loadTail); 369 370 ++loads; 371} 372 373template <class Impl> 374void 375LSQUnit<Impl>::insertStore(DynInstPtr &store_inst) 376{ 377 // Make sure it is not full before inserting an instruction. 378 assert((storeTail + 1) % SQEntries != storeHead); 379 assert(stores < SQEntries); 380 381 DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n", 382 store_inst->pcState(), storeTail, store_inst->seqNum); 383 384 store_inst->sqIdx = storeTail; 385 store_inst->lqIdx = loadTail; 386 387 storeQueue[storeTail] = SQEntry(store_inst); 388 389 incrStIdx(storeTail); 390 391 ++stores; 392} 393 394template <class Impl> 395typename Impl::DynInstPtr 396LSQUnit<Impl>::getMemDepViolator() 397{ 398 DynInstPtr temp = memDepViolator; 399 400 memDepViolator = NULL; 401 402 return temp; 403} 404 405template <class Impl> 406unsigned 407LSQUnit<Impl>::numFreeEntries() 408{ 409 unsigned free_lq_entries = LQEntries - loads; 410 unsigned free_sq_entries = SQEntries - stores; 411 412 // Both the LQ and SQ entries have an extra dummy entry to differentiate 413 // empty/full conditions. Subtract 1 from the free entries. 414 if (free_lq_entries < free_sq_entries) { 415 return free_lq_entries - 1; 416 } else { 417 return free_sq_entries - 1; 418 } 419} 420 421template <class Impl> 422int 423LSQUnit<Impl>::numLoadsReady() 424{ 425 int load_idx = loadHead; 426 int retval = 0; 427 428 while (load_idx != loadTail) { 429 assert(loadQueue[load_idx]); 430 431 if (loadQueue[load_idx]->readyToIssue()) { 432 ++retval; 433 } 434 } 435 436 return retval; 437} 438 439template <class Impl> 440Fault 441LSQUnit<Impl>::executeLoad(DynInstPtr &inst) 442{ 443 using namespace TheISA; 444 // Execute a specific load. 445 Fault load_fault = NoFault; 446 447 DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n", 448 inst->pcState(), inst->seqNum); 449 450 assert(!inst->isSquashed()); 451 452 load_fault = inst->initiateAcc(); 453 454 if (inst->isTranslationDelayed() && 455 load_fault == NoFault) 456 return load_fault; 457 458 // If the instruction faulted or predicated false, then we need to send it 459 // along to commit without the instruction completing. 460 if (load_fault != NoFault || inst->readPredicate() == false) { 461 // Send this instruction to commit, also make sure iew stage 462 // realizes there is activity. 463 // Mark it as executed unless it is an uncached load that 464 // needs to hit the head of commit. 465 if (inst->readPredicate() == false) 466 inst->forwardOldRegs(); 467 DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n", 468 inst->seqNum, 469 (load_fault != NoFault ? "fault" : "predication")); 470 if (!(inst->hasRequest() && inst->uncacheable()) || 471 inst->isAtCommit()) { 472 inst->setExecuted(); 473 } 474 iewStage->instToCommit(inst); 475 iewStage->activityThisCycle(); 476 } else if (!loadBlocked()) { 477 assert(inst->effAddrValid); 478 int load_idx = inst->lqIdx; 479 incrLdIdx(load_idx); 480 while (load_idx != loadTail) { 481 // Really only need to check loads that have actually executed 482 483 // @todo: For now this is extra conservative, detecting a 484 // violation if the addresses match assuming all accesses 485 // are quad word accesses. 486 487 // @todo: Fix this, magic number being used here 488 489 // @todo: Uncachable load is not executed until it reaches 490 // the head of the ROB. Once this if checks only the executed 491 // loads(as noted above), this check can be removed 492 if (loadQueue[load_idx]->effAddrValid && 493 ((loadQueue[load_idx]->effAddr >> 8) 494 == (inst->effAddr >> 8)) && 495 !loadQueue[load_idx]->uncacheable()) { 496 // A load incorrectly passed this load. Squash and refetch. 497 // For now return a fault to show that it was unsuccessful. 498 DynInstPtr violator = loadQueue[load_idx]; 499 if (!memDepViolator || 500 (violator->seqNum < memDepViolator->seqNum)) { 501 memDepViolator = violator; 502 } else { 503 break; 504 } 505 506 ++lsqMemOrderViolation; 507 508 return genMachineCheckFault(); 509 } 510 511 incrLdIdx(load_idx); 512 } 513 } 514 515 return load_fault; 516} 517 518template <class Impl> 519Fault 520LSQUnit<Impl>::executeStore(DynInstPtr &store_inst) 521{ 522 using namespace TheISA; 523 // Make sure that a store exists. 524 assert(stores != 0); 525 526 int store_idx = store_inst->sqIdx; 527 528 DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n", 529 store_inst->pcState(), store_inst->seqNum); 530 531 assert(!store_inst->isSquashed()); 532 533 // Check the recently completed loads to see if any match this store's 534 // address. If so, then we have a memory ordering violation. 535 int load_idx = store_inst->lqIdx; 536 537 Fault store_fault = store_inst->initiateAcc(); 538 539 if (store_inst->isTranslationDelayed() && 540 store_fault == NoFault) 541 return store_fault; 542 543 if (store_inst->readPredicate() == false) 544 store_inst->forwardOldRegs(); 545 546 if (storeQueue[store_idx].size == 0) { 547 DPRINTF(LSQUnit,"Fault on Store PC %s, [sn:%lli], Size = 0\n", 548 store_inst->pcState(), store_inst->seqNum); 549 550 return store_fault; 551 } else if (store_inst->readPredicate() == false) { 552 DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n", 553 store_inst->seqNum); 554 return store_fault; 555 } 556 557 assert(store_fault == NoFault); 558 559 if (store_inst->isStoreConditional()) { 560 // Store conditionals need to set themselves as able to 561 // writeback if we haven't had a fault by here. 562 storeQueue[store_idx].canWB = true; 563 564 ++storesToWB; 565 } 566 567 assert(store_inst->effAddrValid); 568 while (load_idx != loadTail) { 569 // Really only need to check loads that have actually executed 570 // It's safe to check all loads because effAddr is set to 571 // InvalAddr when the dyn inst is created. 572 573 // @todo: For now this is extra conservative, detecting a 574 // violation if the addresses match assuming all accesses 575 // are quad word accesses. 576 577 // @todo: Fix this, magic number being used here 578 579 // @todo: Uncachable load is not executed until it reaches 580 // the head of the ROB. Once this if checks only the executed 581 // loads(as noted above), this check can be removed 582 if (loadQueue[load_idx]->effAddrValid && 583 ((loadQueue[load_idx]->effAddr >> 8) 584 == (store_inst->effAddr >> 8)) && 585 !loadQueue[load_idx]->uncacheable()) { 586 // A load incorrectly passed this store. Squash and refetch. 587 // For now return a fault to show that it was unsuccessful. 588 DynInstPtr violator = loadQueue[load_idx]; 589 if (!memDepViolator || 590 (violator->seqNum < memDepViolator->seqNum)) { 591 memDepViolator = violator; 592 } else { 593 break; 594 } 595 596 ++lsqMemOrderViolation; 597 598 return genMachineCheckFault(); 599 } 600 601 incrLdIdx(load_idx); 602 } 603 604 return store_fault; 605} 606 607template <class Impl> 608void 609LSQUnit<Impl>::commitLoad() 610{ 611 assert(loadQueue[loadHead]); 612 613 DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n", 614 loadQueue[loadHead]->pcState()); 615 616 loadQueue[loadHead] = NULL; 617 618 incrLdIdx(loadHead); 619 620 --loads; 621} 622 623template <class Impl> 624void 625LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst) 626{ 627 assert(loads == 0 || loadQueue[loadHead]); 628 629 while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) { 630 commitLoad(); 631 } 632} 633 634template <class Impl> 635void 636LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst) 637{ 638 assert(stores == 0 || storeQueue[storeHead].inst); 639 640 int store_idx = storeHead; 641 642 while (store_idx != storeTail) { 643 assert(storeQueue[store_idx].inst); 644 // Mark any stores that are now committed and have not yet 645 // been marked as able to write back. 646 if (!storeQueue[store_idx].canWB) { 647 if (storeQueue[store_idx].inst->seqNum > youngest_inst) { 648 break; 649 } 650 DPRINTF(LSQUnit, "Marking store as able to write back, PC " 651 "%s [sn:%lli]\n", 652 storeQueue[store_idx].inst->pcState(), 653 storeQueue[store_idx].inst->seqNum); 654 655 storeQueue[store_idx].canWB = true; 656 657 ++storesToWB; 658 } 659 660 incrStIdx(store_idx); 661 } 662} 663 664template <class Impl> 665void 666LSQUnit<Impl>::writebackPendingStore() 667{ 668 if (hasPendingPkt) { 669 assert(pendingPkt != NULL); 670 671 // If the cache is blocked, this will store the packet for retry. 672 if (sendStore(pendingPkt)) { 673 storePostSend(pendingPkt); 674 } 675 pendingPkt = NULL; 676 hasPendingPkt = false; 677 } 678} 679 680template <class Impl> 681void 682LSQUnit<Impl>::writebackStores() 683{ 684 // First writeback the second packet from any split store that didn't 685 // complete last cycle because there weren't enough cache ports available. 686 if (TheISA::HasUnalignedMemAcc) { 687 writebackPendingStore(); 688 } 689 690 while (storesToWB > 0 && 691 storeWBIdx != storeTail && 692 storeQueue[storeWBIdx].inst && 693 storeQueue[storeWBIdx].canWB && 694 usedPorts < cachePorts) { 695 696 if (isStoreBlocked || lsq->cacheBlocked()) { 697 DPRINTF(LSQUnit, "Unable to write back any more stores, cache" 698 " is blocked!\n"); 699 break; 700 } 701 702 // Store didn't write any data so no need to write it back to 703 // memory. 704 if (storeQueue[storeWBIdx].size == 0) { 705 completeStore(storeWBIdx); 706 707 incrStIdx(storeWBIdx); 708 709 continue; 710 } 711 712 ++usedPorts; 713 714 if (storeQueue[storeWBIdx].inst->isDataPrefetch()) { 715 incrStIdx(storeWBIdx); 716 717 continue; 718 } 719 720 assert(storeQueue[storeWBIdx].req); 721 assert(!storeQueue[storeWBIdx].committed); 722 723 if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) { 724 assert(storeQueue[storeWBIdx].sreqLow); 725 assert(storeQueue[storeWBIdx].sreqHigh); 726 } 727 728 DynInstPtr inst = storeQueue[storeWBIdx].inst; 729 730 Request *req = storeQueue[storeWBIdx].req; 731 storeQueue[storeWBIdx].committed = true; 732 733 assert(!inst->memData); 734 inst->memData = new uint8_t[64]; 735 736 memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize()); 737 738 MemCmd command = 739 req->isSwap() ? MemCmd::SwapReq : 740 (req->isLLSC() ? MemCmd::StoreCondReq : MemCmd::WriteReq); 741 PacketPtr data_pkt; 742 PacketPtr snd_data_pkt = NULL; 743 744 LSQSenderState *state = new LSQSenderState; 745 state->isLoad = false; 746 state->idx = storeWBIdx; 747 state->inst = inst; 748 749 if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) { 750 751 // Build a single data packet if the store isn't split. 752 data_pkt = new Packet(req, command, Packet::Broadcast); 753 data_pkt->dataStatic(inst->memData); 754 data_pkt->senderState = state; 755 } else { 756 RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow; 757 RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh; 758 759 // Create two packets if the store is split in two. 760 data_pkt = new Packet(sreqLow, command, Packet::Broadcast); 761 snd_data_pkt = new Packet(sreqHigh, command, Packet::Broadcast); 762 763 data_pkt->dataStatic(inst->memData); 764 snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize()); 765 766 data_pkt->senderState = state; 767 snd_data_pkt->senderState = state; 768 769 state->isSplit = true; 770 state->outstanding = 2; 771 772 // Can delete the main request now. 773 delete req; 774 req = sreqLow; 775 } 776 777 DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s " 778 "to Addr:%#x, data:%#x [sn:%lli]\n", 779 storeWBIdx, inst->pcState(), 780 req->getPaddr(), (int)*(inst->memData), 781 inst->seqNum); 782 783 // @todo: Remove this SC hack once the memory system handles it. 784 if (inst->isStoreConditional()) { 785 assert(!storeQueue[storeWBIdx].isSplit); 786 // Disable recording the result temporarily. Writing to 787 // misc regs normally updates the result, but this is not 788 // the desired behavior when handling store conditionals. 789 inst->recordResult = false; 790 bool success = TheISA::handleLockedWrite(inst.get(), req); 791 inst->recordResult = true; 792 793 if (!success) { 794 // Instantly complete this store. 795 DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. " 796 "Instantly completing it.\n", 797 inst->seqNum); 798 WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this); 799 cpu->schedule(wb, curTick() + 1); 800 completeStore(storeWBIdx); 801 incrStIdx(storeWBIdx); 802 continue; 803 } 804 } else { 805 // Non-store conditionals do not need a writeback. 806 state->noWB = true; 807 } 808 809 if (!sendStore(data_pkt)) { 810 DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], will" 811 "retry later\n", 812 inst->seqNum); 813 814 // Need to store the second packet, if split. 815 if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) { 816 state->pktToSend = true; 817 state->pendingPacket = snd_data_pkt; 818 } 819 } else { 820 821 // If split, try to send the second packet too 822 if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) { 823 assert(snd_data_pkt); 824 825 // Ensure there are enough ports to use. 826 if (usedPorts < cachePorts) { 827 ++usedPorts; 828 if (sendStore(snd_data_pkt)) { 829 storePostSend(snd_data_pkt); 830 } else { 831 DPRINTF(IEW, "D-Cache became blocked when writing" 832 " [sn:%lli] second packet, will retry later\n", 833 inst->seqNum); 834 } 835 } else { 836 837 // Store the packet for when there's free ports. 838 assert(pendingPkt == NULL); 839 pendingPkt = snd_data_pkt; 840 hasPendingPkt = true; 841 } 842 } else { 843 844 // Not a split store. 845 storePostSend(data_pkt); 846 } 847 } 848 } 849 850 // Not sure this should set it to 0. 851 usedPorts = 0; 852 853 assert(stores >= 0 && storesToWB >= 0); 854} 855 856/*template <class Impl> 857void 858LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum) 859{ 860 list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(), 861 mshrSeqNums.end(), 862 seqNum); 863 864 if (mshr_it != mshrSeqNums.end()) { 865 mshrSeqNums.erase(mshr_it); 866 DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size()); 867 } 868}*/ 869 870template <class Impl> 871void 872LSQUnit<Impl>::squash(const InstSeqNum &squashed_num) 873{ 874 DPRINTF(LSQUnit, "Squashing until [sn:%lli]!" 875 "(Loads:%i Stores:%i)\n", squashed_num, loads, stores); 876 877 int load_idx = loadTail; 878 decrLdIdx(load_idx); 879 880 while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) { 881 DPRINTF(LSQUnit,"Load Instruction PC %s squashed, " 882 "[sn:%lli]\n", 883 loadQueue[load_idx]->pcState(), 884 loadQueue[load_idx]->seqNum); 885 886 if (isStalled() && load_idx == stallingLoadIdx) { 887 stalled = false; 888 stallingStoreIsn = 0; 889 stallingLoadIdx = 0; 890 } 891 892 // Clear the smart pointer to make sure it is decremented. 893 loadQueue[load_idx]->setSquashed(); 894 loadQueue[load_idx] = NULL; 895 --loads; 896 897 // Inefficient! 898 loadTail = load_idx; 899 900 decrLdIdx(load_idx); 901 ++lsqSquashedLoads; 902 } 903 904 if (isLoadBlocked) { 905 if (squashed_num < blockedLoadSeqNum) { 906 isLoadBlocked = false; 907 loadBlockedHandled = false; 908 blockedLoadSeqNum = 0; 909 } 910 } 911 912 if (memDepViolator && squashed_num < memDepViolator->seqNum) { 913 memDepViolator = NULL; 914 } 915 916 int store_idx = storeTail; 917 decrStIdx(store_idx); 918 919 while (stores != 0 && 920 storeQueue[store_idx].inst->seqNum > squashed_num) { 921 // Instructions marked as can WB are already committed. 922 if (storeQueue[store_idx].canWB) { 923 break; 924 } 925 926 DPRINTF(LSQUnit,"Store Instruction PC %s squashed, " 927 "idx:%i [sn:%lli]\n", 928 storeQueue[store_idx].inst->pcState(), 929 store_idx, storeQueue[store_idx].inst->seqNum); 930 931 // I don't think this can happen. It should have been cleared 932 // by the stalling load. 933 if (isStalled() && 934 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) { 935 panic("Is stalled should have been cleared by stalling load!\n"); 936 stalled = false; 937 stallingStoreIsn = 0; 938 } 939 940 // Clear the smart pointer to make sure it is decremented. 941 storeQueue[store_idx].inst->setSquashed(); 942 storeQueue[store_idx].inst = NULL; 943 storeQueue[store_idx].canWB = 0; 944 945 // Must delete request now that it wasn't handed off to 946 // memory. This is quite ugly. @todo: Figure out the proper 947 // place to really handle request deletes. 948 delete storeQueue[store_idx].req; 949 if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) { 950 delete storeQueue[store_idx].sreqLow; 951 delete storeQueue[store_idx].sreqHigh; 952 953 storeQueue[store_idx].sreqLow = NULL; 954 storeQueue[store_idx].sreqHigh = NULL; 955 } 956 957 storeQueue[store_idx].req = NULL; 958 --stores; 959 960 // Inefficient! 961 storeTail = store_idx; 962 963 decrStIdx(store_idx); 964 ++lsqSquashedStores; 965 } 966} 967 968template <class Impl> 969void 970LSQUnit<Impl>::storePostSend(PacketPtr pkt) 971{ 972 if (isStalled() && 973 storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) { 974 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] " 975 "load idx:%i\n", 976 stallingStoreIsn, stallingLoadIdx); 977 stalled = false; 978 stallingStoreIsn = 0; 979 iewStage->replayMemInst(loadQueue[stallingLoadIdx]); 980 } 981 982 if (!storeQueue[storeWBIdx].inst->isStoreConditional()) { 983 // The store is basically completed at this time. This 984 // only works so long as the checker doesn't try to 985 // verify the value in memory for stores. 986 storeQueue[storeWBIdx].inst->setCompleted(); 987#if USE_CHECKER 988 if (cpu->checker) { 989 cpu->checker->verify(storeQueue[storeWBIdx].inst); 990 } 991#endif 992 } 993 994 incrStIdx(storeWBIdx); 995} 996 997template <class Impl> 998void 999LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt) 1000{ 1001 iewStage->wakeCPU(); 1002 1003 // Squashed instructions do not need to complete their access. 1004 if (inst->isSquashed()) { 1005 iewStage->decrWb(inst->seqNum); 1006 assert(!inst->isStore()); 1007 ++lsqIgnoredResponses; 1008 return; 1009 } 1010 1011 if (!inst->isExecuted()) { 1012 inst->setExecuted(); 1013 1014 // Complete access to copy data to proper place. 1015 inst->completeAcc(pkt); 1016 } 1017 1018 // Need to insert instruction into queue to commit 1019 iewStage->instToCommit(inst); 1020 1021 iewStage->activityThisCycle(); 1022 1023 // see if this load changed the PC 1024 iewStage->checkMisprediction(inst); 1025} 1026 1027template <class Impl> 1028void 1029LSQUnit<Impl>::completeStore(int store_idx) 1030{ 1031 assert(storeQueue[store_idx].inst); 1032 storeQueue[store_idx].completed = true; 1033 --storesToWB; 1034 // A bit conservative because a store completion may not free up entries, 1035 // but hopefully avoids two store completions in one cycle from making 1036 // the CPU tick twice. 1037 cpu->wakeCPU(); 1038 cpu->activityThisCycle(); 1039 1040 if (store_idx == storeHead) { 1041 do { 1042 incrStIdx(storeHead); 1043 1044 --stores; 1045 } while (storeQueue[storeHead].completed && 1046 storeHead != storeTail); 1047 1048 iewStage->updateLSQNextCycle = true; 1049 } 1050 1051 DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head " 1052 "idx:%i\n", 1053 storeQueue[store_idx].inst->seqNum, store_idx, storeHead); 1054 1055 if (isStalled() && 1056 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) { 1057 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] " 1058 "load idx:%i\n", 1059 stallingStoreIsn, stallingLoadIdx); 1060 stalled = false; 1061 stallingStoreIsn = 0; 1062 iewStage->replayMemInst(loadQueue[stallingLoadIdx]); 1063 } 1064 1065 storeQueue[store_idx].inst->setCompleted(); 1066 1067 // Tell the checker we've completed this instruction. Some stores 1068 // may get reported twice to the checker, but the checker can 1069 // handle that case. 1070#if USE_CHECKER 1071 if (cpu->checker) { 1072 cpu->checker->verify(storeQueue[store_idx].inst); 1073 } 1074#endif 1075} 1076 1077template <class Impl> 1078bool 1079LSQUnit<Impl>::sendStore(PacketPtr data_pkt) 1080{ 1081 if (!dcachePort->sendTiming(data_pkt)) { 1082 // Need to handle becoming blocked on a store. 1083 isStoreBlocked = true; 1084 ++lsqCacheBlocked; 1085 assert(retryPkt == NULL); 1086 retryPkt = data_pkt; 1087 lsq->setRetryTid(lsqID); 1088 return false; 1089 } 1090 return true; 1091} 1092 1093template <class Impl> 1094void 1095LSQUnit<Impl>::recvRetry() 1096{ 1097 if (isStoreBlocked) { 1098 DPRINTF(LSQUnit, "Receiving retry: store blocked\n"); 1099 assert(retryPkt != NULL); 1100 1101 if (dcachePort->sendTiming(retryPkt)) { 1102 LSQSenderState *state = 1103 dynamic_cast<LSQSenderState *>(retryPkt->senderState); 1104 1105 // Don't finish the store unless this is the last packet.
|