/*
 * Copyright (c) 2010-2013 ARM Limited
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_LSQ_UNIT_IMPL_HH__
#define __CPU_O3_LSQ_UNIT_IMPL_HH__

#include "arch/generic/debugfaults.hh"
#include "arch/locked_mem.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/o3/lsq.hh"
#include "cpu/o3/lsq_unit.hh"
#include "debug/Activity.hh"
#include "debug/IEW.hh"
#include "debug/LSQUnit.hh"
#include "debug/O3PipeView.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(Default_Pri, AutoDelete),
      inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    assert(!lsqPtr->cpu->switchedOut());

    lsqPtr->writeback(inst, pkt);

    if (pkt->senderState)
        delete pkt->senderState;

    delete pkt->req;
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description() const
{
    return "Store writeback";
}

template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    // If this is a split access, wait until all packets are received.
    if (TheISA::HasUnalignedMemAcc && !state->complete()) {
        delete pkt->req;
        delete pkt;
        return;
    }

    assert(!cpu->switchedOut());
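
    // Note that the response for a squashed instruction is dropped below:
    // neither writeback() nor completeStore() is invoked for it.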
    if (!inst->isSquashed()) {
        if (!state->noWB) {
            if (!TheISA::HasUnalignedMemAcc || !state->isSplit ||
                !state->isLoad) {
                writeback(inst, pkt);
            } else {
                writeback(inst, state->mainPkt);
            }
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
        delete state->mainPkt->req;
        delete state->mainPkt;
    }

    pkt->req->setAccessLatency();
    cpu->ppDataAccessComplete->notify(std::make_pair(inst, pkt));

    delete state;
    delete pkt->req;
    delete pkt;
}

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false), storeInFlight(false), hasPendingPkt(false)
{
}

template<class Impl>
void
LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
        LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
        unsigned id)
{
    cpu = cpu_ptr;
    iewStage = iew_ptr;

    lsq = lsq_ptr;

    lsqID = id;

    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n", id);

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    // Due to the uint8_t index in LSQSenderState
    assert(LQEntries <= 256);
    assert(SQEntries <= 256);

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    depCheckShift = params->LSQDepCheckShift;
    checkLoads = params->LSQCheckLoads;
    cachePorts = params->cachePorts;
    needsTSO = params->needsTSO;

    resetState();
}


template<class Impl>
void
LSQUnit<Impl>::resetState()
{
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    retryPkt = NULL;
    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;

    cacheBlockMask = ~(cpu->cacheLineSize() - 1);
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread" + to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the instruction is squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port)
{
    dcachePort = dcache_port;
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::drainSanityCheck() const
{
    for (int i = 0; i < loadQueue.size(); ++i)
        assert(!loadQueue[i]);

    assert(storesToWB == 0);
    assert(!retryPkt);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    resetState();
}

template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }

    assert(LQEntries <= 256);
}

template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }

    assert(SQEntries <= 256);
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
            load_inst->pcState(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
            store_inst->pcState(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeLoadEntries()
{
    // LQ has an extra dummy entry to differentiate
    // empty/full conditions. Subtract 1 from the free entries.
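    // For example (illustrative numbers only): with maxLQEntries = 32 the
    // queue is sized to LQEntries = 33; when all 32 usable slots hold loads
    // this returns 33 - 32 - 1 = 0, so the sentinel slot is never handed out.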
    DPRINTF(LSQUnit, "LQ size: %d, #loads occupied: %d\n", LQEntries, loads);
    return LQEntries - loads - 1;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeStoreEntries()
{
    // SQ has an extra dummy entry to differentiate
    // empty/full conditions. Subtract 1 from the free entries.
    DPRINTF(LSQUnit, "SQ size: %d, #stores occupied: %d\n", SQEntries, stores);
    return SQEntries - stores - 1;
}

template <class Impl>
void
LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
{
    int load_idx = loadHead;
    DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());

    // Unlock the cpu-local monitor when the CPU sees a snoop to a locked
    // address. The CPU can speculatively execute a LL operation after a
    // pending SC operation in the pipeline, and that can make the cache
    // monitor that the CPU is connected to appear valid when it really
    // shouldn't be.
    for (int x = 0; x < cpu->numContexts(); x++) {
        ThreadContext *tc = cpu->getContext(x);
        bool no_squash = cpu->thread[x]->noSquashFromTC;
        cpu->thread[x]->noSquashFromTC = true;
        TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask);
        cpu->thread[x]->noSquashFromTC = no_squash;
    }

    Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;

    DynInstPtr ld_inst = loadQueue[load_idx];
    if (ld_inst) {
        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        // Check that this snoop didn't just invalidate our lock flag
        if (ld_inst->effAddrValid() && load_addr == invalidate_addr &&
            ld_inst->memReqFlags & Request::LLSC)
            TheISA::handleLockedSnoopHit(ld_inst.get());
    }

    // If this is the only load in the LSQ we don't care
    if (load_idx == loadTail)
        return;

    incrLdIdx(load_idx);

    bool force_squash = false;

    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];

        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
                ld_inst->seqNum, load_addr, invalidate_addr);

        if (load_addr == invalidate_addr || force_squash) {
            if (needsTSO) {
                // If we have a TSO system, as all loads must be ordered with
                // all other loads, this load as well as *all* subsequent loads
                // need to be squashed to prevent possible load reordering.
                force_squash = true;
            }
            if (ld_inst->possibleLoadViolation() || force_squash) {
                DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Mark the load for re-execution
                ld_inst->fault = new ReExec;
            } else {
                DPRINTF(LSQUnit, "HitExternal Snoop for addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Make sure that we don't lose a snoop hitting a LOCKED
                // address since the LOCK* flags don't get updated until
                // commit.
                if (ld_inst->memReqFlags & Request::LLSC)
                    TheISA::handleLockedSnoopHit(ld_inst.get());

                // If an older load checks this and it's true, then we might
                // have missed the snoop, in which case we need to invalidate
                // to be sure.
                ld_inst->hitExternalSnoop(true);
            }
        }
        incrLdIdx(load_idx);
    }
    return;
}

template <class Impl>
Fault
LSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst)
{
    Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
    Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;

    /** @todo In theory you only need to check an instruction that has
     * executed; however, there isn't a good way in the pipeline at the
     * moment to check all instructions that will execute before the store
     * writes back. Thus, like the implementation that came before it, we're
     * overly conservative.
     */
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];
        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
        Addr ld_eff_addr2 =
            (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;

        if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
            if (inst->isLoad()) {
                // If this load is to the same block as an external snoop
                // invalidate that we've observed then the load needs to be
                // squashed as it could have newer data
                if (ld_inst->hitExternalSnoop()) {
                    if (!memDepViolator ||
                        ld_inst->seqNum < memDepViolator->seqNum) {
                        DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
                                "and [sn:%lli] at address %#x\n",
                                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                        memDepViolator = ld_inst;

                        ++lsqMemOrderViolation;

                        return new GenericISA::M5PanicFault(
                            "Detected fault with inst [sn:%lli] and "
                            "[sn:%lli] at address %#x\n",
                            inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                    }
                }

                // Otherwise, mark the load as a possible load violation; if
                // we see a snoop before it's committed, we need to squash.
                ld_inst->possibleLoadViolation(true);
                DPRINTF(LSQUnit, "Found possible load violation at addr: %#x"
                        " between instructions [sn:%lli] and [sn:%lli]\n",
                        inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
            } else {
                // A load/store incorrectly passed this store.
                // Check if we already have a violator; if this load is newer
                // than the recorded violator, keep the old one. Otherwise
                // record it and squash and refetch.
                if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
                    break;

                DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and "
                        "[sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                memDepViolator = ld_inst;

                ++lsqMemOrderViolation;

                return new GenericISA::M5PanicFault("Detected fault with "
                        "inst [sn:%lli] and [sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
            }
        }

        incrLdIdx(load_idx);
    }
    return NoFault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    using namespace TheISA;
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
            inst->pcState(), inst->seqNum);

    assert(!inst->isSquashed());

    load_fault = inst->initiateAcc();

    if (inst->isTranslationDelayed() &&
        load_fault == NoFault)
        return load_fault;

    // If the instruction faulted or was predicated false, then we need to
    // send it along to commit without the instruction completing.
    if (load_fault != NoFault || !inst->readPredicate()) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        // Mark it as executed unless it is an uncached load that
        // needs to hit the head of commit.
        if (!inst->readPredicate())
            inst->forwardOldRegs();
        DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
                inst->seqNum,
                (load_fault != NoFault ? "fault" : "predication"));
        if (!(inst->hasRequest() && inst->uncacheable()) ||
            inst->isAtCommit()) {
            inst->setExecuted();
        }
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    } else if (!loadBlocked()) {
        assert(inst->effAddrValid());
        int load_idx = inst->lqIdx;
        incrLdIdx(load_idx);

        if (checkLoads)
            return checkViolations(load_idx, inst);
    }

    return load_fault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n",
            store_inst->pcState(), store_inst->seqNum);

    assert(!store_inst->isSquashed());

    // Check the recently completed loads to see if any match this store's
    // address.  If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    if (store_inst->isTranslationDelayed() &&
        store_fault == NoFault)
        return store_fault;

    if (!store_inst->readPredicate())
        store_inst->forwardOldRegs();

    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit, "Fault on Store PC %s, [sn:%lli], Size = 0\n",
                store_inst->pcState(), store_inst->seqNum);

        return store_fault;
    } else if (!store_inst->readPredicate()) {
        DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
                store_inst->seqNum);
        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
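        // (Ordinary stores are instead marked writeback-able later, when
        // commitStores() sees them reach commit.)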
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    return checkViolations(load_idx, store_inst);
}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n",
            loadQueue[loadHead]->pcState());

    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%s [sn:%lli]\n",
                    storeQueue[store_idx].inst->pcState(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackPendingStore()
{
    if (hasPendingPkt) {
        assert(pendingPkt != NULL);

        // If the cache is blocked, this will store the packet for retry.
        if (sendStore(pendingPkt)) {
            storePostSend(pendingPkt);
        }
        pendingPkt = NULL;
        hasPendingPkt = false;
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    // First writeback the second packet from any split store that didn't
    // complete last cycle because there weren't enough cache ports available.
    if (TheISA::HasUnalignedMemAcc) {
        writebackPendingStore();
    }

    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           ((!needsTSO) || (!storeInFlight)) &&
           usedPorts < cachePorts) {

        if (isStoreBlocked || lsq->cacheBlocked()) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
            assert(storeQueue[storeWBIdx].sreqLow);
            assert(storeQueue[storeWBIdx].sreqHigh);
        }

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
        RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;

        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[req->getSize()];

        if (storeQueue[storeWBIdx].isAllZeros)
            memset(inst->memData, 0, req->getSize());
        else
            memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());

        MemCmd command =
            req->isSwap() ? MemCmd::SwapReq :
            (req->isLLSC() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
        PacketPtr data_pkt;
        PacketPtr snd_data_pkt = NULL;

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;

        if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {

            // Build a single data packet if the store isn't split.
            data_pkt = new Packet(req, command);
            data_pkt->dataStatic(inst->memData);
            data_pkt->senderState = state;
        } else {
            // Create two packets if the store is split in two.
            data_pkt = new Packet(sreqLow, command);
            snd_data_pkt = new Packet(sreqHigh, command);

            data_pkt->dataStatic(inst->memData);
            snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());

            data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;

            // Can delete the main request now.
            delete req;
            req = sreqLow;
        }

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->pcState(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            assert(!storeQueue[storeWBIdx].isSplit);
            // Disable recording the result temporarily. Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult(false);
            bool success = TheISA::handleLockedWrite(inst.get(), req,
                                                     cacheBlockMask);
            inst->recordResult(true);

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. "
                        "Instantly completing it.\n",
                        inst->seqNum);
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                cpu->schedule(wb, curTick() + 1);
                if (cpu->checker) {
                    // Make sure to set the LLSC data for verification
                    // if the checker is loaded.
                    inst->reqToVerify->setExtraData(0);
                    inst->completeAcc(data_pkt);
                }
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        bool split =
            TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;

        ThreadContext *thread = cpu->tcBase(lsqID);

        if (req->isMmappedIpr()) {
            assert(!inst->isStoreConditional());
            TheISA::handleIprWrite(thread, data_pkt);
            delete data_pkt;
            if (split) {
                assert(snd_data_pkt->req->isMmappedIpr());
                TheISA::handleIprWrite(thread, snd_data_pkt);
                delete snd_data_pkt;
                delete sreqLow;
                delete sreqHigh;
            }
            delete state;
            delete req;
            completeStore(storeWBIdx);
            incrStIdx(storeWBIdx);
        } else if (!sendStore(data_pkt)) {
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], "
                    "will retry later\n",
                    inst->seqNum);

            // Need to store the second packet, if split.
            if (split) {
                state->pktToSend = true;
                state->pendingPacket = snd_data_pkt;
            }
        } else {

            // If split, try to send the second packet too.
            if (split) {
                assert(snd_data_pkt);

                // Ensure there are enough ports to use.
                if (usedPorts < cachePorts) {
                    ++usedPorts;
                    if (sendStore(snd_data_pkt)) {
                        storePostSend(snd_data_pkt);
                    } else {
                        DPRINTF(IEW, "D-Cache became blocked when writing"
                                " [sn:%lli] second packet, will retry later\n",
                                inst->seqNum);
                    }
                } else {

                    // Store the packet for when there are free ports.
                    assert(pendingPkt == NULL);
                    pendingPkt = snd_data_pkt;
                    hasPendingPkt = true;
                }
            } else {

                // Not a split store.
                storePostSend(data_pkt);
            }
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n", mshrSeqNums.size());
    }
}*/

template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit, "Load Instruction PC %s squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->pcState(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
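        // A committed store cannot be squashed, so the walk from the tail
        // stops as soon as one is reached.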
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit, "Store Instruction PC %s squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->pcState(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen. It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;
        if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
            delete storeQueue[store_idx].sreqLow;
            delete storeQueue[store_idx].sreqHigh;

            storeQueue[store_idx].sreqLow = NULL;
            storeQueue[store_idx].sreqHigh = NULL;
        }

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();

        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
    }

    if (needsTSO) {
        storeInFlight = true;
    }

    incrStIdx(storeWBIdx);
}

template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {