lsq_unit_impl.hh revision 13953:43ae8a30ec1f
1 2/* 3 * Copyright (c) 2010-2014, 2017-2018 ARM Limited 4 * Copyright (c) 2013 Advanced Micro Devices, Inc. 5 * All rights reserved 6 * 7 * The license below extends only to copyright in the software and shall 8 * not be construed as granting a license to any other intellectual 9 * property including but not limited to intellectual property relating 10 * to a hardware implementation of the functionality of the software 11 * licensed hereunder. You may use the software subject to the license 12 * terms below provided that you ensure that this notice is replicated 13 * unmodified and in its entirety in all distributions of the software, 14 * modified or unmodified, in source code or in binary form. 15 * 16 * Copyright (c) 2004-2005 The Regents of The University of Michigan 17 * All rights reserved. 18 * 19 * Redistribution and use in source and binary forms, with or without 20 * modification, are permitted provided that the following conditions are 21 * met: redistributions of source code must retain the above copyright 22 * notice, this list of conditions and the following disclaimer; 23 * redistributions in binary form must reproduce the above copyright 24 * notice, this list of conditions and the following disclaimer in the 25 * documentation and/or other materials provided with the distribution; 26 * neither the name of the copyright holders nor the names of its 27 * contributors may be used to endorse or promote products derived from 28 * this software without specific prior written permission. 29 * 30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 33 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 34 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 35 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 36 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 37 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 38 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 39 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 40 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 41 * 42 * Authors: Kevin Lim 43 * Korey Sewell 44 */ 45 46#ifndef __CPU_O3_LSQ_UNIT_IMPL_HH__ 47#define __CPU_O3_LSQ_UNIT_IMPL_HH__ 48 49#include "arch/generic/debugfaults.hh" 50#include "arch/locked_mem.hh" 51#include "base/str.hh" 52#include "config/the_isa.hh" 53#include "cpu/checker/cpu.hh" 54#include "cpu/o3/lsq.hh" 55#include "cpu/o3/lsq_unit.hh" 56#include "debug/Activity.hh" 57#include "debug/IEW.hh" 58#include "debug/LSQUnit.hh" 59#include "debug/O3PipeView.hh" 60#include "mem/packet.hh" 61#include "mem/request.hh" 62 63template<class Impl> 64LSQUnit<Impl>::WritebackEvent::WritebackEvent(const DynInstPtr &_inst, 65 PacketPtr _pkt, LSQUnit *lsq_ptr) 66 : Event(Default_Pri, AutoDelete), 67 inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr) 68{ 69 assert(_inst->savedReq); 70 _inst->savedReq->writebackScheduled(); 71} 72 73template<class Impl> 74void 75LSQUnit<Impl>::WritebackEvent::process() 76{ 77 assert(!lsqPtr->cpu->switchedOut()); 78 79 lsqPtr->writeback(inst, pkt); 80 81 assert(inst->savedReq); 82 inst->savedReq->writebackDone(); 83 delete pkt; 84} 85 86template<class Impl> 87const char * 88LSQUnit<Impl>::WritebackEvent::description() const 89{ 90 return "Store writeback"; 91} 92 93template <class Impl> 94bool 95LSQUnit<Impl>::recvTimingResp(PacketPtr pkt) 96{ 97 auto senderState = dynamic_cast<LSQSenderState*>(pkt->senderState); 98 LSQRequest* req = senderState->request(); 99 assert(req != nullptr); 100 bool ret = true; 101 /* Check that the request is still alive before any further action. */ 102 if (senderState->alive()) { 103 ret = req->recvTimingResp(pkt); 104 } else { 105 senderState->outstanding--; 106 } 107 return ret; 108 109} 110 111template<class Impl> 112void 113LSQUnit<Impl>::completeDataAccess(PacketPtr pkt) 114{ 115 LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState); 116 DynInstPtr inst = state->inst; 117 118 cpu->ppDataAccessComplete->notify(std::make_pair(inst, pkt)); 119 120 /* Notify the sender state that the access is complete (for ownership 121 * tracking). */ 122 state->complete(); 123 124 assert(!cpu->switchedOut()); 125 if (!inst->isSquashed()) { 126 if (state->needWB) { 127 // Only loads, store conditionals and atomics perform the writeback 128 // after receving the response from the memory 129 assert(inst->isLoad() || inst->isStoreConditional() || 130 inst->isAtomic()); 131 writeback(inst, state->request()->mainPacket()); 132 if (inst->isStore() || inst->isAtomic()) { 133 auto ss = dynamic_cast<SQSenderState*>(state); 134 ss->writebackDone(); 135 completeStore(ss->idx); 136 } 137 } else if (inst->isStore()) { 138 // This is a regular store (i.e., not store conditionals and 139 // atomics), so it can complete without writing back 140 completeStore(dynamic_cast<SQSenderState*>(state)->idx); 141 } 142 } 143} 144 145template <class Impl> 146LSQUnit<Impl>::LSQUnit(uint32_t lqEntries, uint32_t sqEntries) 147 : lsqID(-1), storeQueue(sqEntries+1), loadQueue(lqEntries+1), 148 loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false), 149 isStoreBlocked(false), storeInFlight(false), hasPendingRequest(false), 150 pendingRequest(nullptr) 151{ 152} 153 154template<class Impl> 155void 156LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params, 157 LSQ *lsq_ptr, unsigned id) 158{ 159 lsqID = id; 160 161 cpu = cpu_ptr; 162 iewStage = iew_ptr; 163 164 lsq = lsq_ptr; 165 166 DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",lsqID); 167 168 depCheckShift = params->LSQDepCheckShift; 169 checkLoads = params->LSQCheckLoads; 170 needsTSO = params->needsTSO; 171 172 resetState(); 173} 174 175 176template<class Impl> 177void 178LSQUnit<Impl>::resetState() 179{ 180 loads = stores = storesToWB = 0; 181 182 183 storeWBIt = storeQueue.begin(); 184 185 retryPkt = NULL; 186 memDepViolator = NULL; 187 188 stalled = false; 189 190 cacheBlockMask = ~(cpu->cacheLineSize() - 1); 191} 192 193template<class Impl> 194std::string 195LSQUnit<Impl>::name() const 196{ 197 if (Impl::MaxThreads == 1) { 198 return iewStage->name() + ".lsq"; 199 } else { 200 return iewStage->name() + ".lsq.thread" + std::to_string(lsqID); 201 } 202} 203 204template<class Impl> 205void 206LSQUnit<Impl>::regStats() 207{ 208 lsqForwLoads 209 .name(name() + ".forwLoads") 210 .desc("Number of loads that had data forwarded from stores"); 211 212 invAddrLoads 213 .name(name() + ".invAddrLoads") 214 .desc("Number of loads ignored due to an invalid address"); 215 216 lsqSquashedLoads 217 .name(name() + ".squashedLoads") 218 .desc("Number of loads squashed"); 219 220 lsqIgnoredResponses 221 .name(name() + ".ignoredResponses") 222 .desc("Number of memory responses ignored because the instruction is squashed"); 223 224 lsqMemOrderViolation 225 .name(name() + ".memOrderViolation") 226 .desc("Number of memory ordering violations"); 227 228 lsqSquashedStores 229 .name(name() + ".squashedStores") 230 .desc("Number of stores squashed"); 231 232 invAddrSwpfs 233 .name(name() + ".invAddrSwpfs") 234 .desc("Number of software prefetches ignored due to an invalid address"); 235 236 lsqBlockedLoads 237 .name(name() + ".blockedLoads") 238 .desc("Number of blocked loads due to partial load-store forwarding"); 239 240 lsqRescheduledLoads 241 .name(name() + ".rescheduledLoads") 242 .desc("Number of loads that were rescheduled"); 243 244 lsqCacheBlocked 245 .name(name() + ".cacheBlocked") 246 .desc("Number of times an access to memory failed due to the cache being blocked"); 247} 248 249template<class Impl> 250void 251LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port) 252{ 253 dcachePort = dcache_port; 254} 255 256template<class Impl> 257void 258LSQUnit<Impl>::drainSanityCheck() const 259{ 260 for (int i = 0; i < loadQueue.capacity(); ++i) 261 assert(!loadQueue[i].valid()); 262 263 assert(storesToWB == 0); 264 assert(!retryPkt); 265} 266 267template<class Impl> 268void 269LSQUnit<Impl>::takeOverFrom() 270{ 271 resetState(); 272} 273 274template <class Impl> 275void 276LSQUnit<Impl>::insert(const DynInstPtr &inst) 277{ 278 assert(inst->isMemRef()); 279 280 assert(inst->isLoad() || inst->isStore() || inst->isAtomic()); 281 282 if (inst->isLoad()) { 283 insertLoad(inst); 284 } else { 285 insertStore(inst); 286 } 287 288 inst->setInLSQ(); 289} 290 291template <class Impl> 292void 293LSQUnit<Impl>::insertLoad(const DynInstPtr &load_inst) 294{ 295 assert(!loadQueue.full()); 296 assert(loads < loadQueue.capacity()); 297 298 DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n", 299 load_inst->pcState(), loadQueue.tail(), load_inst->seqNum); 300 301 /* Grow the queue. */ 302 loadQueue.advance_tail(); 303 304 load_inst->sqIt = storeQueue.end(); 305 306 assert(!loadQueue.back().valid()); 307 loadQueue.back().set(load_inst); 308 load_inst->lqIdx = loadQueue.tail(); 309 load_inst->lqIt = loadQueue.getIterator(load_inst->lqIdx); 310 311 ++loads; 312} 313 314template <class Impl> 315void 316LSQUnit<Impl>::insertStore(const DynInstPtr& store_inst) 317{ 318 // Make sure it is not full before inserting an instruction. 319 assert(!storeQueue.full()); 320 assert(stores < storeQueue.capacity()); 321 322 DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n", 323 store_inst->pcState(), storeQueue.tail(), store_inst->seqNum); 324 storeQueue.advance_tail(); 325 326 store_inst->sqIdx = storeQueue.tail(); 327 store_inst->lqIdx = loadQueue.moduloAdd(loadQueue.tail(), 1); 328 store_inst->lqIt = loadQueue.end(); 329 330 storeQueue.back().set(store_inst); 331 332 ++stores; 333} 334 335template <class Impl> 336typename Impl::DynInstPtr 337LSQUnit<Impl>::getMemDepViolator() 338{ 339 DynInstPtr temp = memDepViolator; 340 341 memDepViolator = NULL; 342 343 return temp; 344} 345 346template <class Impl> 347unsigned 348LSQUnit<Impl>::numFreeLoadEntries() 349{ 350 //LQ has an extra dummy entry to differentiate 351 //empty/full conditions. Subtract 1 from the free entries. 352 DPRINTF(LSQUnit, "LQ size: %d, #loads occupied: %d\n", 353 1 + loadQueue.capacity(), loads); 354 return loadQueue.capacity() - loads; 355} 356 357template <class Impl> 358unsigned 359LSQUnit<Impl>::numFreeStoreEntries() 360{ 361 //SQ has an extra dummy entry to differentiate 362 //empty/full conditions. Subtract 1 from the free entries. 363 DPRINTF(LSQUnit, "SQ size: %d, #stores occupied: %d\n", 364 1 + storeQueue.capacity(), stores); 365 return storeQueue.capacity() - stores; 366 367 } 368 369template <class Impl> 370void 371LSQUnit<Impl>::checkSnoop(PacketPtr pkt) 372{ 373 // Should only ever get invalidations in here 374 assert(pkt->isInvalidate()); 375 376 DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr()); 377 378 for (int x = 0; x < cpu->numContexts(); x++) { 379 ThreadContext *tc = cpu->getContext(x); 380 bool no_squash = cpu->thread[x]->noSquashFromTC; 381 cpu->thread[x]->noSquashFromTC = true; 382 TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask); 383 cpu->thread[x]->noSquashFromTC = no_squash; 384 } 385 386 if (loadQueue.empty()) 387 return; 388 389 auto iter = loadQueue.begin(); 390 391 Addr invalidate_addr = pkt->getAddr() & cacheBlockMask; 392 393 DynInstPtr ld_inst = iter->instruction(); 394 assert(ld_inst); 395 LSQRequest *req = iter->request(); 396 397 // Check that this snoop didn't just invalidate our lock flag 398 if (ld_inst->effAddrValid() && 399 req->isCacheBlockHit(invalidate_addr, cacheBlockMask) 400 && ld_inst->memReqFlags & Request::LLSC) 401 TheISA::handleLockedSnoopHit(ld_inst.get()); 402 403 bool force_squash = false; 404 405 while (++iter != loadQueue.end()) { 406 ld_inst = iter->instruction(); 407 assert(ld_inst); 408 req = iter->request(); 409 if (!ld_inst->effAddrValid() || ld_inst->strictlyOrdered()) 410 continue; 411 412 DPRINTF(LSQUnit, "-- inst [sn:%lli] to pktAddr:%#x\n", 413 ld_inst->seqNum, invalidate_addr); 414 415 if (force_squash || 416 req->isCacheBlockHit(invalidate_addr, cacheBlockMask)) { 417 if (needsTSO) { 418 // If we have a TSO system, as all loads must be ordered with 419 // all other loads, this load as well as *all* subsequent loads 420 // need to be squashed to prevent possible load reordering. 421 force_squash = true; 422 } 423 if (ld_inst->possibleLoadViolation() || force_squash) { 424 DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n", 425 pkt->getAddr(), ld_inst->seqNum); 426 427 // Mark the load for re-execution 428 ld_inst->fault = std::make_shared<ReExec>(); 429 } else { 430 DPRINTF(LSQUnit, "HitExternal Snoop for addr %#x [sn:%lli]\n", 431 pkt->getAddr(), ld_inst->seqNum); 432 433 // Make sure that we don't lose a snoop hitting a LOCKED 434 // address since the LOCK* flags don't get updated until 435 // commit. 436 if (ld_inst->memReqFlags & Request::LLSC) 437 TheISA::handleLockedSnoopHit(ld_inst.get()); 438 439 // If a older load checks this and it's true 440 // then we might have missed the snoop 441 // in which case we need to invalidate to be sure 442 ld_inst->hitExternalSnoop(true); 443 } 444 } 445 } 446 return; 447} 448 449template <class Impl> 450Fault 451LSQUnit<Impl>::checkViolations(typename LoadQueue::iterator& loadIt, 452 const DynInstPtr& inst) 453{ 454 Addr inst_eff_addr1 = inst->effAddr >> depCheckShift; 455 Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift; 456 457 /** @todo in theory you only need to check an instruction that has executed 458 * however, there isn't a good way in the pipeline at the moment to check 459 * all instructions that will execute before the store writes back. Thus, 460 * like the implementation that came before it, we're overly conservative. 461 */ 462 while (loadIt != loadQueue.end()) { 463 DynInstPtr ld_inst = loadIt->instruction(); 464 if (!ld_inst->effAddrValid() || ld_inst->strictlyOrdered()) { 465 ++loadIt; 466 continue; 467 } 468 469 Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift; 470 Addr ld_eff_addr2 = 471 (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift; 472 473 if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) { 474 if (inst->isLoad()) { 475 // If this load is to the same block as an external snoop 476 // invalidate that we've observed then the load needs to be 477 // squashed as it could have newer data 478 if (ld_inst->hitExternalSnoop()) { 479 if (!memDepViolator || 480 ld_inst->seqNum < memDepViolator->seqNum) { 481 DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] " 482 "and [sn:%lli] at address %#x\n", 483 inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 484 memDepViolator = ld_inst; 485 486 ++lsqMemOrderViolation; 487 488 return std::make_shared<GenericISA::M5PanicFault>( 489 "Detected fault with inst [sn:%lli] and " 490 "[sn:%lli] at address %#x\n", 491 inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 492 } 493 } 494 495 // Otherwise, mark the load has a possible load violation 496 // and if we see a snoop before it's commited, we need to squash 497 ld_inst->possibleLoadViolation(true); 498 DPRINTF(LSQUnit, "Found possible load violation at addr: %#x" 499 " between instructions [sn:%lli] and [sn:%lli]\n", 500 inst_eff_addr1, inst->seqNum, ld_inst->seqNum); 501 } else { 502 // A load/store incorrectly passed this store. 503 // Check if we already have a violator, or if it's newer 504 // squash and refetch. 505 if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum) 506 break; 507 508 DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and " 509 "[sn:%lli] at address %#x\n", 510 inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 511 memDepViolator = ld_inst; 512 513 ++lsqMemOrderViolation; 514 515 return std::make_shared<GenericISA::M5PanicFault>( 516 "Detected fault with " 517 "inst [sn:%lli] and [sn:%lli] at address %#x\n", 518 inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 519 } 520 } 521 522 ++loadIt; 523 } 524 return NoFault; 525} 526 527 528 529 530template <class Impl> 531Fault 532LSQUnit<Impl>::executeLoad(const DynInstPtr &inst) 533{ 534 using namespace TheISA; 535 // Execute a specific load. 536 Fault load_fault = NoFault; 537 538 DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n", 539 inst->pcState(), inst->seqNum); 540 541 assert(!inst->isSquashed()); 542 543 load_fault = inst->initiateAcc(); 544 545 if (!inst->readMemAccPredicate()) { 546 assert(load_fault == NoFault); 547 assert(inst->readPredicate()); 548 inst->setExecuted(); 549 inst->completeAcc(nullptr); 550 iewStage->instToCommit(inst); 551 iewStage->activityThisCycle(); 552 return NoFault; 553 } 554 555 if (inst->isTranslationDelayed() && load_fault == NoFault) 556 return load_fault; 557 558 // If the instruction faulted or predicated false, then we need to send it 559 // along to commit without the instruction completing. 560 if (load_fault != NoFault || !inst->readPredicate()) { 561 // Send this instruction to commit, also make sure iew stage 562 // realizes there is activity. Mark it as executed unless it 563 // is a strictly ordered load that needs to hit the head of 564 // commit. 565 if (!inst->readPredicate()) 566 inst->forwardOldRegs(); 567 DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n", 568 inst->seqNum, 569 (load_fault != NoFault ? "fault" : "predication")); 570 if (!(inst->hasRequest() && inst->strictlyOrdered()) || 571 inst->isAtCommit()) { 572 inst->setExecuted(); 573 } 574 iewStage->instToCommit(inst); 575 iewStage->activityThisCycle(); 576 } else { 577 if (inst->effAddrValid()) { 578 auto it = inst->lqIt; 579 ++it; 580 581 if (checkLoads) 582 return checkViolations(it, inst); 583 } 584 } 585 586 return load_fault; 587} 588 589template <class Impl> 590Fault 591LSQUnit<Impl>::executeStore(const DynInstPtr &store_inst) 592{ 593 using namespace TheISA; 594 // Make sure that a store exists. 595 assert(stores != 0); 596 597 int store_idx = store_inst->sqIdx; 598 599 DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n", 600 store_inst->pcState(), store_inst->seqNum); 601 602 assert(!store_inst->isSquashed()); 603 604 // Check the recently completed loads to see if any match this store's 605 // address. If so, then we have a memory ordering violation. 606 typename LoadQueue::iterator loadIt = store_inst->lqIt; 607 608 Fault store_fault = store_inst->initiateAcc(); 609 610 if (store_inst->isTranslationDelayed() && 611 store_fault == NoFault) 612 return store_fault; 613 614 if (!store_inst->readPredicate()) { 615 DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n", 616 store_inst->seqNum); 617 store_inst->forwardOldRegs(); 618 return store_fault; 619 } 620 621 if (storeQueue[store_idx].size() == 0) { 622 DPRINTF(LSQUnit,"Fault on Store PC %s, [sn:%lli], Size = 0\n", 623 store_inst->pcState(), store_inst->seqNum); 624 625 return store_fault; 626 } 627 628 assert(store_fault == NoFault); 629 630 if (store_inst->isStoreConditional() || store_inst->isAtomic()) { 631 // Store conditionals and Atomics need to set themselves as able to 632 // writeback if we haven't had a fault by here. 633 storeQueue[store_idx].canWB() = true; 634 635 ++storesToWB; 636 } 637 638 return checkViolations(loadIt, store_inst); 639 640} 641 642template <class Impl> 643void 644LSQUnit<Impl>::commitLoad() 645{ 646 assert(loadQueue.front().valid()); 647 648 DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n", 649 loadQueue.front().instruction()->pcState()); 650 651 loadQueue.front().clear(); 652 loadQueue.pop_front(); 653 654 --loads; 655} 656 657template <class Impl> 658void 659LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst) 660{ 661 assert(loads == 0 || loadQueue.front().valid()); 662 663 while (loads != 0 && loadQueue.front().instruction()->seqNum 664 <= youngest_inst) { 665 commitLoad(); 666 } 667} 668 669template <class Impl> 670void 671LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst) 672{ 673 assert(stores == 0 || storeQueue.front().valid()); 674 675 /* Forward iterate the store queue (age order). */ 676 for (auto& x : storeQueue) { 677 assert(x.valid()); 678 // Mark any stores that are now committed and have not yet 679 // been marked as able to write back. 680 if (!x.canWB()) { 681 if (x.instruction()->seqNum > youngest_inst) { 682 break; 683 } 684 DPRINTF(LSQUnit, "Marking store as able to write back, PC " 685 "%s [sn:%lli]\n", 686 x.instruction()->pcState(), 687 x.instruction()->seqNum); 688 689 x.canWB() = true; 690 691 ++storesToWB; 692 } 693 } 694} 695 696template <class Impl> 697void 698LSQUnit<Impl>::writebackBlockedStore() 699{ 700 assert(isStoreBlocked); 701 storeWBIt->request()->sendPacketToCache(); 702 if (storeWBIt->request()->isSent()){ 703 storePostSend(); 704 } 705} 706 707template <class Impl> 708void 709LSQUnit<Impl>::writebackStores() 710{ 711 if (isStoreBlocked) { 712 DPRINTF(LSQUnit, "Writing back blocked store\n"); 713 writebackBlockedStore(); 714 } 715 716 while (storesToWB > 0 && 717 storeWBIt.dereferenceable() && 718 storeWBIt->valid() && 719 storeWBIt->canWB() && 720 ((!needsTSO) || (!storeInFlight)) && 721 lsq->cachePortAvailable(false)) { 722 723 if (isStoreBlocked) { 724 DPRINTF(LSQUnit, "Unable to write back any more stores, cache" 725 " is blocked!\n"); 726 break; 727 } 728 729 // Store didn't write any data so no need to write it back to 730 // memory. 731 if (storeWBIt->size() == 0) { 732 /* It is important that the preincrement happens at (or before) 733 * the call, as the the code of completeStore checks 734 * storeWBIt. */ 735 completeStore(storeWBIt++); 736 continue; 737 } 738 739 if (storeWBIt->instruction()->isDataPrefetch()) { 740 storeWBIt++; 741 continue; 742 } 743 744 assert(storeWBIt->hasRequest()); 745 assert(!storeWBIt->committed()); 746 747 DynInstPtr inst = storeWBIt->instruction(); 748 LSQRequest* req = storeWBIt->request(); 749 storeWBIt->committed() = true; 750 751 assert(!inst->memData); 752 inst->memData = new uint8_t[req->_size]; 753 754 if (storeWBIt->isAllZeros()) 755 memset(inst->memData, 0, req->_size); 756 else 757 memcpy(inst->memData, storeWBIt->data(), req->_size); 758 759 760 if (req->senderState() == nullptr) { 761 SQSenderState *state = new SQSenderState(storeWBIt); 762 state->isLoad = false; 763 state->needWB = false; 764 state->inst = inst; 765 766 req->senderState(state); 767 if (inst->isStoreConditional() || inst->isAtomic()) { 768 /* Only store conditionals and atomics need a writeback. */ 769 state->needWB = true; 770 } 771 } 772 req->buildPackets(); 773 774 DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s " 775 "to Addr:%#x, data:%#x [sn:%lli]\n", 776 storeWBIt.idx(), inst->pcState(), 777 req->request()->getPaddr(), (int)*(inst->memData), 778 inst->seqNum); 779 780 // @todo: Remove this SC hack once the memory system handles it. 781 if (inst->isStoreConditional()) { 782 // Disable recording the result temporarily. Writing to 783 // misc regs normally updates the result, but this is not 784 // the desired behavior when handling store conditionals. 785 inst->recordResult(false); 786 bool success = TheISA::handleLockedWrite(inst.get(), 787 req->request(), cacheBlockMask); 788 inst->recordResult(true); 789 req->packetSent(); 790 791 if (!success) { 792 req->complete(); 793 // Instantly complete this store. 794 DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. " 795 "Instantly completing it.\n", 796 inst->seqNum); 797 PacketPtr new_pkt = new Packet(*req->packet()); 798 WritebackEvent *wb = new WritebackEvent(inst, 799 new_pkt, this); 800 cpu->schedule(wb, curTick() + 1); 801 completeStore(storeWBIt); 802 if (!storeQueue.empty()) 803 storeWBIt++; 804 else 805 storeWBIt = storeQueue.end(); 806 continue; 807 } 808 } 809 810 if (req->request()->isMmappedIpr()) { 811 assert(!inst->isStoreConditional()); 812 ThreadContext *thread = cpu->tcBase(lsqID); 813 PacketPtr main_pkt = new Packet(req->mainRequest(), 814 MemCmd::WriteReq); 815 main_pkt->dataStatic(inst->memData); 816 req->handleIprWrite(thread, main_pkt); 817 delete main_pkt; 818 completeStore(storeWBIt); 819 storeWBIt++; 820 continue; 821 } 822 /* Send to cache */ 823 req->sendPacketToCache(); 824 825 /* If successful, do the post send */ 826 if (req->isSent()) { 827 storePostSend(); 828 } else { 829 DPRINTF(LSQUnit, "D-Cache became blocked when writing [sn:%lli], " 830 "will retry later\n", 831 inst->seqNum); 832 } 833 } 834 assert(stores >= 0 && storesToWB >= 0); 835} 836 837template <class Impl> 838void 839LSQUnit<Impl>::squash(const InstSeqNum &squashed_num) 840{ 841 DPRINTF(LSQUnit, "Squashing until [sn:%lli]!" 842 "(Loads:%i Stores:%i)\n", squashed_num, loads, stores); 843 844 while (loads != 0 && 845 loadQueue.back().instruction()->seqNum > squashed_num) { 846 DPRINTF(LSQUnit,"Load Instruction PC %s squashed, " 847 "[sn:%lli]\n", 848 loadQueue.back().instruction()->pcState(), 849 loadQueue.back().instruction()->seqNum); 850 851 if (isStalled() && loadQueue.tail() == stallingLoadIdx) { 852 stalled = false; 853 stallingStoreIsn = 0; 854 stallingLoadIdx = 0; 855 } 856 857 // Clear the smart pointer to make sure it is decremented. 858 loadQueue.back().instruction()->setSquashed(); 859 loadQueue.back().clear(); 860 861 --loads; 862 863 loadQueue.pop_back(); 864 ++lsqSquashedLoads; 865 } 866 867 if (memDepViolator && squashed_num < memDepViolator->seqNum) { 868 memDepViolator = NULL; 869 } 870 871 while (stores != 0 && 872 storeQueue.back().instruction()->seqNum > squashed_num) { 873 // Instructions marked as can WB are already committed. 874 if (storeQueue.back().canWB()) { 875 break; 876 } 877 878 DPRINTF(LSQUnit,"Store Instruction PC %s squashed, " 879 "idx:%i [sn:%lli]\n", 880 storeQueue.back().instruction()->pcState(), 881 storeQueue.tail(), storeQueue.back().instruction()->seqNum); 882 883 // I don't think this can happen. It should have been cleared 884 // by the stalling load. 885 if (isStalled() && 886 storeQueue.back().instruction()->seqNum == stallingStoreIsn) { 887 panic("Is stalled should have been cleared by stalling load!\n"); 888 stalled = false; 889 stallingStoreIsn = 0; 890 } 891 892 // Clear the smart pointer to make sure it is decremented. 893 storeQueue.back().instruction()->setSquashed(); 894 895 // Must delete request now that it wasn't handed off to 896 // memory. This is quite ugly. @todo: Figure out the proper 897 // place to really handle request deletes. 898 storeQueue.back().clear(); 899 --stores; 900 901 storeQueue.pop_back(); 902 ++lsqSquashedStores; 903 } 904} 905 906template <class Impl> 907void 908LSQUnit<Impl>::storePostSend() 909{ 910 if (isStalled() && 911 storeWBIt->instruction()->seqNum == stallingStoreIsn) { 912 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] " 913 "load idx:%i\n", 914 stallingStoreIsn, stallingLoadIdx); 915 stalled = false; 916 stallingStoreIsn = 0; 917 iewStage->replayMemInst(loadQueue[stallingLoadIdx].instruction()); 918 } 919 920 if (!storeWBIt->instruction()->isStoreConditional()) { 921 // The store is basically completed at this time. This 922 // only works so long as the checker doesn't try to 923 // verify the value in memory for stores. 924 storeWBIt->instruction()->setCompleted(); 925 926 if (cpu->checker) { 927 cpu->checker->verify(storeWBIt->instruction()); 928 } 929 } 930 931 if (needsTSO) { 932 storeInFlight = true; 933 } 934 935 storeWBIt++; 936} 937 938template <class Impl> 939void 940LSQUnit<Impl>::writeback(const DynInstPtr &inst, PacketPtr pkt) 941{ 942 iewStage->wakeCPU(); 943 944 // Squashed instructions do not need to complete their access. 945 if (inst->isSquashed()) { 946 assert(!inst->isStore()); 947 ++lsqIgnoredResponses; 948 return; 949 } 950 951 if (!inst->isExecuted()) { 952 inst->setExecuted(); 953 954 if (inst->fault == NoFault) { 955 // Complete access to copy data to proper place. 956 inst->completeAcc(pkt); 957 } else { 958 // If the instruction has an outstanding fault, we cannot complete 959 // the access as this discards the current fault. 960 961 // If we have an outstanding fault, the fault should only be of 962 // type ReExec. 963 assert(dynamic_cast<ReExec*>(inst->fault.get()) != nullptr); 964 965 DPRINTF(LSQUnit, "Not completing instruction [sn:%lli] access " 966 "due to pending fault.\n", inst->seqNum); 967 } 968 } 969 970 // Need to insert instruction into queue to commit 971 iewStage->instToCommit(inst); 972 973 iewStage->activityThisCycle(); 974 975 // see if this load changed the PC 976 iewStage->checkMisprediction(inst); 977} 978 979template <class Impl> 980void 981LSQUnit<Impl>::completeStore(typename StoreQueue::iterator store_idx) 982{ 983 assert(store_idx->valid()); 984 store_idx->completed() = true; 985 --storesToWB; 986 // A bit conservative because a store completion may not free up entries, 987 // but hopefully avoids two store completions in one cycle from making 988 // the CPU tick twice. 989 cpu->wakeCPU(); 990 cpu->activityThisCycle(); 991 992 /* We 'need' a copy here because we may clear the entry from the 993 * store queue. */ 994 DynInstPtr store_inst = store_idx->instruction(); 995 if (store_idx == storeQueue.begin()) { 996 do { 997 storeQueue.front().clear(); 998 storeQueue.pop_front(); 999 --stores; 1000 } while (storeQueue.front().completed() && 1001 !storeQueue.empty()); 1002 1003 iewStage->updateLSQNextCycle = true; 1004 } 1005 1006 DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head " 1007 "idx:%i\n", 1008 store_inst->seqNum, store_idx.idx() - 1, storeQueue.head() - 1); 1009 1010#if TRACING_ON 1011 if (DTRACE(O3PipeView)) { 1012 store_inst->storeTick = 1013 curTick() - store_inst->fetchTick; 1014 } 1015#endif 1016 1017 if (isStalled() && 1018 store_inst->seqNum == stallingStoreIsn) { 1019 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] " 1020 "load idx:%i\n", 1021 stallingStoreIsn, stallingLoadIdx); 1022 stalled = false; 1023 stallingStoreIsn = 0; 1024 iewStage->replayMemInst(loadQueue[stallingLoadIdx].instruction()); 1025 } 1026 1027 store_inst->setCompleted(); 1028 1029 if (needsTSO) { 1030 storeInFlight = false; 1031 } 1032 1033 // Tell the checker we've completed this instruction. Some stores 1034 // may get reported twice to the checker, but the checker can 1035 // handle that case. 1036 // Store conditionals cannot be sent to the checker yet, they have 1037 // to update the misc registers first which should take place 1038 // when they commit 1039 if (cpu->checker && !store_inst->isStoreConditional()) { 1040 cpu->checker->verify(store_inst); 1041 } 1042} 1043 1044template <class Impl> 1045bool 1046LSQUnit<Impl>::trySendPacket(bool isLoad, PacketPtr data_pkt) 1047{ 1048 bool ret = true; 1049 bool cache_got_blocked = false; 1050 1051 auto state = dynamic_cast<LSQSenderState*>(data_pkt->senderState); 1052 1053 if (!lsq->cacheBlocked() && 1054 lsq->cachePortAvailable(isLoad)) { 1055 if (!dcachePort->sendTimingReq(data_pkt)) { 1056 ret = false; 1057 cache_got_blocked = true; 1058 } 1059 } else { 1060 ret = false; 1061 } 1062 1063 if (ret) { 1064 if (!isLoad) { 1065 isStoreBlocked = false; 1066 } 1067 lsq->cachePortBusy(isLoad); 1068 state->outstanding++; 1069 state->request()->packetSent(); 1070 } else { 1071 if (cache_got_blocked) { 1072 lsq->cacheBlocked(true); 1073 ++lsqCacheBlocked; 1074 } 1075 if (!isLoad) { 1076 assert(state->request() == storeWBIt->request()); 1077 isStoreBlocked = true; 1078 } 1079 state->request()->packetNotSent(); 1080 } 1081 return ret; 1082} 1083 1084template <class Impl> 1085void 1086LSQUnit<Impl>::recvRetry() 1087{ 1088 if (isStoreBlocked) { 1089 DPRINTF(LSQUnit, "Receiving retry: blocked store\n"); 1090 writebackBlockedStore(); 1091 } 1092} 1093 1094template <class Impl> 1095void 1096LSQUnit<Impl>::dumpInsts() const 1097{ 1098 cprintf("Load store queue: Dumping instructions.\n"); 1099 cprintf("Load queue size: %i\n", loads); 1100 cprintf("Load queue: "); 1101 1102 for (const auto& e: loadQueue) { 1103 const DynInstPtr &inst(e.instruction()); 1104 cprintf("%s.[sn:%llu] ", inst->pcState(), inst->seqNum); 1105 } 1106 cprintf("\n"); 1107 1108 cprintf("Store queue size: %i\n", stores); 1109 cprintf("Store queue: "); 1110 1111 for (const auto& e: storeQueue) { 1112 const DynInstPtr &inst(e.instruction()); 1113 cprintf("%s.[sn:%llu] ", inst->pcState(), inst->seqNum); 1114 } 1115 1116 cprintf("\n"); 1117} 1118 1119template <class Impl> 1120unsigned int 1121LSQUnit<Impl>::cacheLineSize() 1122{ 1123 return cpu->cacheLineSize(); 1124} 1125 1126#endif//__CPU_O3_LSQ_UNIT_IMPL_HH__ 1127