/*
 * Copyright (c) 2010-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_LSQ_UNIT_IMPL_HH__
#define __CPU_O3_LSQ_UNIT_IMPL_HH__

#include "arch/generic/debugfaults.hh"
#include "arch/locked_mem.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/o3/lsq.hh"
#include "cpu/o3/lsq_unit.hh"
#include "debug/Activity.hh"
#include "debug/IEW.hh"
#include "debug/LSQUnit.hh"
#include "debug/O3PipeView.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(Default_Pri, AutoDelete),
      inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    assert(!lsqPtr->cpu->switchedOut());

    lsqPtr->writeback(inst, pkt);

    if (pkt->senderState)
        delete pkt->senderState;

    delete pkt->req;
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description() const
{
    return "Store writeback";
}

template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    // If this is a split access, wait until all packets are received.
    if (TheISA::HasUnalignedMemAcc && !state->complete()) {
        delete pkt->req;
        delete pkt;
        return;
    }

    assert(!cpu->switchedOut());
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
    } else {
        if (!state->noWB) {
            if (!TheISA::HasUnalignedMemAcc || !state->isSplit ||
                !state->isLoad) {
                writeback(inst, pkt);
            } else {
                writeback(inst, state->mainPkt);
            }
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
        delete state->mainPkt->req;
        delete state->mainPkt;
    }

    pkt->req->setAccessLatency();
    cpu->ppDataAccessComplete->notify(std::make_pair(inst, pkt));

    delete state;
    delete pkt->req;
    delete pkt;
}

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false), storeInFlight(false), hasPendingPkt(false)
{
}

template<class Impl>
void
LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
        LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
        unsigned id)
{
    cpu = cpu_ptr;
    iewStage = iew_ptr;

    lsq = lsq_ptr;

    lsqID = id;

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    // Due to uint8_t index in LSQSenderState
    assert(LQEntries <= 256);
    assert(SQEntries <= 256);

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    depCheckShift = params->LSQDepCheckShift;
    checkLoads = params->LSQCheckLoads;
    cachePorts = params->cachePorts;
    needsTSO = params->needsTSO;

    resetState();
}


template<class Impl>
void
LSQUnit<Impl>::resetState()
{
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    retryPkt = NULL;
    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;

    cacheBlockMask = ~(cpu->cacheLineSize() - 1);
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread" + to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the instruction is squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port)
{
    dcachePort = dcache_port;
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::drainSanityCheck() const
{
    for (int i = 0; i < loadQueue.size(); ++i)
        assert(!loadQueue[i]);

    assert(storesToWB == 0);
    assert(!retryPkt);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    resetState();
}

template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }

    assert(LQEntries <= 256);
}

template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }

    assert(SQEntries <= 256);
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
            load_inst->pcState(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
            store_inst->pcState(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeEntries()
{
    unsigned free_lq_entries = LQEntries - loads;
    unsigned free_sq_entries = SQEntries - stores;

    // Both the LQ and SQ entries have an extra dummy entry to differentiate
    // empty/full conditions. Subtract 1 from the free entries.
    if (free_lq_entries < free_sq_entries) {
        return free_lq_entries - 1;
    } else {
        return free_sq_entries - 1;
    }
}

template <class Impl>
void
LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
{
    int load_idx = loadHead;
    DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());

    // Unlock the cpu-local monitor when the CPU sees a snoop to a locked
    // address. The CPU can speculatively execute a LL operation after a
    // pending SC operation in the pipeline, and that can leave the cache
    // monitor that the CPU is connected to marked valid when it really
    // shouldn't be.
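    // noSquashFromTC is saved and temporarily forced to true so that any
    // state updates the snoop handler makes through the thread context are
    // not treated as external TC writes that would trigger a squash.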
    for (int x = 0; x < cpu->numContexts(); x++) {
        ThreadContext *tc = cpu->getContext(x);
        bool no_squash = cpu->thread[x]->noSquashFromTC;
        cpu->thread[x]->noSquashFromTC = true;
        TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask);
        cpu->thread[x]->noSquashFromTC = no_squash;
    }

    Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;

    DynInstPtr ld_inst = loadQueue[load_idx];
    if (ld_inst) {
        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        // Check that this snoop didn't just invalidate our lock flag
        if (ld_inst->effAddrValid() && load_addr == invalidate_addr &&
            ld_inst->memReqFlags & Request::LLSC)
            TheISA::handleLockedSnoopHit(ld_inst.get());
    }

    // If this is the only load in the LSQ we don't care
    if (load_idx == loadTail)
        return;

    incrLdIdx(load_idx);

    bool force_squash = false;

    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];

        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
                ld_inst->seqNum, load_addr, invalidate_addr);

        if (load_addr == invalidate_addr || force_squash) {
            if (needsTSO) {
                // If we have a TSO system, as all loads must be ordered with
                // all other loads, this load as well as *all* subsequent loads
                // need to be squashed to prevent possible load reordering.
                force_squash = true;
            }
            if (ld_inst->possibleLoadViolation() || force_squash) {
                DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Mark the load for re-execution
                ld_inst->fault = new ReExec;
            } else {
                DPRINTF(LSQUnit, "HitExternal Snoop for addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Make sure that we don't lose a snoop hitting a LOCKED
                // address since the LOCK* flags don't get updated until
                // commit.
                if (ld_inst->memReqFlags & Request::LLSC)
                    TheISA::handleLockedSnoopHit(ld_inst.get());

                // If an older load checks this and it's true, then we
                // might have missed the snoop, in which case we need to
                // invalidate to be sure.
                ld_inst->hitExternalSnoop(true);
            }
        }
        incrLdIdx(load_idx);
    }
    return;
}

template <class Impl>
Fault
LSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst)
{
    Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
    Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;

    /** @todo In theory you only need to check an instruction that has
     * executed; however, there isn't a good way in the pipeline at the moment
     * to check all instructions that will execute before the store writes
     * back. Thus, like the implementation that came before it, we're overly
     * conservative.
     */
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];
        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
        Addr ld_eff_addr2 =
            (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;

        if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
            if (inst->isLoad()) {
                // If this load is to the same block as an external snoop
                // invalidate that we've observed then the load needs to be
                // squashed as it could have newer data
                if (ld_inst->hitExternalSnoop()) {
                    if (!memDepViolator ||
                        ld_inst->seqNum < memDepViolator->seqNum) {
                        DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
                                "and [sn:%lli] at address %#x\n",
                                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                        memDepViolator = ld_inst;

                        ++lsqMemOrderViolation;

                        return new GenericISA::M5PanicFault(
                            "Detected fault with inst [sn:%lli] and "
                            "[sn:%lli] at address %#x\n",
                            inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                    }
                }

                // Otherwise, mark the load as a possible load violation,
                // and if we see a snoop before it's committed, we need to
                // squash it.
                ld_inst->possibleLoadViolation(true);
                DPRINTF(LSQUnit, "Found possible load violation at addr: %#x"
                        " between instructions [sn:%lli] and [sn:%lli]\n",
                        inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
            } else {
                // A load/store incorrectly passed this store.
                // If we already have an older violator, keep it; otherwise
                // record this one, then squash and refetch.
                if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
                    break;

                DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and "
                        "[sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                memDepViolator = ld_inst;

                ++lsqMemOrderViolation;

                return new GenericISA::M5PanicFault("Detected fault with "
                        "inst [sn:%lli] and [sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
            }
        }

        incrLdIdx(load_idx);
    }
    return NoFault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    using namespace TheISA;
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
            inst->pcState(), inst->seqNum);

    assert(!inst->isSquashed());

    load_fault = inst->initiateAcc();

    if (inst->isTranslationDelayed() &&
        load_fault == NoFault)
        return load_fault;

    // If the instruction faulted or predicated false, then we need to send it
    // along to commit without the instruction completing.
    if (load_fault != NoFault || inst->readPredicate() == false) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        // Mark it as executed unless it is an uncached load that
        // needs to hit the head of commit.
        if (inst->readPredicate() == false)
            inst->forwardOldRegs();
        DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
                inst->seqNum,
                (load_fault != NoFault ? "fault" : "predication"));
        if (!(inst->hasRequest() && inst->uncacheable()) ||
            inst->isAtCommit()) {
            inst->setExecuted();
        }
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    } else if (!loadBlocked()) {
        assert(inst->effAddrValid());
        int load_idx = inst->lqIdx;
        incrLdIdx(load_idx);

        if (checkLoads)
            return checkViolations(load_idx, inst);
    }

    return load_fault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n",
            store_inst->pcState(), store_inst->seqNum);

    assert(!store_inst->isSquashed());

    // Check the recently completed loads to see if any match this store's
    // address. If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    if (store_inst->isTranslationDelayed() &&
        store_fault == NoFault)
        return store_fault;

    if (store_inst->readPredicate() == false)
        store_inst->forwardOldRegs();

    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit,"Fault on Store PC %s, [sn:%lli], Size = 0\n",
                store_inst->pcState(), store_inst->seqNum);

        return store_fault;
    } else if (store_inst->readPredicate() == false) {
        DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
                store_inst->seqNum);
        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    return checkViolations(load_idx, store_inst);

}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n",
            loadQueue[loadHead]->pcState());

    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%s [sn:%lli]\n",
                    storeQueue[store_idx].inst->pcState(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackPendingStore()
{
    if (hasPendingPkt) {
        assert(pendingPkt != NULL);

        // If the cache is blocked, this will store the packet for retry.
        // sendStore() returns false when the D-cache port rejects the packet;
        // in that case sendStore() itself stashes the packet in retryPkt, and
        // storePostSend() is deferred until the retry succeeds.
        if (sendStore(pendingPkt)) {
            storePostSend(pendingPkt);
        }
        pendingPkt = NULL;
        hasPendingPkt = false;
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    // First writeback the second packet from any split store that didn't
    // complete last cycle because there weren't enough cache ports available.
    if (TheISA::HasUnalignedMemAcc) {
        writebackPendingStore();
    }

    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           ((!needsTSO) || (!storeInFlight)) &&
           usedPorts < cachePorts) {

        if (isStoreBlocked || lsq->cacheBlocked()) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
            assert(storeQueue[storeWBIdx].sreqLow);
            assert(storeQueue[storeWBIdx].sreqHigh);
        }

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
        RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;

        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[req->getSize()];

        if (storeQueue[storeWBIdx].isAllZeros)
            memset(inst->memData, 0, req->getSize());
        else
            memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());

        MemCmd command =
            req->isSwap() ? MemCmd::SwapReq :
            (req->isLLSC() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
        PacketPtr data_pkt;
        PacketPtr snd_data_pkt = NULL;

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;

        if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {

            // Build a single data packet if the store isn't split.
            data_pkt = new Packet(req, command);
            data_pkt->dataStatic(inst->memData);
            data_pkt->senderState = state;
        } else {
            // Create two packets if the store is split in two.
            data_pkt = new Packet(sreqLow, command);
            snd_data_pkt = new Packet(sreqHigh, command);

            data_pkt->dataStatic(inst->memData);
            snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());

            data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;

            // Can delete the main request now.
            delete req;
            req = sreqLow;
        }

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->pcState(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            assert(!storeQueue[storeWBIdx].isSplit);
            // Disable recording the result temporarily.
            // Writing to misc regs normally updates the result, but this is
            // not the desired behavior when handling store conditionals.
            inst->recordResult(false);
            bool success = TheISA::handleLockedWrite(inst.get(), req, cacheBlockMask);
            inst->recordResult(true);

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. "
                        "Instantly completing it.\n",
                        inst->seqNum);
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                cpu->schedule(wb, curTick() + 1);
                if (cpu->checker) {
                    // Make sure to set the LLSC data for verification
                    // if checker is loaded
                    inst->reqToVerify->setExtraData(0);
                    inst->completeAcc(data_pkt);
                }
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        bool split =
            TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;

        ThreadContext *thread = cpu->tcBase(lsqID);

        if (req->isMmappedIpr()) {
            assert(!inst->isStoreConditional());
            TheISA::handleIprWrite(thread, data_pkt);
            delete data_pkt;
            if (split) {
                assert(snd_data_pkt->req->isMmappedIpr());
                TheISA::handleIprWrite(thread, snd_data_pkt);
                delete snd_data_pkt;
                delete sreqLow;
                delete sreqHigh;
            }
            delete state;
            delete req;
            completeStore(storeWBIdx);
            incrStIdx(storeWBIdx);
        } else if (!sendStore(data_pkt)) {
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], "
                    "will retry later\n",
                    inst->seqNum);

            // Need to store the second packet, if split.
            if (split) {
                state->pktToSend = true;
                state->pendingPacket = snd_data_pkt;
            }
        } else {

            // If split, try to send the second packet too
            if (split) {
                assert(snd_data_pkt);

                // Ensure there are enough ports to use.
                if (usedPorts < cachePorts) {
                    ++usedPorts;
                    if (sendStore(snd_data_pkt)) {
                        storePostSend(snd_data_pkt);
                    } else {
                        DPRINTF(IEW, "D-Cache became blocked when writing"
                                " [sn:%lli] second packet, will retry later\n",
                                inst->seqNum);
                    }
                } else {

                    // Store the packet for when there are free ports.
                    assert(pendingPkt == NULL);
                    pendingPkt = snd_data_pkt;
                    hasPendingPkt = true;
                }
            } else {

                // Not a split store.
                storePostSend(data_pkt);
            }
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
    }
}*/

template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]!"
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit,"Load Instruction PC %s squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->pcState(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit,"Store Instruction PC %s squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->pcState(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen. It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        // Must delete request now that it wasn't handed off to
        // memory. This is quite ugly. @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;
        if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
            delete storeQueue[store_idx].sreqLow;
            delete storeQueue[store_idx].sreqHigh;

            storeQueue[store_idx].sreqLow = NULL;
            storeQueue[store_idx].sreqHigh = NULL;
        }

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
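        // Store conditionals are instead completed from completeDataAccess()
        // once their response, which carries the SC result, comes back.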
        storeQueue[storeWBIdx].inst->setCompleted();

        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
    }

    if (needsTSO) {
        storeInFlight = true;
    }

    incrStIdx(storeWBIdx);
}

template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();

    // see if this load changed the PC
    iewStage->checkMisprediction(inst);
}

template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

#if TRACING_ON
    if (DTRACE(O3PipeView)) {
        storeQueue[store_idx].inst->storeTick =
            curTick() - storeQueue[store_idx].inst->fetchTick;
    }
#endif

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    if (needsTSO) {
        storeInFlight = false;
    }

    // Tell the checker we've completed this instruction. Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
}

template <class Impl>
bool
LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
{
    if (!dcachePort->sendTimingReq(data_pkt)) {
        // Need to handle becoming blocked on a store.
        isStoreBlocked = true;
        ++lsqCacheBlocked;
        assert(retryPkt == NULL);
        retryPkt = data_pkt;
        lsq->setRetryTid(lsqID);
        return false;
    }
    return true;
}

template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Receiving retry: store blocked\n");
        assert(retryPkt != NULL);

        LSQSenderState *state =
            dynamic_cast<LSQSenderState *>(retryPkt->senderState);

        if (dcachePort->sendTimingReq(retryPkt)) {
            // Don't finish the store unless this is the last packet.
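            // For a split store the retried packet may only be the first
            // half; hold off on storePostSend() until the store's final
            // packet has been accepted by the port.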
            if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
                    state->pendingPacket == retryPkt) {
                state->pktToSend = false;
                storePostSend(retryPkt);
            }
            retryPkt = NULL;
            isStoreBlocked = false;
            lsq->setRetryTid(InvalidThreadID);

            // Send any outstanding packet.
            if (TheISA::HasUnalignedMemAcc && state->pktToSend) {
                assert(state->pendingPacket);
                if (sendStore(state->pendingPacket)) {
                    storePostSend(state->pendingPacket);
                }
            }
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
            lsq->setRetryTid(lsqID);
        }
    } else if (isLoadBlocked) {
        DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
                "no need to resend packet.\n");
    } else {
        DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
    }
}

template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx) const
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx) const
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx) const
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx) const
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

template <class Impl>
void
LSQUnit<Impl>::dumpInsts() const
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        const DynInstPtr &inst(loadQueue[load_idx]);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrLdIdx(load_idx);
    }
    cprintf("\n");

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        const DynInstPtr &inst(storeQueue[store_idx].inst);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrStIdx(store_idx);
    }

    cprintf("\n");
}

#endif // __CPU_O3_LSQ_UNIT_IMPL_HH__
| 164 // Add 1 for the sentinel entry (they are circular queues). 165 LQEntries = maxLQEntries + 1; 166 SQEntries = maxSQEntries + 1; 167 168 //Due to uint8_t index in LSQSenderState 169 assert(LQEntries <= 256); 170 assert(SQEntries <= 256); 171 172 loadQueue.resize(LQEntries); 173 storeQueue.resize(SQEntries); 174 175 depCheckShift = params->LSQDepCheckShift; 176 checkLoads = params->LSQCheckLoads; 177 cachePorts = params->cachePorts; 178 needsTSO = params->needsTSO; 179 180 resetState(); 181} 182 183 184template<class Impl> 185void 186LSQUnit<Impl>::resetState() 187{ 188 loads = stores = storesToWB = 0; 189 190 loadHead = loadTail = 0; 191 192 storeHead = storeWBIdx = storeTail = 0; 193 194 usedPorts = 0; 195 196 retryPkt = NULL; 197 memDepViolator = NULL; 198 199 blockedLoadSeqNum = 0; 200 201 stalled = false; 202 isLoadBlocked = false; 203 loadBlockedHandled = false; 204 205 cacheBlockMask = ~(cpu->cacheLineSize() - 1); 206} 207 208template<class Impl> 209std::string 210LSQUnit<Impl>::name() const 211{ 212 if (Impl::MaxThreads == 1) { 213 return iewStage->name() + ".lsq"; 214 } else { 215 return iewStage->name() + ".lsq.thread" + to_string(lsqID); 216 } 217} 218 219template<class Impl> 220void 221LSQUnit<Impl>::regStats() 222{ 223 lsqForwLoads 224 .name(name() + ".forwLoads") 225 .desc("Number of loads that had data forwarded from stores"); 226 227 invAddrLoads 228 .name(name() + ".invAddrLoads") 229 .desc("Number of loads ignored due to an invalid address"); 230 231 lsqSquashedLoads 232 .name(name() + ".squashedLoads") 233 .desc("Number of loads squashed"); 234 235 lsqIgnoredResponses 236 .name(name() + ".ignoredResponses") 237 .desc("Number of memory responses ignored because the instruction is squashed"); 238 239 lsqMemOrderViolation 240 .name(name() + ".memOrderViolation") 241 .desc("Number of memory ordering violations"); 242 243 lsqSquashedStores 244 .name(name() + ".squashedStores") 245 .desc("Number of stores squashed"); 246 247 invAddrSwpfs 248 .name(name() + ".invAddrSwpfs") 249 .desc("Number of software prefetches ignored due to an invalid address"); 250 251 lsqBlockedLoads 252 .name(name() + ".blockedLoads") 253 .desc("Number of blocked loads due to partial load-store forwarding"); 254 255 lsqRescheduledLoads 256 .name(name() + ".rescheduledLoads") 257 .desc("Number of loads that were rescheduled"); 258 259 lsqCacheBlocked 260 .name(name() + ".cacheBlocked") 261 .desc("Number of times an access to memory failed due to the cache being blocked"); 262} 263 264template<class Impl> 265void 266LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port) 267{ 268 dcachePort = dcache_port; 269} 270 271template<class Impl> 272void 273LSQUnit<Impl>::clearLQ() 274{ 275 loadQueue.clear(); 276} 277 278template<class Impl> 279void 280LSQUnit<Impl>::clearSQ() 281{ 282 storeQueue.clear(); 283} 284 285template<class Impl> 286void 287LSQUnit<Impl>::drainSanityCheck() const 288{ 289 for (int i = 0; i < loadQueue.size(); ++i) 290 assert(!loadQueue[i]); 291 292 assert(storesToWB == 0); 293 assert(!retryPkt); 294} 295 296template<class Impl> 297void 298LSQUnit<Impl>::takeOverFrom() 299{ 300 resetState(); 301} 302 303template<class Impl> 304void 305LSQUnit<Impl>::resizeLQ(unsigned size) 306{ 307 unsigned size_plus_sentinel = size + 1; 308 assert(size_plus_sentinel >= LQEntries); 309 310 if (size_plus_sentinel > LQEntries) { 311 while (size_plus_sentinel > loadQueue.size()) { 312 DynInstPtr dummy; 313 loadQueue.push_back(dummy); 314 LQEntries++; 315 } 316 } else { 317 LQEntries = size_plus_sentinel; 318 
} 319 320 assert(LQEntries <= 256); 321} 322 323template<class Impl> 324void 325LSQUnit<Impl>::resizeSQ(unsigned size) 326{ 327 unsigned size_plus_sentinel = size + 1; 328 if (size_plus_sentinel > SQEntries) { 329 while (size_plus_sentinel > storeQueue.size()) { 330 SQEntry dummy; 331 storeQueue.push_back(dummy); 332 SQEntries++; 333 } 334 } else { 335 SQEntries = size_plus_sentinel; 336 } 337 338 assert(SQEntries <= 256); 339} 340 341template <class Impl> 342void 343LSQUnit<Impl>::insert(DynInstPtr &inst) 344{ 345 assert(inst->isMemRef()); 346 347 assert(inst->isLoad() || inst->isStore()); 348 349 if (inst->isLoad()) { 350 insertLoad(inst); 351 } else { 352 insertStore(inst); 353 } 354 355 inst->setInLSQ(); 356} 357 358template <class Impl> 359void 360LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst) 361{ 362 assert((loadTail + 1) % LQEntries != loadHead); 363 assert(loads < LQEntries); 364 365 DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n", 366 load_inst->pcState(), loadTail, load_inst->seqNum); 367 368 load_inst->lqIdx = loadTail; 369 370 if (stores == 0) { 371 load_inst->sqIdx = -1; 372 } else { 373 load_inst->sqIdx = storeTail; 374 } 375 376 loadQueue[loadTail] = load_inst; 377 378 incrLdIdx(loadTail); 379 380 ++loads; 381} 382 383template <class Impl> 384void 385LSQUnit<Impl>::insertStore(DynInstPtr &store_inst) 386{ 387 // Make sure it is not full before inserting an instruction. 388 assert((storeTail + 1) % SQEntries != storeHead); 389 assert(stores < SQEntries); 390 391 DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n", 392 store_inst->pcState(), storeTail, store_inst->seqNum); 393 394 store_inst->sqIdx = storeTail; 395 store_inst->lqIdx = loadTail; 396 397 storeQueue[storeTail] = SQEntry(store_inst); 398 399 incrStIdx(storeTail); 400 401 ++stores; 402} 403 404template <class Impl> 405typename Impl::DynInstPtr 406LSQUnit<Impl>::getMemDepViolator() 407{ 408 DynInstPtr temp = memDepViolator; 409 410 memDepViolator = NULL; 411 412 return temp; 413} 414 415template <class Impl> 416unsigned 417LSQUnit<Impl>::numFreeEntries() 418{ 419 unsigned free_lq_entries = LQEntries - loads; 420 unsigned free_sq_entries = SQEntries - stores; 421 422 // Both the LQ and SQ entries have an extra dummy entry to differentiate 423 // empty/full conditions. Subtract 1 from the free entries. 424 if (free_lq_entries < free_sq_entries) { 425 return free_lq_entries - 1; 426 } else { 427 return free_sq_entries - 1; 428 } 429} 430 431template <class Impl> 432void 433LSQUnit<Impl>::checkSnoop(PacketPtr pkt) 434{ 435 int load_idx = loadHead; 436 DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr()); 437 438 // Unlock the cpu-local monitor when the CPU sees a snoop to a locked 439 // address. The CPU can speculatively execute a LL operation after a pending 440 // SC operation in the pipeline and that can make the cache monitor the CPU 441 // is connected to valid while it really shouldn't be. 
442 for (int x = 0; x < cpu->numContexts(); x++) { 443 ThreadContext *tc = cpu->getContext(x); 444 bool no_squash = cpu->thread[x]->noSquashFromTC; 445 cpu->thread[x]->noSquashFromTC = true; 446 TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask); 447 cpu->thread[x]->noSquashFromTC = no_squash; 448 } 449 450 Addr invalidate_addr = pkt->getAddr() & cacheBlockMask; 451 452 DynInstPtr ld_inst = loadQueue[load_idx]; 453 if (ld_inst) { 454 Addr load_addr = ld_inst->physEffAddr & cacheBlockMask; 455 // Check that this snoop didn't just invalidate our lock flag 456 if (ld_inst->effAddrValid() && load_addr == invalidate_addr && 457 ld_inst->memReqFlags & Request::LLSC) 458 TheISA::handleLockedSnoopHit(ld_inst.get()); 459 } 460 461 // If this is the only load in the LSQ we don't care 462 if (load_idx == loadTail) 463 return; 464 465 incrLdIdx(load_idx); 466 467 bool force_squash = false; 468 469 while (load_idx != loadTail) { 470 DynInstPtr ld_inst = loadQueue[load_idx]; 471 472 if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) { 473 incrLdIdx(load_idx); 474 continue; 475 } 476 477 Addr load_addr = ld_inst->physEffAddr & cacheBlockMask; 478 DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n", 479 ld_inst->seqNum, load_addr, invalidate_addr); 480 481 if (load_addr == invalidate_addr || force_squash) { 482 if (needsTSO) { 483 // If we have a TSO system, as all loads must be ordered with 484 // all other loads, this load as well as *all* subsequent loads 485 // need to be squashed to prevent possible load reordering. 486 force_squash = true; 487 } 488 if (ld_inst->possibleLoadViolation() || force_squash) { 489 DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n", 490 pkt->getAddr(), ld_inst->seqNum); 491 492 // Mark the load for re-execution 493 ld_inst->fault = new ReExec; 494 } else { 495 DPRINTF(LSQUnit, "HitExternal Snoop for addr %#x [sn:%lli]\n", 496 pkt->getAddr(), ld_inst->seqNum); 497 498 // Make sure that we don't lose a snoop hitting a LOCKED 499 // address since the LOCK* flags don't get updated until 500 // commit. 501 if (ld_inst->memReqFlags & Request::LLSC) 502 TheISA::handleLockedSnoopHit(ld_inst.get()); 503 504 // If a older load checks this and it's true 505 // then we might have missed the snoop 506 // in which case we need to invalidate to be sure 507 ld_inst->hitExternalSnoop(true); 508 } 509 } 510 incrLdIdx(load_idx); 511 } 512 return; 513} 514 515template <class Impl> 516Fault 517LSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst) 518{ 519 Addr inst_eff_addr1 = inst->effAddr >> depCheckShift; 520 Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift; 521 522 /** @todo in theory you only need to check an instruction that has executed 523 * however, there isn't a good way in the pipeline at the moment to check 524 * all instructions that will execute before the store writes back. Thus, 525 * like the implementation that came before it, we're overly conservative. 
526 */ 527 while (load_idx != loadTail) { 528 DynInstPtr ld_inst = loadQueue[load_idx]; 529 if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) { 530 incrLdIdx(load_idx); 531 continue; 532 } 533 534 Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift; 535 Addr ld_eff_addr2 = 536 (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift; 537 538 if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) { 539 if (inst->isLoad()) { 540 // If this load is to the same block as an external snoop 541 // invalidate that we've observed then the load needs to be 542 // squashed as it could have newer data 543 if (ld_inst->hitExternalSnoop()) { 544 if (!memDepViolator || 545 ld_inst->seqNum < memDepViolator->seqNum) { 546 DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] " 547 "and [sn:%lli] at address %#x\n", 548 inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 549 memDepViolator = ld_inst; 550 551 ++lsqMemOrderViolation; 552 553 return new GenericISA::M5PanicFault( 554 "Detected fault with inst [sn:%lli] and " 555 "[sn:%lli] at address %#x\n", 556 inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 557 } 558 } 559 560 // Otherwise, mark the load has a possible load violation 561 // and if we see a snoop before it's commited, we need to squash 562 ld_inst->possibleLoadViolation(true); 563 DPRINTF(LSQUnit, "Found possible load violaiton at addr: %#x" 564 " between instructions [sn:%lli] and [sn:%lli]\n", 565 inst_eff_addr1, inst->seqNum, ld_inst->seqNum); 566 } else { 567 // A load/store incorrectly passed this store. 568 // Check if we already have a violator, or if it's newer 569 // squash and refetch. 570 if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum) 571 break; 572 573 DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and " 574 "[sn:%lli] at address %#x\n", 575 inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 576 memDepViolator = ld_inst; 577 578 ++lsqMemOrderViolation; 579 580 return new GenericISA::M5PanicFault("Detected fault with " 581 "inst [sn:%lli] and [sn:%lli] at address %#x\n", 582 inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 583 } 584 } 585 586 incrLdIdx(load_idx); 587 } 588 return NoFault; 589} 590 591 592 593 594template <class Impl> 595Fault 596LSQUnit<Impl>::executeLoad(DynInstPtr &inst) 597{ 598 using namespace TheISA; 599 // Execute a specific load. 600 Fault load_fault = NoFault; 601 602 DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n", 603 inst->pcState(), inst->seqNum); 604 605 assert(!inst->isSquashed()); 606 607 load_fault = inst->initiateAcc(); 608 609 if (inst->isTranslationDelayed() && 610 load_fault == NoFault) 611 return load_fault; 612 613 // If the instruction faulted or predicated false, then we need to send it 614 // along to commit without the instruction completing. 615 if (load_fault != NoFault || inst->readPredicate() == false) { 616 // Send this instruction to commit, also make sure iew stage 617 // realizes there is activity. 618 // Mark it as executed unless it is an uncached load that 619 // needs to hit the head of commit. 620 if (inst->readPredicate() == false) 621 inst->forwardOldRegs(); 622 DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n", 623 inst->seqNum, 624 (load_fault != NoFault ? 
"fault" : "predication")); 625 if (!(inst->hasRequest() && inst->uncacheable()) || 626 inst->isAtCommit()) { 627 inst->setExecuted(); 628 } 629 iewStage->instToCommit(inst); 630 iewStage->activityThisCycle(); 631 } else if (!loadBlocked()) { 632 assert(inst->effAddrValid()); 633 int load_idx = inst->lqIdx; 634 incrLdIdx(load_idx); 635 636 if (checkLoads) 637 return checkViolations(load_idx, inst); 638 } 639 640 return load_fault; 641} 642 643template <class Impl> 644Fault 645LSQUnit<Impl>::executeStore(DynInstPtr &store_inst) 646{ 647 using namespace TheISA; 648 // Make sure that a store exists. 649 assert(stores != 0); 650 651 int store_idx = store_inst->sqIdx; 652 653 DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n", 654 store_inst->pcState(), store_inst->seqNum); 655 656 assert(!store_inst->isSquashed()); 657 658 // Check the recently completed loads to see if any match this store's 659 // address. If so, then we have a memory ordering violation. 660 int load_idx = store_inst->lqIdx; 661 662 Fault store_fault = store_inst->initiateAcc(); 663 664 if (store_inst->isTranslationDelayed() && 665 store_fault == NoFault) 666 return store_fault; 667 668 if (store_inst->readPredicate() == false) 669 store_inst->forwardOldRegs(); 670 671 if (storeQueue[store_idx].size == 0) { 672 DPRINTF(LSQUnit,"Fault on Store PC %s, [sn:%lli], Size = 0\n", 673 store_inst->pcState(), store_inst->seqNum); 674 675 return store_fault; 676 } else if (store_inst->readPredicate() == false) { 677 DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n", 678 store_inst->seqNum); 679 return store_fault; 680 } 681 682 assert(store_fault == NoFault); 683 684 if (store_inst->isStoreConditional()) { 685 // Store conditionals need to set themselves as able to 686 // writeback if we haven't had a fault by here. 687 storeQueue[store_idx].canWB = true; 688 689 ++storesToWB; 690 } 691 692 return checkViolations(load_idx, store_inst); 693 694} 695 696template <class Impl> 697void 698LSQUnit<Impl>::commitLoad() 699{ 700 assert(loadQueue[loadHead]); 701 702 DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n", 703 loadQueue[loadHead]->pcState()); 704 705 loadQueue[loadHead] = NULL; 706 707 incrLdIdx(loadHead); 708 709 --loads; 710} 711 712template <class Impl> 713void 714LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst) 715{ 716 assert(loads == 0 || loadQueue[loadHead]); 717 718 while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) { 719 commitLoad(); 720 } 721} 722 723template <class Impl> 724void 725LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst) 726{ 727 assert(stores == 0 || storeQueue[storeHead].inst); 728 729 int store_idx = storeHead; 730 731 while (store_idx != storeTail) { 732 assert(storeQueue[store_idx].inst); 733 // Mark any stores that are now committed and have not yet 734 // been marked as able to write back. 735 if (!storeQueue[store_idx].canWB) { 736 if (storeQueue[store_idx].inst->seqNum > youngest_inst) { 737 break; 738 } 739 DPRINTF(LSQUnit, "Marking store as able to write back, PC " 740 "%s [sn:%lli]\n", 741 storeQueue[store_idx].inst->pcState(), 742 storeQueue[store_idx].inst->seqNum); 743 744 storeQueue[store_idx].canWB = true; 745 746 ++storesToWB; 747 } 748 749 incrStIdx(store_idx); 750 } 751} 752 753template <class Impl> 754void 755LSQUnit<Impl>::writebackPendingStore() 756{ 757 if (hasPendingPkt) { 758 assert(pendingPkt != NULL); 759 760 // If the cache is blocked, this will store the packet for retry. 

template <class Impl>
void
LSQUnit<Impl>::writebackPendingStore()
{
    if (hasPendingPkt) {
        assert(pendingPkt != NULL);

        // If the cache is blocked, this will store the packet for retry.
        if (sendStore(pendingPkt)) {
            storePostSend(pendingPkt);
        }
        pendingPkt = NULL;
        hasPendingPkt = false;
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    // First writeback the second packet from any split store that didn't
    // complete last cycle because there weren't enough cache ports available.
    if (TheISA::HasUnalignedMemAcc) {
        writebackPendingStore();
    }

    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           ((!needsTSO) || (!storeInFlight)) &&
           usedPorts < cachePorts) {

        if (isStoreBlocked || lsq->cacheBlocked()) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
            assert(storeQueue[storeWBIdx].sreqLow);
            assert(storeQueue[storeWBIdx].sreqHigh);
        }

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
        RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;

        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[req->getSize()];

        if (storeQueue[storeWBIdx].isAllZeros)
            memset(inst->memData, 0, req->getSize());
        else
            memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());

        MemCmd command =
            req->isSwap() ? MemCmd::SwapReq :
            (req->isLLSC() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
        PacketPtr data_pkt;
        PacketPtr snd_data_pkt = NULL;

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;

        if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {

            // Build a single data packet if the store isn't split.
            data_pkt = new Packet(req, command);
            data_pkt->dataStatic(inst->memData);
            data_pkt->senderState = state;
        } else {
            // Create two packets if the store is split in two.
            data_pkt = new Packet(sreqLow, command);
            snd_data_pkt = new Packet(sreqHigh, command);

            data_pkt->dataStatic(inst->memData);
            snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());

            data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;

            // Can delete the main request now.
            delete req;
            req = sreqLow;
        }
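
        // Both halves of a split store reuse one memData buffer and
        // share a single LSQSenderState, whose outstanding count of 2
        // makes the response path wait for both replies before
        // completing the access.  As a hypothetical example, an 8-byte
        // store at 0x3c with 64-byte cache lines is split at the line
        // boundary: sreqLow covers the 4 bytes at 0x3c, sreqHigh the 4
        // bytes at 0x40, and the second packet's data pointer therefore
        // starts sreqLow->getSize() == 4 bytes into memData.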

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->pcState(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            assert(!storeQueue[storeWBIdx].isSplit);
            // Disable recording the result temporarily.  Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult(false);
            bool success = TheISA::handleLockedWrite(inst.get(), req,
                                                     cacheBlockMask);
            inst->recordResult(true);

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. "
                        "Instantly completing it.\n",
                        inst->seqNum);
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                cpu->schedule(wb, curTick() + 1);
                if (cpu->checker) {
                    // Make sure to set the LLSC data for verification
                    // if the checker is loaded.
                    inst->reqToVerify->setExtraData(0);
                    inst->completeAcc(data_pkt);
                }
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        bool split =
            TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;

        ThreadContext *thread = cpu->tcBase(lsqID);

        if (req->isMmappedIpr()) {
            assert(!inst->isStoreConditional());
            TheISA::handleIprWrite(thread, data_pkt);
            delete data_pkt;
            if (split) {
                assert(snd_data_pkt->req->isMmappedIpr());
                TheISA::handleIprWrite(thread, snd_data_pkt);
                delete snd_data_pkt;
                delete sreqLow;
                delete sreqHigh;
            }
            delete state;
            delete req;
            completeStore(storeWBIdx);
            incrStIdx(storeWBIdx);
        } else if (!sendStore(data_pkt)) {
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], "
                    "will retry later\n",
                    inst->seqNum);

            // Need to store the second packet, if split.
            if (split) {
                state->pktToSend = true;
                state->pendingPacket = snd_data_pkt;
            }
        } else {

            // If split, try to send the second packet too.
            if (split) {
                assert(snd_data_pkt);

                // Ensure there are enough ports to use.
                if (usedPorts < cachePorts) {
                    ++usedPorts;
                    if (sendStore(snd_data_pkt)) {
                        storePostSend(snd_data_pkt);
                    } else {
                        DPRINTF(IEW, "D-Cache became blocked when writing"
                                " [sn:%lli] second packet, will retry later\n",
                                inst->seqNum);
                    }
                } else {

                    // Store the packet until ports are free again.
                    assert(pendingPkt == NULL);
                    pendingPkt = snd_data_pkt;
                    hasPendingPkt = true;
                }
            } else {

                // Not a split store.
                storePostSend(data_pkt);
            }
        }
    }

    // @todo: Not sure this should be reset to 0 here rather than once
    // per cycle.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n", mshrSeqNums.size());
    }
}*/

template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit, "Load Instruction PC %s squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->pcState(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient! Updating the tail once after the loop would do.
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit, "Store Instruction PC %s squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->pcState(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // This should not happen; the stall should already have been
        // cleared by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        // Must delete the request now that it wasn't handed off to
        // memory. This is quite ugly. @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;
        if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
            delete storeQueue[store_idx].sreqLow;
            delete storeQueue[store_idx].sreqHigh;

            storeQueue[store_idx].sreqLow = NULL;
            storeQueue[store_idx].sreqHigh = NULL;
        }

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient! Updating the tail once after the loop would do.
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}
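
// squash() above walks each queue from the tail (youngest entry) toward
// the head and stops at the first instruction old enough to survive.
// A minimal sketch of that tail-walk over a circular buffer, with
// hypothetical names standing in for the load/store specifics:
/*
    int idx = tail;
    decrIdx(idx);                     // step to the youngest valid entry
    while (count != 0 && queue[idx].seqNum > squashed_num) {
        queue[idx].release();         // drop the entry's state
        --count;
        tail = idx;                   // the queue shrinks from the tail
        decrIdx(idx);
    }
*/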

template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();

        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
    }

    if (needsTSO) {
        storeInFlight = true;
    }

    incrStIdx(storeWBIdx);
}

template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to the proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert the instruction into the queue to commit.
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();

    // See if this load changed the PC.
    iewStage->checkMisprediction(inst);
}

template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

#if TRACING_ON
    if (DTRACE(O3PipeView)) {
        storeQueue[store_idx].inst->storeTick =
            curTick() - storeQueue[store_idx].inst->fetchTick;
    }
#endif

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    if (needsTSO) {
        storeInFlight = false;
    }

    // Tell the checker we've completed this instruction. Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
}

template <class Impl>
bool
LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
{
    if (!dcachePort->sendTimingReq(data_pkt)) {
        // Need to handle becoming blocked on a store.
        isStoreBlocked = true;
        ++lsqCacheBlocked;
        assert(retryPkt == NULL);
        retryPkt = data_pkt;
        lsq->setRetryTid(lsqID);
        return false;
    }
    return true;
}
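
// sendStore() and recvRetry() below together implement the usual gem5
// timing-port handshake: a rejected sendTimingReq() parks the packet in
// retryPkt, and the cache signals via a retry callback when it can
// accept traffic again.  A minimal sketch of the handshake with
// hypothetical names (thread IDs and split stores omitted):
/*
    bool trySend(PacketPtr pkt)
    {
        if (!port->sendTimingReq(pkt)) {
            retryPkt = pkt;     // hold the packet; do not delete it
            blocked = true;     // stop issuing until the retry arrives
            return false;
        }
        return true;
    }

    void onRetry()              // invoked once the cache unblocks
    {
        if (blocked && port->sendTimingReq(retryPkt)) {
            retryPkt = NULL;
            blocked = false;
        }
    }
*/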

template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Receiving retry: store blocked\n");
        assert(retryPkt != NULL);

        LSQSenderState *state =
            dynamic_cast<LSQSenderState *>(retryPkt->senderState);

        if (dcachePort->sendTimingReq(retryPkt)) {
            // Don't finish the store unless this is the last packet.
            if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
                state->pendingPacket == retryPkt) {
                state->pktToSend = false;
                storePostSend(retryPkt);
            }
            retryPkt = NULL;
            isStoreBlocked = false;
            lsq->setRetryTid(InvalidThreadID);

            // Send any outstanding packet.
            if (TheISA::HasUnalignedMemAcc && state->pktToSend) {
                assert(state->pendingPacket);
                if (sendStore(state->pendingPacket)) {
                    storePostSend(state->pendingPacket);
                }
            }
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
            lsq->setRetryTid(lsqID);
        }
    } else if (isLoadBlocked) {
        DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
                "no need to resend packet.\n");
    } else {
        DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
    }
}

template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx) const
{
    // Circular queue: wrap around to the start on overflow.
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx) const
{
    // Circular queue: wrap around to the end on underflow.
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx) const
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx) const
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

template <class Impl>
void
LSQUnit<Impl>::dumpInsts() const
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        const DynInstPtr &inst(loadQueue[load_idx]);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrLdIdx(load_idx);
    }
    cprintf("\n");

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        const DynInstPtr &inst(storeQueue[store_idx].inst);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrStIdx(store_idx);
    }

    cprintf("\n");
}

#endif // __CPU_O3_LSQ_UNIT_IMPL_HH__