/*
 * Copyright (c) 2010-2014, 2017-2018 ARM Limited
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_LSQ_UNIT_IMPL_HH__
#define __CPU_O3_LSQ_UNIT_IMPL_HH__

#include "arch/generic/debugfaults.hh"
#include "arch/locked_mem.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/o3/lsq.hh"
#include "cpu/o3/lsq_unit.hh"
#include "debug/Activity.hh"
#include "debug/IEW.hh"
#include "debug/LSQUnit.hh"
#include "debug/O3PipeView.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(const DynInstPtr &_inst,
        PacketPtr _pkt, LSQUnit *lsq_ptr)
    : Event(Default_Pri, AutoDelete),
      inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
    assert(_inst->savedReq);
    _inst->savedReq->writebackScheduled();
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    assert(!lsqPtr->cpu->switchedOut());

    lsqPtr->writeback(inst, pkt);
    assert(inst->savedReq);
    inst->savedReq->writebackDone();
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description() const
{
    return "Store writeback";
}

template <class Impl>
bool
LSQUnit<Impl>::recvTimingResp(PacketPtr pkt)
{
    auto senderState = dynamic_cast<LSQSenderState*>(pkt->senderState);
    LSQRequest* req = senderState->request();
    assert(req != nullptr);
    bool ret = true;
    /* Check that the request is still alive before any further action. */
    if (senderState->alive()) {
        ret = req->recvTimingResp(pkt);
    } else {
        senderState->outstanding--;
    }
    return ret;
}
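
// Responses arrive one packet at a time via recvTimingResp() above; for
// split accesses the owning LSQRequest tracks its outstanding packets and
// is expected to invoke completeDataAccess() below once the whole access
// has been assembled (see the request classes in lsq.hh).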
template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;

    cpu->ppDataAccessComplete->notify(std::make_pair(inst, pkt));

    /* Notify the sender state that the access is complete (for ownership
     * tracking). */
    state->complete();

    assert(!cpu->switchedOut());
    if (!inst->isSquashed()) {
        if (state->needWB) {
            // Only loads and store conditionals perform the writeback
            // after receiving the response from the memory.
            assert(inst->isLoad() || inst->isStoreConditional());
            writeback(inst, state->request()->mainPacket());
            if (inst->isStore()) {
                auto ss = dynamic_cast<SQSenderState*>(state);
                ss->writebackDone();
                completeStore(ss->idx);
            }
        } else if (inst->isStore()) {
            completeStore(dynamic_cast<SQSenderState*>(state)->idx);
        }
    }
}

template <class Impl>
LSQUnit<Impl>::LSQUnit(uint32_t lqEntries, uint32_t sqEntries)
    : lsqID(-1), storeQueue(sqEntries+1), loadQueue(lqEntries+1),
      loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
      isStoreBlocked(false), storeInFlight(false), hasPendingRequest(false),
      pendingRequest(nullptr)
{
}

template<class Impl>
void
LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
        LSQ *lsq_ptr, unsigned id)
{
    lsqID = id;

    cpu = cpu_ptr;
    iewStage = iew_ptr;

    lsq = lsq_ptr;

    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n", lsqID);

    depCheckShift = params->LSQDepCheckShift;
    checkLoads = params->LSQCheckLoads;
    needsTSO = params->needsTSO;

    resetState();
}

template<class Impl>
void
LSQUnit<Impl>::resetState()
{
    loads = stores = storesToWB = 0;

    storeWBIt = storeQueue.begin();

    retryPkt = NULL;
    memDepViolator = NULL;

    stalled = false;

    cacheBlockMask = ~(cpu->cacheLineSize() - 1);
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread" + std::to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the "
              "instruction is squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an "
              "invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store "
              "forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the "
              "cache being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port)
{
    dcachePort = dcache_port;
}

template<class Impl>
void
LSQUnit<Impl>::drainSanityCheck() const
{
    for (int i = 0; i < loadQueue.capacity(); ++i)
        assert(!loadQueue[i].valid());

    assert(storesToWB == 0);
    assert(!retryPkt);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    resetState();
}
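
// The LQ and SQ are circular buffers allocated with one extra sentinel
// slot (lqEntries + 1 / sqEntries + 1 in the constructor above) so that
// the full and empty conditions can be told apart.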
template <class Impl>
void
LSQUnit<Impl>::insert(const DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(const DynInstPtr &load_inst)
{
    assert(!loadQueue.full());
    assert(loads < loadQueue.capacity());

    DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
            load_inst->pcState(), loadQueue.tail(), load_inst->seqNum);

    /* Grow the queue. */
    loadQueue.advance_tail();

    load_inst->sqIt = storeQueue.end();

    assert(!loadQueue.back().valid());
    loadQueue.back().set(load_inst);
    load_inst->lqIdx = loadQueue.tail();
    load_inst->lqIt = loadQueue.getIterator(load_inst->lqIdx);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(const DynInstPtr& store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert(!storeQueue.full());
    assert(stores < storeQueue.capacity());

    DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
            store_inst->pcState(), storeQueue.tail(), store_inst->seqNum);
    storeQueue.advance_tail();

    store_inst->sqIdx = storeQueue.tail();
    store_inst->lqIdx = loadQueue.moduloAdd(loadQueue.tail(), 1);
    store_inst->lqIt = loadQueue.end();

    storeQueue.back().set(store_inst);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeLoadEntries()
{
    // LQ has an extra dummy entry to differentiate
    // empty/full conditions.
    DPRINTF(LSQUnit, "LQ size: %d, #loads occupied: %d\n",
            1 + loadQueue.capacity(), loads);
    return loadQueue.capacity() - loads;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeStoreEntries()
{
    // SQ has an extra dummy entry to differentiate
    // empty/full conditions.
    DPRINTF(LSQUnit, "SQ size: %d, #stores occupied: %d\n",
            1 + storeQueue.capacity(), stores);
    return storeQueue.capacity() - stores;
}

template <class Impl>
void
LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
{
    // Should only ever get invalidations in here
    assert(pkt->isInvalidate());

    DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());

    for (int x = 0; x < cpu->numContexts(); x++) {
        ThreadContext *tc = cpu->getContext(x);
        bool no_squash = cpu->thread[x]->noSquashFromTC;
        cpu->thread[x]->noSquashFromTC = true;
        TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask);
        cpu->thread[x]->noSquashFromTC = no_squash;
    }

    if (loadQueue.empty())
        return;

    auto iter = loadQueue.begin();

    Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;

    DynInstPtr ld_inst = iter->instruction();
    assert(ld_inst);
    LSQRequest *req = iter->request();

    // Check that this snoop didn't just invalidate our lock flag
    if (ld_inst->effAddrValid() &&
        req->isCacheBlockHit(invalidate_addr, cacheBlockMask)
        && ld_inst->memReqFlags & Request::LLSC)
        TheISA::handleLockedSnoopHit(ld_inst.get());

    bool force_squash = false;
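
    // Walk the remaining (younger) loads in age order; under TSO a single
    // matching snoop forces every later load to replay as well.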
    while (++iter != loadQueue.end()) {
        ld_inst = iter->instruction();
        assert(ld_inst);
        req = iter->request();
        if (!ld_inst->effAddrValid() || ld_inst->strictlyOrdered())
            continue;

        DPRINTF(LSQUnit, "-- inst [sn:%lli] to pktAddr:%#x\n",
                ld_inst->seqNum, invalidate_addr);

        if (force_squash ||
            req->isCacheBlockHit(invalidate_addr, cacheBlockMask)) {
            if (needsTSO) {
                // If we have a TSO system, as all loads must be ordered with
                // all other loads, this load as well as *all* subsequent
                // loads need to be squashed to prevent possible load
                // reordering.
                force_squash = true;
            }
            if (ld_inst->possibleLoadViolation() || force_squash) {
                DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Mark the load for re-execution
                ld_inst->fault = std::make_shared<ReExec>();
            } else {
                DPRINTF(LSQUnit, "HitExternal Snoop for addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Make sure that we don't lose a snoop hitting a LOCKED
                // address since the LOCK* flags don't get updated until
                // commit.
                if (ld_inst->memReqFlags & Request::LLSC)
                    TheISA::handleLockedSnoopHit(ld_inst.get());

                // If an older load checks this and it's true, then we
                // might have missed the snoop, in which case we need to
                // invalidate to be sure.
                ld_inst->hitExternalSnoop(true);
            }
        }
    }
}

template <class Impl>
Fault
LSQUnit<Impl>::checkViolations(typename LoadQueue::iterator& loadIt,
        const DynInstPtr& inst)
{
    Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
    Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;

    /** @todo In theory you only need to check an instruction that has
     * executed; however, there isn't a good way in the pipeline at the
     * moment to check all instructions that will execute before the store
     * writes back. Thus, like the implementation that came before it, we're
     * overly conservative.
     */
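    // Compare the incoming instruction's address window against each load
    // from loadIt to the tail; the windows are depCheckShift-granular, so
    // this check is deliberately conservative.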
    while (loadIt != loadQueue.end()) {
        DynInstPtr ld_inst = loadIt->instruction();
        if (!ld_inst->effAddrValid() || ld_inst->strictlyOrdered()) {
            ++loadIt;
            continue;
        }

        Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
        Addr ld_eff_addr2 =
            (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;

        if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
            if (inst->isLoad()) {
                // If this load is to the same block as an external snoop
                // invalidate that we've observed then the load needs to be
                // squashed as it could have newer data
                if (ld_inst->hitExternalSnoop()) {
                    if (!memDepViolator ||
                            ld_inst->seqNum < memDepViolator->seqNum) {
                        DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
                                "and [sn:%lli] at address %#x\n",
                                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                        memDepViolator = ld_inst;

                        ++lsqMemOrderViolation;

                        return std::make_shared<GenericISA::M5PanicFault>(
                            "Detected fault with inst [sn:%lli] and "
                            "[sn:%lli] at address %#x\n",
                            inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                    }
                }

                // Otherwise, mark the load as a possible load violation, and
                // if we see a snoop before it's committed we need to squash.
                ld_inst->possibleLoadViolation(true);
                DPRINTF(LSQUnit, "Found possible load violation at addr: %#x"
                        " between instructions [sn:%lli] and [sn:%lli]\n",
                        inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
            } else {
                // A load/store incorrectly passed this store.
                // Check if we already have a violator, or if it's newer;
                // squash and refetch.
                if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
                    break;

                DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and "
                        "[sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                memDepViolator = ld_inst;

                ++lsqMemOrderViolation;

                return std::make_shared<GenericISA::M5PanicFault>(
                    "Detected fault with "
                    "inst [sn:%lli] and [sn:%lli] at address %#x\n",
                    inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
            }
        }

        ++loadIt;
    }
    return NoFault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(const DynInstPtr &inst)
{
    using namespace TheISA;
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
            inst->pcState(), inst->seqNum);

    assert(!inst->isSquashed());

    load_fault = inst->initiateAcc();

    if (inst->isTranslationDelayed() && load_fault == NoFault)
        return load_fault;

    // If the instruction faulted or predicated false, then we need to send it
    // along to commit without the instruction completing.
    if (load_fault != NoFault || !inst->readPredicate()) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.  Mark it as executed unless it
        // is a strictly ordered load that needs to hit the head of
        // commit.
        if (!inst->readPredicate())
            inst->forwardOldRegs();
        DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
                inst->seqNum,
                (load_fault != NoFault ? "fault" : "predication"));
        if (!(inst->hasRequest() && inst->strictlyOrdered()) ||
            inst->isAtCommit()) {
            inst->setExecuted();
        }
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    } else {
        if (inst->effAddrValid()) {
            auto it = inst->lqIt;
            ++it;

            if (checkLoads)
                return checkViolations(it, inst);
        }
    }

    return load_fault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeStore(const DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n",
            store_inst->pcState(), store_inst->seqNum);

    assert(!store_inst->isSquashed());

    // Check the recently completed loads to see if any match this store's
    // address.  If so, then we have a memory ordering violation.
    typename LoadQueue::iterator loadIt = store_inst->lqIt;

    Fault store_fault = store_inst->initiateAcc();

    if (store_inst->isTranslationDelayed() &&
        store_fault == NoFault)
        return store_fault;

    if (!store_inst->readPredicate()) {
        DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
                store_inst->seqNum);
        store_inst->forwardOldRegs();
        return store_fault;
    }

    if (storeQueue[store_idx].size() == 0) {
        DPRINTF(LSQUnit, "Fault on Store PC %s, [sn:%lli], Size = 0\n",
                store_inst->pcState(), store_inst->seqNum);

        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB() = true;

        ++storesToWB;
    }

    return checkViolations(loadIt, store_inst);
}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue.front().valid());

    DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n",
            loadQueue.front().instruction()->pcState());

    loadQueue.front().clear();
    loadQueue.pop_front();

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue.front().valid());

    while (loads != 0 && loadQueue.front().instruction()->seqNum
            <= youngest_inst) {
        commitLoad();
    }
}

template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue.front().valid());

    /* Forward iterate the store queue (age order). */
    for (auto& x : storeQueue) {
        assert(x.valid());
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!x.canWB()) {
            if (x.instruction()->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%s [sn:%lli]\n",
                    x.instruction()->pcState(),
                    x.instruction()->seqNum);

            x.canWB() = true;

            ++storesToWB;
        }
    }
}
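
// Re-attempt the store writeback that previously failed to leave the unit;
// called from writebackStores() and from recvRetry() when the D-cache
// unblocks.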
template <class Impl>
void
LSQUnit<Impl>::writebackBlockedStore()
{
    assert(isStoreBlocked);
    storeWBIt->request()->sendPacketToCache();
    if (storeWBIt->request()->isSent()) {
        storePostSend();
    }
}
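
// Drain eligible stores toward memory. The loop stops when the entry at
// storeWBIt is not yet ready to write back, when TSO ordering requires
// waiting for the store already in flight, or when no store port is
// available this cycle.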
template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Writing back blocked store\n");
        writebackBlockedStore();
    }

    while (storesToWB > 0 &&
           storeWBIt.dereferenceable() &&
           storeWBIt->valid() &&
           storeWBIt->canWB() &&
           ((!needsTSO) || (!storeInFlight)) &&
           lsq->storePortAvailable()) {

        if (isStoreBlocked) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeWBIt->size() == 0) {
            /* It is important that the preincrement happens at (or before)
             * the call, as the code of completeStore checks storeWBIt. */
            completeStore(storeWBIt++);
            continue;
        }

        if (storeWBIt->instruction()->isDataPrefetch()) {
            storeWBIt++;
            continue;
        }

        assert(storeWBIt->hasRequest());
        assert(!storeWBIt->committed());

        DynInstPtr inst = storeWBIt->instruction();
        LSQRequest* req = storeWBIt->request();
        storeWBIt->committed() = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[req->_size];

        if (storeWBIt->isAllZeros())
            memset(inst->memData, 0, req->_size);
        else
            memcpy(inst->memData, storeWBIt->data(), req->_size);

        if (req->senderState() == nullptr) {
            SQSenderState *state = new SQSenderState(storeWBIt);
            state->isLoad = false;
            state->needWB = false;
            state->inst = inst;

            req->senderState(state);
            if (inst->isStoreConditional()) {
                /* Only store conditionals need a writeback. */
                state->needWB = true;
            }
        }
        req->buildPackets();

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIt.idx(), inst->pcState(),
                req->request()->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            // Disable recording the result temporarily.  Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult(false);
            bool success = TheISA::handleLockedWrite(inst.get(),
                    req->request(), cacheBlockMask);
            inst->recordResult(true);
            req->packetSent();

            if (!success) {
                req->complete();
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed.  "
                        "Instantly completing it.\n",
                        inst->seqNum);
                PacketPtr new_pkt = new Packet(*req->packet());
                WritebackEvent *wb = new WritebackEvent(inst,
                        new_pkt, this);
                cpu->schedule(wb, curTick() + 1);
                completeStore(storeWBIt);
                if (!storeQueue.empty())
                    storeWBIt++;
                else
                    storeWBIt = storeQueue.end();
                continue;
            }
        }

        if (req->request()->isMmappedIpr()) {
            assert(!inst->isStoreConditional());
            ThreadContext *thread = cpu->tcBase(lsqID);
            PacketPtr main_pkt = new Packet(req->mainRequest(),
                                            MemCmd::WriteReq);
            main_pkt->dataStatic(inst->memData);
            req->handleIprWrite(thread, main_pkt);
            delete main_pkt;
            completeStore(storeWBIt);
            storeWBIt++;
            continue;
        }
        /* Send to cache */
        req->sendPacketToCache();

        /* If successful, do the post send */
        if (req->isSent()) {
            storePostSend();
        } else {
            DPRINTF(LSQUnit, "D-Cache became blocked when writing [sn:%lli], "
                    "will retry later\n",
                    inst->seqNum);
        }
    }
    assert(stores >= 0 && storesToWB >= 0);
}
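
// Squash walks both queues from the youngest entry toward the head,
// releasing every entry younger than squashed_num; stores already marked
// able to write back are left in place.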
template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    while (loads != 0 &&
            loadQueue.back().instruction()->seqNum > squashed_num) {
        DPRINTF(LSQUnit, "Load Instruction PC %s squashed, "
                "[sn:%lli]\n",
                loadQueue.back().instruction()->pcState(),
                loadQueue.back().instruction()->seqNum);

        if (isStalled() && loadQueue.tail() == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue.back().instruction()->setSquashed();
        loadQueue.back().clear();

        --loads;

        loadQueue.pop_back();
        ++lsqSquashedLoads;
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    while (stores != 0 &&
           storeQueue.back().instruction()->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue.back().canWB()) {
            break;
        }

        DPRINTF(LSQUnit, "Store Instruction PC %s squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue.back().instruction()->pcState(),
                storeQueue.tail(), storeQueue.back().instruction()->seqNum);

        // I don't think this can happen.  It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue.back().instruction()->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue.back().instruction()->setSquashed();

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        storeQueue.back().clear();
        --stores;

        storeQueue.pop_back();
        ++lsqSquashedStores;
    }
}

template <class Impl>
void
LSQUnit<Impl>::storePostSend()
{
    if (isStalled() &&
        storeWBIt->instruction()->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx].instruction());
    }

    if (!storeWBIt->instruction()->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeWBIt->instruction()->setCompleted();

        if (cpu->checker) {
            cpu->checker->verify(storeWBIt->instruction());
        }
    }

    if (needsTSO) {
        storeInFlight = true;
    }

    storeWBIt++;
}

template <class Impl>
void
LSQUnit<Impl>::writeback(const DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        if (inst->fault == NoFault) {
            // Complete access to copy data to proper place.
            inst->completeAcc(pkt);
        } else {
            // If the instruction has an outstanding fault, we cannot
            // complete the access as this discards the current fault.

            // If we have an outstanding fault, the fault should only be of
            // type ReExec.
            assert(dynamic_cast<ReExec*>(inst->fault.get()) != nullptr);

            DPRINTF(LSQUnit, "Not completing instruction [sn:%lli] access "
                    "due to pending fault.\n", inst->seqNum);
        }
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();

    // see if this load changed the PC
    iewStage->checkMisprediction(inst);
}
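
// Mark the entry at store_idx as completed and, if it is the SQ head, pop
// every consecutive completed entry so the head always points at the
// oldest store still in flight.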
template <class Impl>
void
LSQUnit<Impl>::completeStore(typename StoreQueue::iterator store_idx)
{
    assert(store_idx->valid());
    store_idx->completed() = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    /* We 'need' a copy here because we may clear the entry from the
     * store queue. */
    DynInstPtr store_inst = store_idx->instruction();
    if (store_idx == storeQueue.begin()) {
        do {
            storeQueue.front().clear();
            storeQueue.pop_front();
            --stores;
        } while (!storeQueue.empty() &&
                 storeQueue.front().completed());

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            store_inst->seqNum, store_idx.idx() - 1, storeQueue.head() - 1);

#if TRACING_ON
    if (DTRACE(O3PipeView)) {
        store_idx->instruction()->storeTick =
            curTick() - store_idx->instruction()->fetchTick;
    }
#endif

    if (isStalled() &&
        store_inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx].instruction());
    }

    store_inst->setCompleted();

    if (needsTSO) {
        storeInFlight = false;
    }

    // Tell the checker we've completed this instruction.  Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
    // Store conditionals cannot be sent to the checker yet; they have
    // to update the misc registers first, which should take place
    // when they commit.
    if (cpu->checker && !store_inst->isStoreConditional()) {
        cpu->checker->verify(store_inst);
    }
}
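
// Central bookkeeping for sending a packet to the D-cache: the request is
// only attempted when the LSQ reports the cache unblocked and, for stores,
// a store port is free; on failure the blocked state is recorded so the
// packet can be replayed later.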
template <class Impl>
bool
LSQUnit<Impl>::trySendPacket(bool isLoad, PacketPtr data_pkt)
{
    bool ret = true;
    bool cache_got_blocked = false;

    auto state = dynamic_cast<LSQSenderState*>(data_pkt->senderState);

    if (!lsq->cacheBlocked() && (isLoad || lsq->storePortAvailable())) {
        if (!dcachePort->sendTimingReq(data_pkt)) {
            ret = false;
            cache_got_blocked = true;
        }
    } else {
        ret = false;
    }

    if (ret) {
        if (!isLoad) {
            lsq->storePortBusy();
            isStoreBlocked = false;
        }
        state->outstanding++;
        state->request()->packetSent();
    } else {
        if (cache_got_blocked) {
            lsq->cacheBlocked(true);
            ++lsqCacheBlocked;
        }
        if (!isLoad) {
            assert(state->request() == storeWBIt->request());
            isStoreBlocked = true;
        }
        state->request()->packetNotSent();
    }

    return ret;
}
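
// Called by the D-cache port when the cache can accept requests again;
// only a blocked store writeback needs to be replayed from here.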
template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Receiving retry: blocked store\n");
        writebackBlockedStore();
    }
}

template <class Impl>
void
LSQUnit<Impl>::dumpInsts() const
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    for (const auto& e: loadQueue) {
        const DynInstPtr &inst(e.instruction());
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);
    }
    cprintf("\n");

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    for (const auto& e: storeQueue) {
        const DynInstPtr &inst(e.instruction());
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);
    }

    cprintf("\n");
}

template <class Impl>
unsigned int
LSQUnit<Impl>::cacheLineSize()
{
    return cpu->cacheLineSize();
}

#endif//__CPU_O3_LSQ_UNIT_IMPL_HH__