/*
 * Copyright (c) 2010-2014 ARM Limited
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_LSQ_UNIT_IMPL_HH__
#define __CPU_O3_LSQ_UNIT_IMPL_HH__

#include "arch/generic/debugfaults.hh"
#include "arch/locked_mem.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/o3/lsq.hh"
#include "cpu/o3/lsq_unit.hh"
#include "debug/Activity.hh"
#include "debug/IEW.hh"
#include "debug/LSQUnit.hh"
#include "debug/O3PipeView.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(Default_Pri, AutoDelete),
      inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    assert(!lsqPtr->cpu->switchedOut());

    lsqPtr->writeback(inst, pkt);

    if (pkt->senderState)
        delete pkt->senderState;

    delete pkt->req;
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description() const
{
    return "Store writeback";
}

template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum);

    if (state->cacheBlocked) {
        // This is the first half of a previous split load,
        // where the 2nd half blocked, so ignore this response.
        DPRINTF(IEW, "[sn:%lli]: Response from first half of earlier "
                "blocked split load received. Ignoring.\n", inst->seqNum);
        delete state;
        return;
    }

    // If this is a split access, wait until all packets are received.
    if (TheISA::HasUnalignedMemAcc && !state->complete()) {
        return;
    }

    assert(!cpu->switchedOut());
    if (!inst->isSquashed()) {
        if (!state->noWB) {
            if (!TheISA::HasUnalignedMemAcc || !state->isSplit ||
                !state->isLoad) {
                writeback(inst, pkt);
            } else {
                writeback(inst, state->mainPkt);
            }
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
        delete state->mainPkt->req;
        delete state->mainPkt;
    }

    pkt->req->setAccessLatency();
    cpu->ppDataAccessComplete->notify(std::make_pair(inst, pkt));

    delete state;
}
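
// Note on the split-access path above: a load whose bytes straddle a
// cache-line boundary arrives as two packets, and completeDataAccess() runs
// once per response. The early return on !state->complete() means the
// writeback only happens on the final response, and for split loads it uses
// state->mainPkt, which covers the whole access, rather than either half's
// packet.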

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
      isStoreBlocked(false), storeInFlight(false), hasPendingPkt(false)
{
}

template<class Impl>
void
LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
                    LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
                    unsigned id)
{
    cpu = cpu_ptr;
    iewStage = iew_ptr;

    lsq = lsq_ptr;

    lsqID = id;

    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n", id);

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    // Due to the uint8_t index in LSQSenderState.
    assert(LQEntries <= 256);
    assert(SQEntries <= 256);

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    depCheckShift = params->LSQDepCheckShift;
    checkLoads = params->LSQCheckLoads;
    cachePorts = params->cachePorts;
    needsTSO = params->needsTSO;

    resetState();
}


template<class Impl>
void
LSQUnit<Impl>::resetState()
{
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    retryPkt = NULL;
    memDepViolator = NULL;

    stalled = false;

    cacheBlockMask = ~(cpu->cacheLineSize() - 1);
}
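
// Example of the mask set above: with a 64-byte cache line, cacheBlockMask is
// ~(64 - 1) == ~0x3f, so address 0x12345 masks to 0x12340. Any two addresses
// in the same cache block compare equal after masking, which is what
// checkSnoop() relies on when matching snoop addresses against queued loads.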

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread" + std::to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the instruction is"
              " squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an invalid"
              " address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache"
              " being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port)
{
    dcachePort = dcache_port;
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::drainSanityCheck() const
{
    for (int i = 0; i < loadQueue.size(); ++i)
        assert(!loadQueue[i]);

    assert(storesToWB == 0);
    assert(!retryPkt);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    resetState();
}

template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }

    assert(LQEntries <= 256);
}

template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }

    assert(SQEntries <= 256);
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
            load_inst->pcState(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
            store_inst->pcState(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeLoadEntries()
{
    // LQ has an extra dummy entry to differentiate
    // empty/full conditions. Subtract 1 from the free entries.
    DPRINTF(LSQUnit, "LQ size: %d, #loads occupied: %d\n", LQEntries, loads);
    return LQEntries - loads - 1;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeStoreEntries()
{
    // SQ has an extra dummy entry to differentiate
    // empty/full conditions. Subtract 1 from the free entries.
    DPRINTF(LSQUnit, "SQ size: %d, #stores occupied: %d\n", SQEntries, stores);
    return SQEntries - stores - 1;
}
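
// Example of the sentinel arithmetic above: with maxSQEntries == 32, init()
// sets SQEntries to 33. The queue is full at 32 occupied entries, so the free
// count is 33 - 32 - 1 == 0; without the extra dummy entry, head == tail
// would be ambiguous between the empty and full circular-queue states.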

template <class Impl>
void
LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
{
    int load_idx = loadHead;
    DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());

    // Unlock the cpu-local monitor when the CPU sees a snoop to a locked
    // address. The CPU can speculatively execute an LL operation after a
    // pending SC operation in the pipeline, and that can leave the cache
    // monitor the CPU is connected to valid when it really shouldn't be.
    for (int x = 0; x < cpu->numContexts(); x++) {
        ThreadContext *tc = cpu->getContext(x);
        bool no_squash = cpu->thread[x]->noSquashFromTC;
        cpu->thread[x]->noSquashFromTC = true;
        TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask);
        cpu->thread[x]->noSquashFromTC = no_squash;
    }

    Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;

    DynInstPtr ld_inst = loadQueue[load_idx];
    if (ld_inst) {
        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        // Check that this snoop didn't just invalidate our lock flag.
        if (ld_inst->effAddrValid() && load_addr == invalidate_addr &&
            ld_inst->memReqFlags & Request::LLSC)
            TheISA::handleLockedSnoopHit(ld_inst.get());
    }

    // If this is the only load in the LSQ we don't care.
    if (load_idx == loadTail)
        return;

    incrLdIdx(load_idx);

    bool force_squash = false;

    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];

        if (!ld_inst->effAddrValid() || ld_inst->strictlyOrdered()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
                ld_inst->seqNum, load_addr, invalidate_addr);

        if (load_addr == invalidate_addr || force_squash) {
            if (needsTSO) {
                // If we have a TSO system, as all loads must be ordered with
                // all other loads, this load as well as *all* subsequent loads
                // need to be squashed to prevent possible load reordering.
                force_squash = true;
            }
            if (ld_inst->possibleLoadViolation() || force_squash) {
                DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Mark the load for re-execution.
                ld_inst->fault = std::make_shared<ReExec>();
            } else {
                DPRINTF(LSQUnit, "HitExternal Snoop for addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Make sure that we don't lose a snoop hitting a LOCKED
                // address since the LOCK* flags don't get updated until
                // commit.
                if (ld_inst->memReqFlags & Request::LLSC)
                    TheISA::handleLockedSnoopHit(ld_inst.get());

                // If an older load checks this and it's true, then we
                // might have missed the snoop, in which case we need to
                // invalidate to be sure.
                ld_inst->hitExternalSnoop(true);
            }
        }
        incrLdIdx(load_idx);
    }
    return;
}

template <class Impl>
Fault
LSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst)
{
    Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
    Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;

    /** @todo in theory you only need to check an instruction that has
     * executed; however, there isn't a good way in the pipeline at the
     * moment to check all instructions that will execute before the store
     * writes back. Thus, like the implementation that came before it, we're
     * overly conservative.
     */
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];
        if (!ld_inst->effAddrValid() || ld_inst->strictlyOrdered()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
        Addr ld_eff_addr2 =
            (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;

        if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
            if (inst->isLoad()) {
                // If this load is to the same block as an external snoop
                // invalidate that we've observed, then the load needs to be
                // squashed as it could have newer data.
                if (ld_inst->hitExternalSnoop()) {
                    if (!memDepViolator ||
                        ld_inst->seqNum < memDepViolator->seqNum) {
                        DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
                                "and [sn:%lli] at address %#x\n",
                                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                        memDepViolator = ld_inst;

                        ++lsqMemOrderViolation;

                        return std::make_shared<GenericISA::M5PanicFault>(
                            "Detected fault with inst [sn:%lli] and "
                            "[sn:%lli] at address %#x\n",
                            inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                    }
                }

                // Otherwise, mark the load as a possible load violation,
                // and if we see a snoop before it's committed, we need to
                // squash it.
                ld_inst->possibleLoadViolation(true);
                DPRINTF(LSQUnit, "Found possible load violation at addr: %#x"
                        " between instructions [sn:%lli] and [sn:%lli]\n",
                        inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
            } else {
                // A load incorrectly passed this store.
                // If we already have an older violator recorded, keep it;
                // otherwise record this one, then squash and refetch.
                if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
                    break;

                DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and "
                        "[sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                memDepViolator = ld_inst;

                ++lsqMemOrderViolation;

                return std::make_shared<GenericISA::M5PanicFault>(
                    "Detected fault with "
                    "inst [sn:%lli] and [sn:%lli] at address %#x\n",
                    inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
            }
        }

        incrLdIdx(load_idx);
    }
    return NoFault;
}
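
// Example of the overlap test above: depCheckShift coarsens address
// comparison to (1 << depCheckShift)-byte chunks. With depCheckShift == 4, a
// store to [0x1000, 0x1007] and a load from [0x1008, 0x100f] both map to
// chunk 0x100 and are conservatively flagged as overlapping even though the
// byte ranges are disjoint; a shift of 0 gives an exact byte-range check.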

template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    using namespace TheISA;
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
            inst->pcState(), inst->seqNum);

    assert(!inst->isSquashed());

    load_fault = inst->initiateAcc();

    if (inst->isTranslationDelayed() &&
        load_fault == NoFault)
        return load_fault;

    // If the instruction faulted or predicated false, then we need to send it
    // along to commit without the instruction completing.
    if (load_fault != NoFault || !inst->readPredicate()) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity. Mark it as executed unless it
        // is a strictly ordered load that needs to hit the head of
        // commit.
        if (!inst->readPredicate())
            inst->forwardOldRegs();
        DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
                inst->seqNum,
                (load_fault != NoFault ? "fault" : "predication"));
        if (!(inst->hasRequest() && inst->strictlyOrdered()) ||
            inst->isAtCommit()) {
            inst->setExecuted();
        }
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    } else {
        assert(inst->effAddrValid());
        int load_idx = inst->lqIdx;
        incrLdIdx(load_idx);

        if (checkLoads)
            return checkViolations(load_idx, inst);
    }

    return load_fault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n",
            store_inst->pcState(), store_inst->seqNum);

    assert(!store_inst->isSquashed());

    // Check the recently completed loads to see if any match this store's
    // address. If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    if (store_inst->isTranslationDelayed() &&
        store_fault == NoFault)
        return store_fault;

    if (!store_inst->readPredicate())
        store_inst->forwardOldRegs();

    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit, "Fault on Store PC %s, [sn:%lli], Size = 0\n",
                store_inst->pcState(), store_inst->seqNum);

        return store_fault;
    } else if (!store_inst->readPredicate()) {
        DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
                store_inst->seqNum);
        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    return checkViolations(load_idx, store_inst);
}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n",
            loadQueue[loadHead]->pcState());

    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%s [sn:%lli]\n",
                    storeQueue[store_idx].inst->pcState(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackPendingStore()
{
    if (hasPendingPkt) {
        assert(pendingPkt != NULL);

        // If the cache is blocked, this will store the packet for retry.
        if (sendStore(pendingPkt)) {
            storePostSend(pendingPkt);
        }
        pendingPkt = NULL;
        hasPendingPkt = false;
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    // First writeback the second packet from any split store that didn't
    // complete last cycle because there weren't enough cache ports available.
    if (TheISA::HasUnalignedMemAcc) {
        writebackPendingStore();
    }

    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           ((!needsTSO) || (!storeInFlight)) &&
           usedPorts < cachePorts) {

        if (isStoreBlocked) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
            assert(storeQueue[storeWBIdx].sreqLow);
            assert(storeQueue[storeWBIdx].sreqHigh);
        }

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
        RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;

        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[req->getSize()];

        if (storeQueue[storeWBIdx].isAllZeros)
            memset(inst->memData, 0, req->getSize());
        else
            memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());

        PacketPtr data_pkt;
        PacketPtr snd_data_pkt = NULL;

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;

        if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {
            // Build a single data packet if the store isn't split.
            data_pkt = Packet::createWrite(req);
            data_pkt->dataStatic(inst->memData);
            data_pkt->senderState = state;
        } else {
            // Create two packets if the store is split in two.
            data_pkt = Packet::createWrite(sreqLow);
            snd_data_pkt = Packet::createWrite(sreqHigh);

            data_pkt->dataStatic(inst->memData);
            snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());

            data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;

            // Can delete the main request now.
            delete req;
            req = sreqLow;
        }

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->pcState(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            assert(!storeQueue[storeWBIdx].isSplit);
            // Disable recording the result temporarily. Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult(false);
            bool success = TheISA::handleLockedWrite(inst.get(), req,
                                                     cacheBlockMask);
            inst->recordResult(true);

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. "
                        "Instantly completing it.\n",
                        inst->seqNum);
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                cpu->schedule(wb, curTick() + 1);
                if (cpu->checker) {
                    // Make sure to set the LLSC data for verification
                    // if the checker is loaded.
                    inst->reqToVerify->setExtraData(0);
                    inst->completeAcc(data_pkt);
                }
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        bool split =
            TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;

        ThreadContext *thread = cpu->tcBase(lsqID);

        if (req->isMmappedIpr()) {
            assert(!inst->isStoreConditional());
            TheISA::handleIprWrite(thread, data_pkt);
            delete data_pkt;
            if (split) {
                assert(snd_data_pkt->req->isMmappedIpr());
                TheISA::handleIprWrite(thread, snd_data_pkt);
                delete snd_data_pkt;
                delete sreqLow;
                delete sreqHigh;
            }
            delete state;
            delete req;
            completeStore(storeWBIdx);
            incrStIdx(storeWBIdx);
        } else if (!sendStore(data_pkt)) {
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], "
                    "will retry later\n",
                    inst->seqNum);

            // Need to store the second packet, if split.
            if (split) {
                state->pktToSend = true;
                state->pendingPacket = snd_data_pkt;
            }
        } else {
            // If split, try to send the second packet too.
            if (split) {
                assert(snd_data_pkt);

                // Ensure there are enough ports to use.
                if (usedPorts < cachePorts) {
                    ++usedPorts;
                    if (sendStore(snd_data_pkt)) {
                        storePostSend(snd_data_pkt);
                    } else {
                        DPRINTF(IEW, "D-Cache became blocked when writing"
                                " [sn:%lli] second packet, will retry later\n",
                                inst->seqNum);
                    }
                } else {
                    // Store the packet for when there are free ports.
                    assert(pendingPkt == NULL);
                    pendingPkt = snd_data_pkt;
                    hasPendingPkt = true;
                }
            } else {
                // Not a split store.
                storePostSend(data_pkt);
            }
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n", mshrSeqNums.size());
    }
}*/

template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit, "Load Instruction PC %s squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->pcState(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit, "Store Instruction PC %s squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->pcState(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen. It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        // Must delete request now that it wasn't handed off to
        // memory. This is quite ugly. @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;
        if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
            delete storeQueue[store_idx].sreqLow;
            delete storeQueue[store_idx].sreqHigh;

            storeQueue[store_idx].sreqLow = NULL;
            storeQueue[store_idx].sreqHigh = NULL;
        }

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();

        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
    }

    if (needsTSO) {
        storeInFlight = true;
    }

    incrStIdx(storeWBIdx);
}
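
// Note on the TSO handling above: storeInFlight pairs with the
// ((!needsTSO) || (!storeInFlight)) guard in writebackStores() and the reset
// in completeStore(), so under TSO at most one store is outstanding in the
// memory system at a time, keeping stores visible in program order.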

template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        if (inst->fault == NoFault) {
            // Complete access to copy data to proper place.
            inst->completeAcc(pkt);
        } else {
            // If the instruction has an outstanding fault, we cannot complete
            // the access as this discards the current fault.

            // If we have an outstanding fault, the fault should only be of
            // type ReExec.
            assert(dynamic_cast<ReExec*>(inst->fault.get()) != nullptr);

            DPRINTF(LSQUnit, "Not completing instruction [sn:%lli] access "
                    "due to pending fault.\n", inst->seqNum);
        }
    }

    // Need to insert instruction into queue to commit.
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();

    // See if this load changed the PC.
    iewStage->checkMisprediction(inst);
}

template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

#if TRACING_ON
    if (DTRACE(O3PipeView)) {
        storeQueue[store_idx].inst->storeTick =
            curTick() - storeQueue[store_idx].inst->fetchTick;
    }
#endif

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    if (needsTSO) {
        storeInFlight = false;
    }

    // Tell the checker we've completed this instruction. Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
}

template <class Impl>
bool
LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
{
    if (!dcachePort->sendTimingReq(data_pkt)) {
        // Need to handle becoming blocked on a store.
        isStoreBlocked = true;
        ++lsqCacheBlocked;
        assert(retryPkt == NULL);
        retryPkt = data_pkt;
        return false;
    }
    return true;
}

template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Receiving retry: store blocked\n");
        assert(retryPkt != NULL);

        LSQSenderState *state =
            dynamic_cast<LSQSenderState *>(retryPkt->senderState);

        if (dcachePort->sendTimingReq(retryPkt)) {
            // Don't finish the store unless this is the last packet.
            if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
                state->pendingPacket == retryPkt) {
                state->pktToSend = false;
                storePostSend(retryPkt);
            }
            retryPkt = NULL;
            isStoreBlocked = false;

            // Send any outstanding packet.
            if (TheISA::HasUnalignedMemAcc && state->pktToSend) {
                assert(state->pendingPacket);
                if (sendStore(state->pendingPacket)) {
                    storePostSend(state->pendingPacket);
                }
            }
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
        }
    }
}
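
// Summary of the blocked-store handshake: sendStore() fails sendTimingReq(),
// parks the packet in retryPkt, and sets isStoreBlocked; the port later calls
// recvRetry(), which resends the parked packet. For a split store, the second
// half follows via state->pendingPacket once the first half is accepted.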

template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx) const
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx) const
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx) const
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx) const
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}
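
// Example of the wraparound helpers above: with LQEntries == 33 (32 usable
// entries plus the sentinel), incrLdIdx() wraps index 32 back to 0 and
// decrLdIdx() wraps index 0 back to 32.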

template <class Impl>
void
LSQUnit<Impl>::dumpInsts() const
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        const DynInstPtr &inst(loadQueue[load_idx]);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrLdIdx(load_idx);
    }
    cprintf("\n");

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        const DynInstPtr &inst(storeQueue[store_idx].inst);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrStIdx(store_idx);
    }

    cprintf("\n");
}

#endif // __CPU_O3_LSQ_UNIT_IMPL_HH__