/*
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu/checker/cpu.hh"
#include "cpu/o3/lsq_unit.hh"
#include "base/str.hh"
#include "mem/request.hh"

template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(&mainEventQueue), inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
    this->setFlags(Event::AutoDelete);
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    if (!lsqPtr->isSwitchedOut()) {
        lsqPtr->writeback(inst, pkt);
    }
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description()
{
    return "Store writeback event";
}

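// Called by the D-cache port when a timing access completes.  Writes the
// returned data back to the instruction (unless the sender state flags
// noWB) and, for stores, finishes the SQ entry; accesses belonging to
// squashed instructions or a switched-out LSQ are simply dropped.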
template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli]\n", inst->seqNum);
//    DPRINTF(Activity, "Activity: Ld Writeback event [sn:%lli]\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    if (isSwitchedOut() || inst->isSquashed()) {
        delete state;
        delete pkt;
        return;
    } else {
        if (!state->noWB) {
            writeback(inst, pkt);
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    delete state;
    delete pkt;
}

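// D-cache port callbacks.  The O3 LSQ only issues timing accesses, so the
// atomic and functional hooks panic, status changes other than RangeChange
// panic, and recvTiming hands the returning packet to completeDataAccess()
// above.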
template <class Impl>
Tick
LSQUnit<Impl>::DcachePort::recvAtomic(PacketPtr pkt)
{
    panic("O3CPU model does not work with atomic mode!");
    return curTick;
}

template <class Impl>
void
LSQUnit<Impl>::DcachePort::recvFunctional(PacketPtr pkt)
{
    panic("O3CPU doesn't expect recvFunctional callback!");
}

template <class Impl>
void
LSQUnit<Impl>::DcachePort::recvStatusChange(Status status)
{
    if (status == RangeChange)
        return;

    panic("O3CPU doesn't expect recvStatusChange callback!");
}

template <class Impl>
bool
LSQUnit<Impl>::DcachePort::recvTiming(PacketPtr pkt)
{
    lsq->completeDataAccess(pkt);
    return true;
}

template <class Impl>
void
LSQUnit<Impl>::DcachePort::recvRetry()
{
    panic("Retry unsupported for now!");
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
/*
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        cpu->dcache_pkt = NULL;
    }
*/
}

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false)
{
}

template<class Impl>
void
LSQUnit<Impl>::init(Params *params, unsigned maxLQEntries,
                    unsigned maxSQEntries, unsigned id)
{
    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n", id);

    switchedOut = false;

    lsqID = id;

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;
    cachePorts = params->cachePorts;

    mem = params->mem;

    memDepViolator = NULL;

    blockedLoadSeqNum = 0;
}

template<class Impl>
void
LSQUnit<Impl>::setCPU(FullCPU *cpu_ptr)
{
    cpu = cpu_ptr;
    dcachePort = new DcachePort(cpu, this);

    Port *mem_dport = mem->getPort("");
    dcachePort->setPeer(mem_dport);
    mem_dport->setPeer(dcachePort);
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread." + to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

#if 0
template<class Impl>
void
LSQUnit<Impl>::setPageTable(PageTable *pt_ptr)
{
    DPRINTF(LSQUnit, "Setting the page table pointer.\n");
    pTable = pt_ptr;
}
#endif

template<class Impl>
void
LSQUnit<Impl>::switchOut()
{
    switchedOut = true;
    for (int i = 0; i < loadQueue.size(); ++i)
        loadQueue[i] = NULL;

    assert(storesToWB == 0);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    switchedOut = false;
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;
}

template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }
}

template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %#x, idx:%i [sn:%lli]\n",
            load_inst->readPC(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %#x, idx:%i [sn:%lli]\n",
            store_inst->readPC(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeEntries()
{
    unsigned free_lq_entries = LQEntries - loads;
    unsigned free_sq_entries = SQEntries - stores;

    // Both the LQ and SQ entries have an extra dummy entry to differentiate
    // empty/full conditions.  Subtract 1 from the free entries.
    if (free_lq_entries < free_sq_entries) {
        return free_lq_entries - 1;
    } else {
        return free_sq_entries - 1;
    }
}

template <class Impl>
int
LSQUnit<Impl>::numLoadsReady()
{
    int load_idx = loadHead;
    int retval = 0;

    while (load_idx != loadTail) {
        assert(loadQueue[load_idx]);

        if (loadQueue[load_idx]->readyToIssue()) {
            ++retval;
        }

        // Advance to the next LQ entry; without this the loop never
        // terminates once a load is in the queue.
        incrLdIdx(load_idx);
    }

    return retval;
}

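// Executes a load by initiating its memory access; in timing mode the
// access completes later through completeDataAccess(), so only faulting
// loads are forwarded to commit here.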
template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %#x, [sn:%lli]\n",
            inst->readPC(), inst->seqNum);

    load_fault = inst->initiateAcc();

    // If the instruction faulted, then we need to send it along to commit
    // without the instruction completing.
    if (load_fault != NoFault) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    }

    return load_fault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %#x [sn:%lli]\n",
            store_inst->readPC(), store_inst->seqNum);

    // Check the recently completed loads to see if any match this store's
    // address.  If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit, "Fault on Store PC %#x, [sn:%lli], Size = 0\n",
                store_inst->readPC(), store_inst->seqNum);

        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    if (!memDepViolator) {
        while (load_idx != loadTail) {
            // Really only need to check loads that have actually executed.
            // It's safe to check all loads because effAddr is set to
            // InvalAddr when the dyn inst is created.

            // @todo: For now this is extra conservative, detecting a
            // violation if the addresses match assuming all accesses
            // are quad word accesses.

            // @todo: Fix this, magic number being used here
            if ((loadQueue[load_idx]->effAddr >> 8) ==
                (store_inst->effAddr >> 8)) {
                // A load incorrectly passed this store.  Squash and refetch.
                // For now return a fault to show that it was unsuccessful.
                memDepViolator = loadQueue[load_idx];

                return genMachineCheckFault();
            }

            incrLdIdx(load_idx);
        }

        // If we've reached this point, there was no violation.
        memDepViolator = NULL;
    }

    return store_fault;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %#x\n",
            loadQueue[loadHead]->readPC());

    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

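// Walks the SQ from the head and marks stores with seqNum <= youngest_inst
// as able to write back (canWB); writebackStores() later sends them to
// memory.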
template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%#x [sn:%lli]\n",
                    storeQueue[store_idx].inst->readPC(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

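// Sends committed (canWB) stores to the D-cache in program order, starting
// at storeWBIdx, until the writeback pointer reaches the tail, the per-cycle
// port budget (cachePorts) runs out, or the cache blocks the request.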
template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           usedPorts < cachePorts) {

        if (isStoreBlocked) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[64];
        memcpy(inst->memData, (uint8_t *)&storeQueue[storeWBIdx].data,
               req->getSize());

        PacketPtr data_pkt = new Packet(req, Packet::WriteReq, Packet::Broadcast);
        data_pkt->dataStatic(inst->memData);

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;
        data_pkt->senderState = state;

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%#x "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, storeQueue[storeWBIdx].inst->readPC(),
                req->getPaddr(), *(inst->memData),
                storeQueue[storeWBIdx].inst->seqNum);

        if (!dcachePort->sendTiming(data_pkt)) {
            // Need to handle becoming blocked on a store.
            isStoreBlocked = true;
        } else {
            if (isStalled() &&
                storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
                DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                        "load idx:%i\n",
                        stallingStoreIsn, stallingLoadIdx);
                stalled = false;
                stallingStoreIsn = 0;
                iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
            }

            if (!(req->getFlags() & LOCKED)) {
                assert(!storeQueue[storeWBIdx].inst->isStoreConditional());
                // Non-store conditionals do not need a writeback.
                state->noWB = true;
            }

            if (data_pkt->result != Packet::Success) {
                DPRINTF(LSQUnit, "D-Cache Write Miss on idx:%i!\n",
                        storeWBIdx);

                DPRINTF(Activity, "Active st accessing mem miss [sn:%lli]\n",
                        storeQueue[storeWBIdx].inst->seqNum);

                //mshrSeqNums.push_back(storeQueue[storeWBIdx].inst->seqNum);

                //DPRINTF(LSQUnit, "Added MSHR. count = %i\n",mshrSeqNums.size());

                // @todo: Increment stat here.
            } else {
                DPRINTF(LSQUnit, "D-Cache: Write Hit on idx:%i!\n",
                        storeWBIdx);

                DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n",
                        storeQueue[storeWBIdx].inst->seqNum);
            }

            incrStIdx(storeWBIdx);
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
    }
}*/

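// Squashes all loads and stores younger than squashed_num, walking each
// queue backwards from its tail.  Stores already marked canWB are committed
// and are not removed.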
template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]!"
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit, "Load Instruction PC %#x squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->readPC(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->squashed = true;
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
    }

    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit, "Store Instruction PC %#x squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->readPC(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen.  It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->squashed = true;
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
    }
}

template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        assert(!inst->isStore());
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();
}

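// Marks the SQ entry at store_idx as completed, retires any contiguous run
// of completed stores from the head of the queue, and wakes a load that was
// stalled waiting on this store.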
template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    // Tell the checker we've completed this instruction.  Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
    if (cpu->checker) {
        cpu->checker->tick(storeQueue[store_idx].inst);
    }
}

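// Circular-queue index helpers: increment/decrement with wraparound over the
// queue sizes, which include the extra sentinel entry.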
template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx)
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx)
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx)
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx)
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

template <class Impl>
void
LSQUnit<Impl>::dumpInsts()
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        cprintf("%#x ", loadQueue[load_idx]->readPC());

        incrLdIdx(load_idx);
    }

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        cprintf("%#x ", storeQueue[store_idx].inst->readPC());

        incrStIdx(store_idx);
    }

    cprintf("\n");
}