/*
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu/checker/cpu.hh"
#include "cpu/o3/lsq_unit.hh"
#include "base/str.hh"
#include "mem/request.hh"

template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
/*
    DPRINTF(IEW, "Load writeback event [sn:%lli]\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Ld Writeback event [sn:%lli]\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    if (iewStage->isSwitchedOut()) {
        inst = NULL;
        return;
    } else if (inst->isSquashed()) {
        iewStage->wakeCPU();
        inst = NULL;
        return;
    }

    iewStage->wakeCPU();

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc();
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();

    inst = NULL;
*/
}

template<class Impl>
void
LSQUnit<Impl>::completeStoreDataAccess(DynInstPtr &inst)
{
/*
    DPRINTF(LSQ, "Cache miss complete for store idx:%i\n", storeIdx);
    DPRINTF(Activity, "Activity: st writeback event idx:%i\n", storeIdx);

    //lsqPtr->removeMSHR(lsqPtr->storeQueue[storeIdx].inst->seqNum);

    if (lsqPtr->isSwitchedOut()) {
        if (wbEvent)
            delete wbEvent;

        return;
    }

    lsqPtr->cpu->wakeCPU();

    if (wb)
        lsqPtr->completeDataAccess(storeIdx);
    lsqPtr->completeStore(storeIdx);
*/
}

template <class Impl>
Tick
LSQUnit<Impl>::DcachePort::recvAtomic(PacketPtr pkt)
{
    panic("O3CPU model does not work with atomic mode!");
    return curTick;
}

template <class Impl>
void
LSQUnit<Impl>::DcachePort::recvFunctional(PacketPtr pkt)
{
    panic("O3CPU doesn't expect recvFunctional callback!");
}

template <class Impl>
void
LSQUnit<Impl>::DcachePort::recvStatusChange(Status status)
{
    if (status == RangeChange)
        return;

    panic("O3CPU doesn't expect recvStatusChange callback!");
}

template <class Impl>
bool
LSQUnit<Impl>::DcachePort::recvTiming(PacketPtr pkt)
{
    lsq->completeDataAccess(pkt);
    return true;
}

template <class Impl>
void
LSQUnit<Impl>::DcachePort::recvRetry()
{
    panic("Retry unsupported for now!");
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
/*
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        cpu->dcache_pkt = NULL;
    }
*/
}

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), stalled(false), isLoadBlocked(false),
      loadBlockedHandled(false)
{
}

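// Size the circular load and store queues (each gets one extra sentinel
// entry to distinguish full from empty), reset the queue indices, and connect
// the D-cache port to the memory object's port.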
template<class Impl>
void
LSQUnit<Impl>::init(Params *params, unsigned maxLQEntries,
                    unsigned maxSQEntries, unsigned id)
{
    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n", id);

    switchedOut = false;

    lsqID = id;

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;
    cachePorts = params->cachePorts;

    Port *mem_dport = params->mem->getPort("");
    dcachePort->setPeer(mem_dport);
    mem_dport->setPeer(dcachePort);

    memDepViolator = NULL;

    blockedLoadSeqNum = 0;
}

template<class Impl>
void
LSQUnit<Impl>::setCPU(FullCPU *cpu_ptr)
{
    cpu = cpu_ptr;
    dcachePort = new DcachePort(cpu, this);
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread." + to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

#if 0
template<class Impl>
void
LSQUnit<Impl>::setPageTable(PageTable *pt_ptr)
{
    DPRINTF(LSQUnit, "Setting the page table pointer.\n");
    pTable = pt_ptr;
}
#endif

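// Drain this LSQ unit when the CPU is switched out.  All stores must already
// have written back; any loads still in the queue are simply dropped.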
template<class Impl>
void
LSQUnit<Impl>::switchOut()
{
    switchedOut = true;
    for (int i = 0; i < loadQueue.size(); ++i)
        loadQueue[i] = NULL;

    assert(storesToWB == 0);
}

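// Reset all queue indices and status flags when taking over execution from
// another CPU.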
template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    switchedOut = false;
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;
}

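// Resize the load queue.  The requested size excludes the sentinel entry, and
// the queue is only ever grown by appending empty entries.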
template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }
}

template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

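// Insert a load at the tail of the load queue.  The load also records the
// store queue tail index at the time of insertion (or -1 if the store queue
// is empty), which identifies the stores that are older than this load.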
template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %#x, idx:%i [sn:%lli]\n",
            load_inst->readPC(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %#x, idx:%i [sn:%lli]\n",
            store_inst->readPC(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

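// Return the load that caused a memory ordering violation (if any) and clear
// the recorded violator.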
template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeEntries()
{
    unsigned free_lq_entries = LQEntries - loads;
    unsigned free_sq_entries = SQEntries - stores;

    // Both the LQ and SQ entries have an extra dummy entry to differentiate
    // empty/full conditions. Subtract 1 from the free entries.
    if (free_lq_entries < free_sq_entries) {
        return free_lq_entries - 1;
    } else {
        return free_sq_entries - 1;
    }
}

template <class Impl>
int
LSQUnit<Impl>::numLoadsReady()
{
    int load_idx = loadHead;
    int retval = 0;

    while (load_idx != loadTail) {
        assert(loadQueue[load_idx]);

        if (loadQueue[load_idx]->readyToIssue()) {
            ++retval;
        }

        // Advance to the next load queue entry; without this increment the
        // loop never terminates.
        incrLdIdx(load_idx);
    }

    return retval;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %#x, [sn:%lli]\n",
            inst->readPC(), inst->seqNum);

    load_fault = inst->initiateAcc();

    // If the instruction faulted, then we need to send it along to commit
    // without the instruction completing.
    if (load_fault != NoFault) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    }

    return load_fault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %#x [sn:%lli]\n",
            store_inst->readPC(), store_inst->seqNum);

    // Check the recently completed loads to see if any match this store's
    // address. If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();
//    Fault store_fault = store_inst->execute();

    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit, "Fault on Store PC %#x, [sn:%lli], Size = 0\n",
                store_inst->readPC(), store_inst->seqNum);

        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    if (!memDepViolator) {
        while (load_idx != loadTail) {
            // Really only need to check loads that have actually executed
            // It's safe to check all loads because effAddr is set to
            // InvalAddr when the dyn inst is created.

            // @todo: For now this is extra conservative, detecting a
            // violation if the addresses match assuming all accesses
            // are quad word accesses.

            // @todo: Fix this, magic number being used here
            if ((loadQueue[load_idx]->effAddr >> 8) ==
                (store_inst->effAddr >> 8)) {
                // A load incorrectly passed this store. Squash and refetch.
                // For now return a fault to show that it was unsuccessful.
                memDepViolator = loadQueue[load_idx];

                return genMachineCheckFault();
            }

            incrLdIdx(load_idx);
        }

        // If we've reached this point, there was no violation.
        memDepViolator = NULL;
    }

    return store_fault;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %#x\n",
            loadQueue[loadHead]->readPC());

    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

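// Walk the store queue and mark every store up to and including youngest_inst
// as able to write back.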
template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%#x [sn:%lli]\n",
                    storeQueue[store_idx].inst->readPC(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

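// Write back as many committed (canWB) stores as the available cache ports
// allow this cycle, sending each one to the D-cache as a timing request.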
template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           usedPorts < cachePorts) {

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }
/*
        if (dcacheInterface && dcacheInterface->isBlocked()) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }
*/
        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[64];
        memcpy(inst->memData, (uint8_t *)&storeQueue[storeWBIdx].data,
               req->getSize());

        PacketPtr data_pkt = new Packet(req, Packet::WriteReq, Packet::Broadcast);
        data_pkt->dataStatic(inst->memData);

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%#x "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, storeQueue[storeWBIdx].inst->readPC(),
                req->getPaddr(), *(inst->memData),
                storeQueue[storeWBIdx].inst->seqNum);

        if (!dcachePort->sendTiming(data_pkt)) {
            // Need to handle becoming blocked on a store.
        } else {
            /*
            StoreCompletionEvent *store_event = new
                StoreCompletionEvent(storeWBIdx, NULL, this);
            */
            if (isStalled() &&
                storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
                DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                        "load idx:%i\n",
                        stallingStoreIsn, stallingLoadIdx);
                stalled = false;
                stallingStoreIsn = 0;
                iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
            }
/*
            typename LdWritebackEvent *wb = NULL;
            if (req->flags & LOCKED) {
                // Stx_C should not generate a system port transaction
                // if it misses in the cache, but that might be hard
                // to accomplish without explicit cache support.
                wb = new typename
                    LdWritebackEvent(storeQueue[storeWBIdx].inst,
                                     iewStage);
                store_event->wbEvent = wb;
            }
*/
            if (data_pkt->result != Packet::Success) {
                DPRINTF(LSQUnit, "D-Cache Write Miss on idx:%i!\n",
                        storeWBIdx);

                DPRINTF(Activity, "Active st accessing mem miss [sn:%lli]\n",
                        storeQueue[storeWBIdx].inst->seqNum);

                //mshrSeqNums.push_back(storeQueue[storeWBIdx].inst->seqNum);

                //DPRINTF(LSQUnit, "Added MSHR. count = %i\n",mshrSeqNums.size());

                // @todo: Increment stat here.
            } else {
                DPRINTF(LSQUnit, "D-Cache: Write Hit on idx:%i !\n",
                        storeWBIdx);

                DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n",
                        storeQueue[storeWBIdx].inst->seqNum);
            }

            incrStIdx(storeWBIdx);
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
    }
}*/

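// Squash all loads and stores younger than squashed_num, walking each queue
// from the tail toward the head.  Stores already marked as able to write back
// are committed and are not squashed.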
template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]!"
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit, "Load Instruction PC %#x squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->readPC(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->squashed = true;
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
    }

    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit, "Store Instruction PC %#x squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->readPC(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen. It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->squashed = true;
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
    }
}

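// Mark a store as completed once its write has finished.  If the completed
// store is at the head of the queue, retire it along with any consecutive
// completed stores that follow, and wake up any load stalled waiting on it.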
template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    // Tell the checker we've completed this instruction. Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
    if (cpu->checker) {
        cpu->checker->tick(storeQueue[store_idx].inst);
    }
}

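// Circular queue index helpers: increment or decrement an index and wrap it
// around the corresponding queue size.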
template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx)
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx)
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx)
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx)
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

template <class Impl>
void
LSQUnit<Impl>::dumpInsts()
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        cprintf("%#x ", loadQueue[load_idx]->readPC());

        incrLdIdx(load_idx);
    }

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        cprintf("%#x ", storeQueue[store_idx].inst->readPC());

        incrStIdx(store_idx);
    }

    cprintf("\n");
}