lsq_unit_impl.hh (2790:2f8e9762bee9 -> 2820:7fde0b0f8f78)
/*
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#include "config/use_checker.hh"

#include "cpu/o3/lsq_unit.hh"
#include "base/str.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

#if USE_CHECKER
#include "cpu/checker/cpu.hh"
#endif

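// Event used by the LSQ to finish an outstanding access at a later tick,
// e.g. when a load's data comes back and the writeback must be deferred.
// When it fires it hands the packet back to the owning LSQ unit (unless
// that unit has been switched out) and then frees the packet; the event
// deletes itself via the AutoDelete flag.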
template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(&mainEventQueue), inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
    this->setFlags(Event::AutoDelete);
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    if (!lsqPtr->isSwitchedOut()) {
        lsqPtr->writeback(inst, pkt);
    }
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description()
{
    return "Store writeback event";
}

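// Handler for responses arriving on the D-cache port.  The LSQSenderState
// attached to the packet identifies the instruction and its SQ index.
// Responses for squashed instructions (or for a switched-out unit) are
// dropped after telling the IEW stage to release the writeback it had
// counted for them (decrWb); otherwise the data is written back and, for
// stores, the SQ entry is marked complete.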
template <class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli]\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli]\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    if (isSwitchedOut() || inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        delete state;
        delete pkt;
        return;
    } else {
        if (!state->noWB) {
            writeback(inst, pkt);
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    delete state;
    delete pkt;
}

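// The LSQ's D-cache port only supports timing-mode accesses: atomic and
// functional requests, as well as unexpected status changes, are treated
// as errors.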
template <class Impl>
Tick
LSQUnit<Impl>::DcachePort::recvAtomic(PacketPtr pkt)
{
    panic("O3CPU model does not work with atomic mode!");
    return curTick;
}

template <class Impl>
void
LSQUnit<Impl>::DcachePort::recvFunctional(PacketPtr pkt)
{
    panic("O3CPU doesn't expect recvFunctional callback!");
}

template <class Impl>
void
LSQUnit<Impl>::DcachePort::recvStatusChange(Status status)
{
    if (status == RangeChange)
        return;

    panic("O3CPU doesn't expect recvStatusChange callback!");
}

template <class Impl>
bool
LSQUnit<Impl>::DcachePort::recvTiming(PacketPtr pkt)
{
    lsq->completeDataAccess(pkt);
    return true;
}

template <class Impl>
void
LSQUnit<Impl>::DcachePort::recvRetry()
{
    lsq->recvRetry();
}

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false)
{
}

template<class Impl>
void
LSQUnit<Impl>::init(Params *params, unsigned maxLQEntries,
                    unsigned maxSQEntries, unsigned id)
{
    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n", id);

    switchedOut = false;

    lsqID = id;

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;
    cachePorts = params->cachePorts;

    mem = params->mem;

    memDepViolator = NULL;

    blockedLoadSeqNum = 0;
}

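// Hook the unit up to its CPU: create the D-cache port, peer it with the
// port returned by the memory object, and (when the checker is compiled
// in) register the port with the checker CPU as well.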
template<class Impl>
void
LSQUnit<Impl>::setCPU(O3CPU *cpu_ptr)
{
    cpu = cpu_ptr;
    dcachePort = new DcachePort(cpu, this);

    Port *mem_dport = mem->getPort("");
    dcachePort->setPeer(mem_dport);
    mem_dport->setPeer(dcachePort);

#if USE_CHECKER
    if (cpu->checker) {
        cpu->checker->setDcachePort(dcachePort);
    }
#endif
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread." + to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the instruction is squashed");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

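// Draining/switching support: switchOut() assumes all committed stores
// have already been written back (hence the storesToWB assertion) and
// simply drops any remaining load-queue references; takeOverFrom() resets
// the unit to an empty state when a new context is switched in.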
template<class Impl>
void
LSQUnit<Impl>::switchOut()
{
    switchedOut = true;
    for (int i = 0; i < loadQueue.size(); ++i)
        loadQueue[i] = NULL;

    assert(storesToWB == 0);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    switchedOut = false;
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;
}

template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }
}

template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %#x, idx:%i [sn:%lli]\n",
            load_inst->readPC(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %#x, idx:%i [sn:%lli]\n",
            store_inst->readPC(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

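// Free-entry accounting.  Because one slot in each circular queue is a
// sentinel used to tell "full" from "empty", the usable capacity is one
// less than LQEntries/SQEntries.  For example, with maxLQEntries = 32 the
// queue is sized to 33, and an empty LQ reports 32 free entries.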
template <class Impl>
unsigned
LSQUnit<Impl>::numFreeEntries()
{
    unsigned free_lq_entries = LQEntries - loads;
    unsigned free_sq_entries = SQEntries - stores;

    // Both the LQ and SQ entries have an extra dummy entry to differentiate
    // empty/full conditions. Subtract 1 from the free entries.
    if (free_lq_entries < free_sq_entries) {
        return free_lq_entries - 1;
    } else {
        return free_sq_entries - 1;
    }
}

template <class Impl>
int
LSQUnit<Impl>::numLoadsReady()
{
    int load_idx = loadHead;
    int retval = 0;

    while (load_idx != loadTail) {
        assert(loadQueue[load_idx]);

        if (loadQueue[load_idx]->readyToIssue()) {
            ++retval;
        }

        // Advance to the next LQ entry; without this the walk over the
        // load queue never terminates.
        incrLdIdx(load_idx);
    }

    return retval;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %#x, [sn:%lli]\n",
            inst->readPC(), inst->seqNum);

    load_fault = inst->initiateAcc();

    // If the instruction faulted, then we need to send it along to commit
    // without the instruction completing.
    if (load_fault != NoFault) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    }

    return load_fault;
}

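// Execute a store: initiate its access, then scan younger loads for a
// possible memory order violation.  The check below is conservative: it
// compares effective addresses at a coarse granularity (effAddr >> 8), so
// any executed load in the same 256-byte region counts as a violator; the
// first match is recorded in memDepViolator and reported as a fault so
// the pipeline can squash and refetch.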
template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %#x [sn:%lli]\n",
            store_inst->readPC(), store_inst->seqNum);

    // Check the recently completed loads to see if any match this store's
    // address. If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit, "Fault on Store PC %#x, [sn:%lli], Size = 0\n",
                store_inst->readPC(), store_inst->seqNum);

        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    if (!memDepViolator) {
        while (load_idx != loadTail) {
            // Really only need to check loads that have actually executed.
            // It's safe to check all loads because effAddr is set to
            // InvalAddr when the dyn inst is created.

            // @todo: For now this is extra conservative, detecting a
            // violation if the addresses match assuming all accesses
            // are quad word accesses.

            // @todo: Fix this, magic number being used here
            if ((loadQueue[load_idx]->effAddr >> 8) ==
                (store_inst->effAddr >> 8)) {
                // A load incorrectly passed this store. Squash and refetch.
                // For now return a fault to show that it was unsuccessful.
                memDepViolator = loadQueue[load_idx];

                return genMachineCheckFault();
            }

            incrLdIdx(load_idx);
        }

        // If we've reached this point, there was no violation.
        memDepViolator = NULL;
    }

    return store_fault;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %#x\n",
            loadQueue[loadHead]->readPC());

    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%#x [sn:%lli]\n",
                    storeQueue[store_idx].inst->readPC(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

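// Drain committed stores to memory.  Each cycle this walks the store
// queue from storeWBIdx, sending at most cachePorts write packets to the
// D-cache; zero-size stores are completed locally and data prefetches are
// skipped.  Store-conditional results are filled in here via cpu->lockFlag
// (the SC hack noted below), and a failed sendTiming leaves the unit
// blocked until recvRetry() succeeds.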
template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           usedPorts < cachePorts) {

        if (isStoreBlocked) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[64];
        memcpy(inst->memData, (uint8_t *)&storeQueue[storeWBIdx].data,
               req->getSize());

        PacketPtr data_pkt = new Packet(req, Packet::WriteReq, Packet::Broadcast);
        data_pkt->dataStatic(inst->memData);

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;
        data_pkt->senderState = state;

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%#x "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, storeQueue[storeWBIdx].inst->readPC(),
                req->getPaddr(), *(inst->memData),
                storeQueue[storeWBIdx].inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (req->getFlags() & LOCKED) {
            if (req->getFlags() & UNCACHEABLE) {
                req->setScResult(2);
            } else {
                if (cpu->lockFlag) {
                    req->setScResult(1);
                } else {
                    req->setScResult(0);
                    // Hack: Instantly complete this store.
                    completeDataAccess(data_pkt);
                    incrStIdx(storeWBIdx);
                    continue;
                }
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        if (!dcachePort->sendTiming(data_pkt)) {
            // Need to handle becoming blocked on a store.
            isStoreBlocked = true;
            ++lsqCacheBlocked;
            assert(retryPkt == NULL);
            retryPkt = data_pkt;
        } else {
            storePostSend(data_pkt);
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n", mshrSeqNums.size());
    }
}*/

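// Squash everything younger than squashed_num.  Loads are removed from
// the tail of the LQ (clearing any stall they caused); stores are removed
// from the tail of the SQ, except for entries already marked canWB, which
// belong to committed stores and must still be written back.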
template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit, "Load Instruction PC %#x squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->readPC(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit, "Store Instruction PC %#x squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->readPC(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen. It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

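// Bookkeeping after a store packet has been handed to the D-cache:
// release any load stalled on this store, mark non-store-conditional
// stores completed (the checker, if present, verifies them now), note
// hit/miss for tracing, and advance storeWBIdx.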
template <class Impl>
void
LSQUnit<Impl>::storePostSend(Packet *pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();
#if USE_CHECKER
        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
#endif
    }

    if (pkt->result != Packet::Success) {
        DPRINTF(LSQUnit, "D-Cache Write Miss on idx:%i!\n",
                storeWBIdx);

        DPRINTF(Activity, "Active st accessing mem miss [sn:%lli]\n",
                storeQueue[storeWBIdx].inst->seqNum);

        //mshrSeqNums.push_back(storeQueue[storeWBIdx].inst->seqNum);

        //DPRINTF(LSQUnit, "Added MSHR. count = %i\n",mshrSeqNums.size());

        // @todo: Increment stat here.
    } else {
        DPRINTF(LSQUnit, "D-Cache: Write Hit on idx:%i!\n",
                storeWBIdx);

        DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n",
                storeQueue[storeWBIdx].inst->seqNum);
    }

    incrStIdx(storeWBIdx);
}

template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit.
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();
}

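// Mark the SQ entry at store_idx complete.  If it is the head entry,
// retire it and any contiguous completed entries behind it, freeing SQ
// space for the next cycle; also release any load stalled on this store
// and report the completion to the checker when it is compiled in.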
template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    // Tell the checker we've completed this instruction. Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
#if USE_CHECKER
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
#endif
}

template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        assert(retryPkt != NULL);

        if (dcachePort->sendTiming(retryPkt)) {
            storePostSend(retryPkt);
            retryPkt = NULL;
            isStoreBlocked = false;
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
        }
    } else if (isLoadBlocked) {
        DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
                "no need to resend packet.\n");
    } else {
        DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
    }
}

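// Helpers that step an index around the circular LQ/SQ, wrapping at the
// queue size.  For example, with SQEntries == 33, incrStIdx takes index
// 32 back to 0 and decrStIdx takes 0 back to 32.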
template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx)
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx)
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx)
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx)
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

template <class Impl>
void
LSQUnit<Impl>::dumpInsts()
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        cprintf("%#x ", loadQueue[load_idx]->readPC());

        incrLdIdx(load_idx);
    }

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        cprintf("%#x ", storeQueue[store_idx].inst->readPC());

        incrStIdx(store_idx);
    }

    cprintf("\n");
}