lsq_unit_impl.hh (9046:a1104cc13db2 vs 9165:f9e3dac185ba)
/*
 * Copyright (c) 2010-2011 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#include "arch/generic/debugfaults.hh"
#include "arch/locked_mem.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/o3/lsq.hh"
#include "cpu/o3/lsq_unit.hh"
#include "debug/Activity.hh"
#include "debug/IEW.hh"
#include "debug/LSQUnit.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(Default_Pri, AutoDelete),
      inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    if (!lsqPtr->isSwitchedOut()) {
        lsqPtr->writeback(inst, pkt);
    }

    if (pkt->senderState)
        delete pkt->senderState;

    delete pkt->req;
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description() const
{
    return "Store writeback";
}

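// Response-path callback: invoked when the memory system returns data for
// a request issued by this unit.  For split (unaligned) accesses both
// halves carry the same LSQSenderState, so the writeback below is deferred
// until state->complete() signals that all packets have arrived.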
template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    assert(!pkt->wasNacked());

    // If this is a split access, wait until all packets are received.
    if (TheISA::HasUnalignedMemAcc && !state->complete()) {
        delete pkt->req;
        delete pkt;
        return;
    }

    if (isSwitchedOut() || inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
    } else {
        if (!state->noWB) {
            if (!TheISA::HasUnalignedMemAcc || !state->isSplit ||
                !state->isLoad) {
                writeback(inst, pkt);
            } else {
                writeback(inst, state->mainPkt);
            }
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
        delete state->mainPkt->req;
        delete state->mainPkt;
    }
    delete state;
    delete pkt->req;
    delete pkt;
}

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false), storeInFlight(false), hasPendingPkt(false)
{
}

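// The load and store queues are circular buffers with one sentinel slot,
// so that head == tail unambiguously means empty; e.g. maxLQEntries = 32
// allocates a 33-slot loadQueue below.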
template<class Impl>
void
LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
        LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
        unsigned id)
{
    cpu = cpu_ptr;
    iewStage = iew_ptr;

    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n", id);

    switchedOut = false;

    cacheBlockMask = 0;

    lsq = lsq_ptr;

    lsqID = id;

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    depCheckShift = params->LSQDepCheckShift;
    checkLoads = params->LSQCheckLoads;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;
    cachePorts = params->cachePorts;

    retryPkt = NULL;
    memDepViolator = NULL;

    blockedLoadSeqNum = 0;
    needsTSO = params->needsTSO;
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread" + to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the instruction is "
              "squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an invalid "
              "address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache "
              "being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port)
{
    dcachePort = dcache_port;
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::switchOut()
{
    switchedOut = true;
    for (int i = 0; i < loadQueue.size(); ++i) {
        assert(!loadQueue[i]);
        loadQueue[i] = NULL;
    }

    assert(storesToWB == 0);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    switchedOut = false;
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;

    // Just in case the memory system changed out from under us.
    cacheBlockMask = 0;
}

template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }
}

template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
            load_inst->pcState(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
            store_inst->pcState(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

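// Because of the sentinel slot, one entry can never be occupied; e.g.
// with maxLQEntries = 32 (LQEntries = 33), an empty load queue reports
// 32 free entries from the calculation below.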
template <class Impl>
unsigned
LSQUnit<Impl>::numFreeEntries()
{
    unsigned free_lq_entries = LQEntries - loads;
    unsigned free_sq_entries = SQEntries - stores;

    // Both the LQ and SQ have an extra dummy entry to differentiate the
    // empty/full conditions, so subtract 1 from the free entries.
    if (free_lq_entries < free_sq_entries) {
        return free_lq_entries - 1;
    } else {
        return free_sq_entries - 1;
    }
}

template <class Impl>
int
LSQUnit<Impl>::numLoadsReady()
{
    int load_idx = loadHead;
    int retval = 0;

    while (load_idx != loadTail) {
        assert(loadQueue[load_idx]);

        if (loadQueue[load_idx]->readyToIssue()) {
            ++retval;
        }

        // Advance to the next entry; without this the walk never
        // terminates.
        incrLdIdx(load_idx);
    }

    return retval;
}

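// Invalidation snoops from the memory system are walked past the load
// queue below: a load already flagged as a possible violator is marked
// for re-execution (ReExec fault), while other executed loads are tagged
// as having seen an external snoop so checkViolations() can squash them
// later if needed.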
template <class Impl>
void
LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
{
    int load_idx = loadHead;

    if (!cacheBlockMask) {
        assert(dcachePort);
        Addr bs = dcachePort->peerBlockSize();

        // Make sure we actually got a size
        assert(bs != 0);

        cacheBlockMask = ~(bs - 1);
    }

    // If this is the only load in the LSQ we don't care
    if (load_idx == loadTail)
        return;
    incrLdIdx(load_idx);

    DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());
    Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];

        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
                ld_inst->seqNum, load_addr, invalidate_addr);

        if (load_addr == invalidate_addr) {
            if (ld_inst->possibleLoadViolation()) {
                DPRINTF(LSQUnit, "Conflicting load at addr %#x to "
                        "pktAddr:%#x [sn:%lli]\n",
                        ld_inst->physEffAddr, pkt->getAddr(), ld_inst->seqNum);

                // Mark the load for re-execution
                ld_inst->fault = new ReExec;
            } else {
                // If an older load checks this and it's true
                // then we might have missed the snoop
                // in which case we need to invalidate to be sure
                ld_inst->hitExternalSnoop(true);
            }
        }
        incrLdIdx(load_idx);
    }
    return;
}

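// Overlap is tested at a granularity of (1 << depCheckShift) bytes; e.g.
// with LSQDepCheckShift = 4, two accesses alias if they touch the same
// 16-byte region.  That can flag false positives, but it keeps the
// address comparison cheap.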
template <class Impl>
Fault
LSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst)
{
    Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
    Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;

    /** @todo in theory you only need to check an instruction that has
     * executed; however, there isn't a good way in the pipeline at the
     * moment to check all instructions that will execute before the store
     * writes back.  Thus, like the implementation that came before it,
     * we're overly conservative.
     */
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];
        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
        Addr ld_eff_addr2 =
            (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;

        if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
            if (inst->isLoad()) {
                // If this load is to the same block as an external snoop
                // invalidate that we've observed then the load needs to be
                // squashed as it could have newer data
                if (ld_inst->hitExternalSnoop()) {
                    if (!memDepViolator ||
                        ld_inst->seqNum < memDepViolator->seqNum) {
                        DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
                                "and [sn:%lli] at address %#x\n",
                                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                        memDepViolator = ld_inst;

                        ++lsqMemOrderViolation;

                        return new GenericISA::M5PanicFault(
                            "Detected fault with inst [sn:%lli] and "
                            "[sn:%lli] at address %#x\n",
                            inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                    }
                }

                // Otherwise, mark the load as a possible load violation,
                // and if we see a snoop before it's committed, we need to
                // squash.
                ld_inst->possibleLoadViolation(true);
                DPRINTF(LSQUnit, "Found possible load violation at addr: %#x"
                        " between instructions [sn:%lli] and [sn:%lli]\n",
                        inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
            } else {
                // A load/store incorrectly passed this store.
                // If we already track an older violator, keep it;
                // otherwise record this one so it gets squashed and
                // refetched.
                if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
                    break;

                DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and "
                        "[sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                memDepViolator = ld_inst;

                ++lsqMemOrderViolation;

                return new GenericISA::M5PanicFault("Detected fault with "
                        "inst [sn:%lli] and [sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
            }
        }

        incrLdIdx(load_idx);
    }
    return NoFault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    using namespace TheISA;
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
            inst->pcState(), inst->seqNum);

    assert(!inst->isSquashed());

    load_fault = inst->initiateAcc();

    if (inst->isTranslationDelayed() &&
        load_fault == NoFault)
        return load_fault;

    // If the instruction faulted or predicated false, then we need to send it
    // along to commit without the instruction completing.
    if (load_fault != NoFault || inst->readPredicate() == false) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        // Mark it as executed unless it is an uncached load that
        // needs to hit the head of commit.
        if (inst->readPredicate() == false)
            inst->forwardOldRegs();
        DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
                inst->seqNum,
                (load_fault != NoFault ? "fault" : "predication"));
        if (!(inst->hasRequest() && inst->uncacheable()) ||
            inst->isAtCommit()) {
            inst->setExecuted();
        }
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    } else if (!loadBlocked()) {
        assert(inst->effAddrValid());
        int load_idx = inst->lqIdx;
        incrLdIdx(load_idx);

        if (checkLoads)
            return checkViolations(load_idx, inst);
    }

    return load_fault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n",
            store_inst->pcState(), store_inst->seqNum);

    assert(!store_inst->isSquashed());

    // Check the recently completed loads to see if any match this store's
    // address. If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    if (store_inst->isTranslationDelayed() &&
        store_fault == NoFault)
        return store_fault;

    if (store_inst->readPredicate() == false)
        store_inst->forwardOldRegs();

    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit, "Fault on Store PC %s, [sn:%lli], Size = 0\n",
                store_inst->pcState(), store_inst->seqNum);

        return store_fault;
    } else if (store_inst->readPredicate() == false) {
        DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
                store_inst->seqNum);
        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    return checkViolations(load_idx, store_inst);
}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n",
            loadQueue[loadHead]->pcState());

    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%s [sn:%lli]\n",
                    storeQueue[store_idx].inst->pcState(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackPendingStore()
{
    if (hasPendingPkt) {
        assert(pendingPkt != NULL);

        // If the cache is blocked, this will store the packet for retry.
        if (sendStore(pendingPkt)) {
            storePostSend(pendingPkt);
        }
        pendingPkt = NULL;
        hasPendingPkt = false;
    }
}

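// A store is written back once it is committed (canWB), a cache port is
// still free this cycle and, when TSO ordering is enforced, no other
// store is already in flight; the loop below drains eligible stores in
// program order starting at storeWBIdx.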
template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    // First writeback the second packet from any split store that didn't
    // complete last cycle because there weren't enough cache ports available.
    if (TheISA::HasUnalignedMemAcc) {
        writebackPendingStore();
    }

    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           ((!needsTSO) || (!storeInFlight)) &&
           usedPorts < cachePorts) {

        if (isStoreBlocked || lsq->cacheBlocked()) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
            assert(storeQueue[storeWBIdx].sreqLow);
            assert(storeQueue[storeWBIdx].sreqHigh);
        }

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
        RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;

        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[64];

        memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());

        MemCmd command =
            req->isSwap() ? MemCmd::SwapReq :
            (req->isLLSC() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
        PacketPtr data_pkt;
        PacketPtr snd_data_pkt = NULL;

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;

        if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {
            // Build a single data packet if the store isn't split.
            data_pkt = new Packet(req, command);
            data_pkt->dataStatic(inst->memData);
            data_pkt->senderState = state;
        } else {
            // Create two packets if the store is split in two.
            data_pkt = new Packet(sreqLow, command);
            snd_data_pkt = new Packet(sreqHigh, command);

            data_pkt->dataStatic(inst->memData);
            snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());

            data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;

            // Can delete the main request now.
            delete req;
            req = sreqLow;
        }

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->pcState(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            assert(!storeQueue[storeWBIdx].isSplit);
            // Disable recording the result temporarily.  Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult(false);
            bool success = TheISA::handleLockedWrite(inst.get(), req);
            inst->recordResult(true);

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed.  "
                        "Instantly completing it.\n",
                        inst->seqNum);
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                cpu->schedule(wb, curTick() + 1);
                if (cpu->checker) {
                    // Make sure to set the LLSC data for verification
                    // if checker is loaded
                    inst->reqToVerify->setExtraData(0);
                    inst->completeAcc(data_pkt);
                }
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        bool split =
            TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;

        ThreadContext *thread = cpu->tcBase(lsqID);

        if (req->isMmappedIpr()) {
            assert(!inst->isStoreConditional());
            TheISA::handleIprWrite(thread, data_pkt);
            delete data_pkt;
            if (split) {
                assert(snd_data_pkt->req->isMmappedIpr());
                TheISA::handleIprWrite(thread, snd_data_pkt);
                delete snd_data_pkt;
                delete sreqLow;
                delete sreqHigh;
            }
            delete state;
            delete req;
            completeStore(storeWBIdx);
            incrStIdx(storeWBIdx);
        } else if (!sendStore(data_pkt)) {
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], "
                    "will retry later\n",
                    inst->seqNum);

            // Need to store the second packet, if split.
            if (split) {
                state->pktToSend = true;
                state->pendingPacket = snd_data_pkt;
            }
        } else {
            // If split, try to send the second packet too
            if (split) {
                assert(snd_data_pkt);

                // Ensure there are enough ports to use.
                if (usedPorts < cachePorts) {
                    ++usedPorts;
                    if (sendStore(snd_data_pkt)) {
                        storePostSend(snd_data_pkt);
                    } else {
                        DPRINTF(IEW, "D-Cache became blocked when writing"
                                " [sn:%lli] second packet, will retry later\n",
                                inst->seqNum);
                    }
                } else {
                    // Store the packet for when there's free ports.
                    assert(pendingPkt == NULL);
                    pendingPkt = snd_data_pkt;
                    hasPendingPkt = true;
                }
            } else {
                // Not a split store.
                storePostSend(data_pkt);
            }
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n", mshrSeqNums.size());
    }
}*/

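// Squashing walks both queues from the tail (youngest) toward the head,
// releasing every instruction younger than squashed_num; stores already
// marked able to write back are committed and are never squashed.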
template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit, "Load Instruction PC %s squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->pcState(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit, "Store Instruction PC %s squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->pcState(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen. It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;
        if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
            delete storeQueue[store_idx].sreqLow;
            delete storeQueue[store_idx].sreqHigh;

            storeQueue[store_idx].sreqLow = NULL;
            storeQueue[store_idx].sreqHigh = NULL;
        }

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

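// Post-send bookkeeping: ordinary stores are treated as complete once
// handed to the cache (only store conditionals need the response), and
// under TSO the storeInFlight flag blocks further store writebacks until
// the response returns.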
template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();

        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
    }

    if (needsTSO) {
        storeInFlight = true;
    }

    incrStIdx(storeWBIdx);
}

template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();

    // see if this load changed the PC
    iewStage->checkMisprediction(inst);
}

template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    if (needsTSO) {
        storeInFlight = false;
    }

    // Tell the checker we've completed this instruction.  Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
}

template <class Impl>
bool
LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
{
    if (!dcachePort->sendTimingReq(data_pkt)) {
        // Need to handle becoming blocked on a store.
        isStoreBlocked = true;
        ++lsqCacheBlocked;
        assert(retryPkt == NULL);
        retryPkt = data_pkt;
        lsq->setRetryTid(lsqID);
        return false;
    }
    return true;
}

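// Called on a retry from the d-cache side once it can accept requests
// again after a failed sendTimingReq(); the packet saved in retryPkt is
// resent, and for a split store the pending second half is sent once the
// first half succeeds.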
template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Receiving retry: store blocked\n");
        assert(retryPkt != NULL);

        LSQSenderState *state =
            dynamic_cast<LSQSenderState *>(retryPkt->senderState);

        if (dcachePort->sendTimingReq(retryPkt)) {
            // Don't finish the store unless this is the last packet.
            if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
                state->pendingPacket == retryPkt) {
                state->pktToSend = false;
                storePostSend(retryPkt);
            }
            retryPkt = NULL;
            isStoreBlocked = false;
            lsq->setRetryTid(InvalidThreadID);

            // Send any outstanding packet.
            if (TheISA::HasUnalignedMemAcc && state->pktToSend) {
                assert(state->pendingPacket);
                if (sendStore(state->pendingPacket)) {
                    storePostSend(state->pendingPacket);
                }
            }
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
            lsq->setRetryTid(lsqID);
        }
    } else if (isLoadBlocked) {
        DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
                "no need to resend packet.\n");
    } else {
        DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
    }
}

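// The index helpers below implement the circular wrap-around over the
// sentinel-extended sizes; e.g. with SQEntries = 33, incrementing index
// 32 yields 0 and decrementing index 0 yields 32.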
template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx)
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx)
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx)
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx)
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

template <class Impl>
void
LSQUnit<Impl>::dumpInsts()
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        cprintf("%s ", loadQueue[load_idx]->pcState());

        incrLdIdx(load_idx);
    }

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        cprintf("%s ", storeQueue[store_idx].inst->pcState());

        incrStIdx(store_idx);
    }

    cprintf("\n");
}
98 // If this is a split access, wait until all packets are received.
99 if (TheISA::HasUnalignedMemAcc && !state->complete()) {
100 delete pkt->req;
101 delete pkt;
102 return;
103 }
104
105 if (isSwitchedOut() || inst->isSquashed()) {
106 iewStage->decrWb(inst->seqNum);
107 } else {
108 if (!state->noWB) {
109 if (!TheISA::HasUnalignedMemAcc || !state->isSplit ||
110 !state->isLoad) {
111 writeback(inst, pkt);
112 } else {
113 writeback(inst, state->mainPkt);
114 }
115 }
116
117 if (inst->isStore()) {
118 completeStore(state->idx);
119 }
120 }
121
122 if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
123 delete state->mainPkt->req;
124 delete state->mainPkt;
125 }
126 delete state;
127 delete pkt->req;
128 delete pkt;
129}
130
131template <class Impl>
132LSQUnit<Impl>::LSQUnit()
133 : loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
134 isStoreBlocked(false), isLoadBlocked(false),
135 loadBlockedHandled(false), storeInFlight(false), hasPendingPkt(false)
136{
137}
138
139template<class Impl>
140void
141LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
142 LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
143 unsigned id)
144{
145 cpu = cpu_ptr;
146 iewStage = iew_ptr;
147
148 DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id);
149
150 switchedOut = false;
151
152 cacheBlockMask = 0;
153
154 lsq = lsq_ptr;
155
156 lsqID = id;
157
158 // Add 1 for the sentinel entry (they are circular queues).
159 LQEntries = maxLQEntries + 1;
160 SQEntries = maxSQEntries + 1;
161
162 loadQueue.resize(LQEntries);
163 storeQueue.resize(SQEntries);
164
165 depCheckShift = params->LSQDepCheckShift;
166 checkLoads = params->LSQCheckLoads;
167
168 loadHead = loadTail = 0;
169
170 storeHead = storeWBIdx = storeTail = 0;
171
172 usedPorts = 0;
173 cachePorts = params->cachePorts;
174
175 retryPkt = NULL;
176 memDepViolator = NULL;
177
178 blockedLoadSeqNum = 0;
179 needsTSO = params->needsTSO;
180}
181
182template<class Impl>
183std::string
184LSQUnit<Impl>::name() const
185{
186 if (Impl::MaxThreads == 1) {
187 return iewStage->name() + ".lsq";
188 } else {
189 return iewStage->name() + ".lsq.thread" + to_string(lsqID);
190 }
191}
192
193template<class Impl>
194void
195LSQUnit<Impl>::regStats()
196{
197 lsqForwLoads
198 .name(name() + ".forwLoads")
199 .desc("Number of loads that had data forwarded from stores");
200
201 invAddrLoads
202 .name(name() + ".invAddrLoads")
203 .desc("Number of loads ignored due to an invalid address");
204
205 lsqSquashedLoads
206 .name(name() + ".squashedLoads")
207 .desc("Number of loads squashed");
208
209 lsqIgnoredResponses
210 .name(name() + ".ignoredResponses")
211 .desc("Number of memory responses ignored because the instruction is squashed");
212
213 lsqMemOrderViolation
214 .name(name() + ".memOrderViolation")
215 .desc("Number of memory ordering violations");
216
217 lsqSquashedStores
218 .name(name() + ".squashedStores")
219 .desc("Number of stores squashed");
220
221 invAddrSwpfs
222 .name(name() + ".invAddrSwpfs")
223 .desc("Number of software prefetches ignored due to an invalid address");
224
225 lsqBlockedLoads
226 .name(name() + ".blockedLoads")
227 .desc("Number of blocked loads due to partial load-store forwarding");
228
229 lsqRescheduledLoads
230 .name(name() + ".rescheduledLoads")
231 .desc("Number of loads that were rescheduled");
232
233 lsqCacheBlocked
234 .name(name() + ".cacheBlocked")
235 .desc("Number of times an access to memory failed due to the cache being blocked");
236}
237
238template<class Impl>
239void
240LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port)
241{
242 dcachePort = dcache_port;
243}
244
245template<class Impl>
246void
247LSQUnit<Impl>::clearLQ()
248{
249 loadQueue.clear();
250}
251
252template<class Impl>
253void
254LSQUnit<Impl>::clearSQ()
255{
256 storeQueue.clear();
257}
258
259template<class Impl>
260void
261LSQUnit<Impl>::switchOut()
262{
263 switchedOut = true;
264 for (int i = 0; i < loadQueue.size(); ++i) {
265 assert(!loadQueue[i]);
266 loadQueue[i] = NULL;
267 }
268
269 assert(storesToWB == 0);
270}
271
272template<class Impl>
273void
274LSQUnit<Impl>::takeOverFrom()
275{
276 switchedOut = false;
277 loads = stores = storesToWB = 0;
278
279 loadHead = loadTail = 0;
280
281 storeHead = storeWBIdx = storeTail = 0;
282
283 usedPorts = 0;
284
285 memDepViolator = NULL;
286
287 blockedLoadSeqNum = 0;
288
289 stalled = false;
290 isLoadBlocked = false;
291 loadBlockedHandled = false;
292
293 // Just incase the memory system changed out from under us
294 cacheBlockMask = 0;
295}
296
297template<class Impl>
298void
299LSQUnit<Impl>::resizeLQ(unsigned size)
300{
301 unsigned size_plus_sentinel = size + 1;
302 assert(size_plus_sentinel >= LQEntries);
303
304 if (size_plus_sentinel > LQEntries) {
305 while (size_plus_sentinel > loadQueue.size()) {
306 DynInstPtr dummy;
307 loadQueue.push_back(dummy);
308 LQEntries++;
309 }
310 } else {
311 LQEntries = size_plus_sentinel;
312 }
313
314}
315
316template<class Impl>
317void
318LSQUnit<Impl>::resizeSQ(unsigned size)
319{
320 unsigned size_plus_sentinel = size + 1;
321 if (size_plus_sentinel > SQEntries) {
322 while (size_plus_sentinel > storeQueue.size()) {
323 SQEntry dummy;
324 storeQueue.push_back(dummy);
325 SQEntries++;
326 }
327 } else {
328 SQEntries = size_plus_sentinel;
329 }
330}
331
332template <class Impl>
333void
334LSQUnit<Impl>::insert(DynInstPtr &inst)
335{
336 assert(inst->isMemRef());
337
338 assert(inst->isLoad() || inst->isStore());
339
340 if (inst->isLoad()) {
341 insertLoad(inst);
342 } else {
343 insertStore(inst);
344 }
345
346 inst->setInLSQ();
347}
348
349template <class Impl>
350void
351LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
352{
353 assert((loadTail + 1) % LQEntries != loadHead);
354 assert(loads < LQEntries);
355
356 DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
357 load_inst->pcState(), loadTail, load_inst->seqNum);
358
359 load_inst->lqIdx = loadTail;
360
361 if (stores == 0) {
362 load_inst->sqIdx = -1;
363 } else {
364 load_inst->sqIdx = storeTail;
365 }
366
367 loadQueue[loadTail] = load_inst;
368
369 incrLdIdx(loadTail);
370
371 ++loads;
372}
373
374template <class Impl>
375void
376LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
377{
378 // Make sure it is not full before inserting an instruction.
379 assert((storeTail + 1) % SQEntries != storeHead);
380 assert(stores < SQEntries);
381
382 DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
383 store_inst->pcState(), storeTail, store_inst->seqNum);
384
385 store_inst->sqIdx = storeTail;
386 store_inst->lqIdx = loadTail;
387
388 storeQueue[storeTail] = SQEntry(store_inst);
389
390 incrStIdx(storeTail);
391
392 ++stores;
393}
394
395template <class Impl>
396typename Impl::DynInstPtr
397LSQUnit<Impl>::getMemDepViolator()
398{
399 DynInstPtr temp = memDepViolator;
400
401 memDepViolator = NULL;
402
403 return temp;
404}
405
406template <class Impl>
407unsigned
408LSQUnit<Impl>::numFreeEntries()
409{
410 unsigned free_lq_entries = LQEntries - loads;
411 unsigned free_sq_entries = SQEntries - stores;
412
413 // Both the LQ and SQ entries have an extra dummy entry to differentiate
414 // empty/full conditions. Subtract 1 from the free entries.
415 if (free_lq_entries < free_sq_entries) {
416 return free_lq_entries - 1;
417 } else {
418 return free_sq_entries - 1;
419 }
420}
421
422template <class Impl>
423int
424LSQUnit<Impl>::numLoadsReady()
425{
426 int load_idx = loadHead;
427 int retval = 0;
428
429 while (load_idx != loadTail) {
430 assert(loadQueue[load_idx]);
431
432 if (loadQueue[load_idx]->readyToIssue()) {
433 ++retval;
434 }
435 }
436
437 return retval;
438}
439
440template <class Impl>
441void
442LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
443{
444 int load_idx = loadHead;
445
446 if (!cacheBlockMask) {
447 assert(dcachePort);
448 Addr bs = dcachePort->peerBlockSize();
449
450 // Make sure we actually got a size
451 assert(bs != 0);
452
453 cacheBlockMask = ~(bs - 1);
454 }
455
456 // If this is the only load in the LSQ we don't care
457 if (load_idx == loadTail)
458 return;
459 incrLdIdx(load_idx);
460
461 DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());
462 Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;
463 while (load_idx != loadTail) {
464 DynInstPtr ld_inst = loadQueue[load_idx];
465
466 if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
467 incrLdIdx(load_idx);
468 continue;
469 }
470
471 Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
472 DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
473 ld_inst->seqNum, load_addr, invalidate_addr);
474
475 if (load_addr == invalidate_addr) {
476 if (ld_inst->possibleLoadViolation()) {
477 DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n",
478 ld_inst->physEffAddr, pkt->getAddr(), ld_inst->seqNum);
479
480 // Mark the load for re-execution
481 ld_inst->fault = new ReExec;
482 } else {
483 // If a older load checks this and it's true
484 // then we might have missed the snoop
485 // in which case we need to invalidate to be sure
486 ld_inst->hitExternalSnoop(true);
487 }
488 }
489 incrLdIdx(load_idx);
490 }
491 return;
492}
493
494template <class Impl>
495Fault
496LSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst)
497{
498 Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
499 Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;
500
501 /** @todo in theory you only need to check an instruction that has executed
502 * however, there isn't a good way in the pipeline at the moment to check
503 * all instructions that will execute before the store writes back. Thus,
504 * like the implementation that came before it, we're overly conservative.
505 */
506 while (load_idx != loadTail) {
507 DynInstPtr ld_inst = loadQueue[load_idx];
508 if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
509 incrLdIdx(load_idx);
510 continue;
511 }
512
513 Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
514 Addr ld_eff_addr2 =
515 (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;
516
517 if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
518 if (inst->isLoad()) {
519 // If this load is to the same block as an external snoop
520 // invalidate that we've observed then the load needs to be
521 // squashed as it could have newer data
522 if (ld_inst->hitExternalSnoop()) {
523 if (!memDepViolator ||
524 ld_inst->seqNum < memDepViolator->seqNum) {
525 DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
526 "and [sn:%lli] at address %#x\n",
527 inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
528 memDepViolator = ld_inst;
529
530 ++lsqMemOrderViolation;
531
532 return new GenericISA::M5PanicFault(
533 "Detected fault with inst [sn:%lli] and "
534 "[sn:%lli] at address %#x\n",
535 inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
536 }
537 }
538
539 // Otherwise, mark the load has a possible load violation
540 // and if we see a snoop before it's commited, we need to squash
541 ld_inst->possibleLoadViolation(true);
542 DPRINTF(LSQUnit, "Found possible load violaiton at addr: %#x"
543 " between instructions [sn:%lli] and [sn:%lli]\n",
544 inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
545 } else {
546 // A load/store incorrectly passed this store.
547 // Check if we already have a violator, or if it's newer
548 // squash and refetch.
549 if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
550 break;
551
552 DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and "
553 "[sn:%lli] at address %#x\n",
554 inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
555 memDepViolator = ld_inst;
556
557 ++lsqMemOrderViolation;
558
559 return new GenericISA::M5PanicFault("Detected fault with "
560 "inst [sn:%lli] and [sn:%lli] at address %#x\n",
561 inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
562 }
563 }
564
565 incrLdIdx(load_idx);
566 }
567 return NoFault;
568}
569
570
571
572
573template <class Impl>
574Fault
575LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
576{
577 using namespace TheISA;
578 // Execute a specific load.
579 Fault load_fault = NoFault;
580
581 DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
582 inst->pcState(), inst->seqNum);
583
584 assert(!inst->isSquashed());
585
586 load_fault = inst->initiateAcc();
587
588 if (inst->isTranslationDelayed() &&
589 load_fault == NoFault)
590 return load_fault;
591
592 // If the instruction faulted or predicated false, then we need to send it
593 // along to commit without the instruction completing.
594 if (load_fault != NoFault || inst->readPredicate() == false) {
595 // Send this instruction to commit, also make sure iew stage
596 // realizes there is activity.
597 // Mark it as executed unless it is an uncached load that
598 // needs to hit the head of commit.
599 if (inst->readPredicate() == false)
600 inst->forwardOldRegs();
601 DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
602 inst->seqNum,
603 (load_fault != NoFault ? "fault" : "predication"));
604 if (!(inst->hasRequest() && inst->uncacheable()) ||
605 inst->isAtCommit()) {
606 inst->setExecuted();
607 }
608 iewStage->instToCommit(inst);
609 iewStage->activityThisCycle();
610 } else if (!loadBlocked()) {
611 assert(inst->effAddrValid());
612 int load_idx = inst->lqIdx;
613 incrLdIdx(load_idx);
614
615 if (checkLoads)
616 return checkViolations(load_idx, inst);
617 }
618
619 return load_fault;
620}
621
622template <class Impl>
623Fault
624LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
625{
626 using namespace TheISA;
627 // Make sure that a store exists.
628 assert(stores != 0);
629
630 int store_idx = store_inst->sqIdx;
631
632 DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n",
633 store_inst->pcState(), store_inst->seqNum);
634
635 assert(!store_inst->isSquashed());
636
637 // Check the recently completed loads to see if any match this store's
638 // address. If so, then we have a memory ordering violation.
639 int load_idx = store_inst->lqIdx;
640
641 Fault store_fault = store_inst->initiateAcc();
642
643 if (store_inst->isTranslationDelayed() &&
644 store_fault == NoFault)
645 return store_fault;
646
647 if (store_inst->readPredicate() == false)
648 store_inst->forwardOldRegs();
649
650 if (storeQueue[store_idx].size == 0) {
651 DPRINTF(LSQUnit,"Fault on Store PC %s, [sn:%lli], Size = 0\n",
652 store_inst->pcState(), store_inst->seqNum);
653
654 return store_fault;
655 } else if (store_inst->readPredicate() == false) {
656 DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
657 store_inst->seqNum);
658 return store_fault;
659 }
660
661 assert(store_fault == NoFault);
662
663 if (store_inst->isStoreConditional()) {
664 // Store conditionals need to set themselves as able to
665 // writeback if we haven't had a fault by here.
666 storeQueue[store_idx].canWB = true;
667
668 ++storesToWB;
669 }
670
671 return checkViolations(load_idx, store_inst);
672
673}
674
675template <class Impl>
676void
677LSQUnit<Impl>::commitLoad()
678{
679 assert(loadQueue[loadHead]);
680
681 DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n",
682 loadQueue[loadHead]->pcState());
683
684 loadQueue[loadHead] = NULL;
685
686 incrLdIdx(loadHead);
687
688 --loads;
689}
690
691template <class Impl>
692void
693LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
694{
695 assert(loads == 0 || loadQueue[loadHead]);
696
697 while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
698 commitLoad();
699 }
700}
701
702template <class Impl>
703void
704LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
705{
706 assert(stores == 0 || storeQueue[storeHead].inst);
707
708 int store_idx = storeHead;
709
710 while (store_idx != storeTail) {
711 assert(storeQueue[store_idx].inst);
712 // Mark any stores that are now committed and have not yet
713 // been marked as able to write back.
714 if (!storeQueue[store_idx].canWB) {
715 if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
716 break;
717 }
718 DPRINTF(LSQUnit, "Marking store as able to write back, PC "
719 "%s [sn:%lli]\n",
720 storeQueue[store_idx].inst->pcState(),
721 storeQueue[store_idx].inst->seqNum);
722
723 storeQueue[store_idx].canWB = true;
724
725 ++storesToWB;
726 }
727
728 incrStIdx(store_idx);
729 }
730}
731
732template <class Impl>
733void
734LSQUnit<Impl>::writebackPendingStore()
735{
736 if (hasPendingPkt) {
737 assert(pendingPkt != NULL);
738
739 // If the cache is blocked, this will store the packet for retry.
740 if (sendStore(pendingPkt)) {
741 storePostSend(pendingPkt);
742 }
743 pendingPkt = NULL;
744 hasPendingPkt = false;
745 }
746}
747
template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    // First writeback the second packet from any split store that didn't
    // complete last cycle because there weren't enough cache ports available.
    if (TheISA::HasUnalignedMemAcc) {
        writebackPendingStore();
    }

    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           (!needsTSO || !storeInFlight) &&
           usedPorts < cachePorts) {

        if (isStoreBlocked || lsq->cacheBlocked()) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
            assert(storeQueue[storeWBIdx].sreqLow);
            assert(storeQueue[storeWBIdx].sreqHigh);
        }

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
        RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;

        storeQueue[storeWBIdx].committed = true;

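        // Copy the store's data into a buffer sized to the request
        // and owned by the instruction, so the packet(s) built below
        // can reference it for the lifetime of the access.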
        assert(!inst->memData);
        inst->memData = new uint8_t[req->getSize()];

        memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());

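        // Pick the memory command from the request type: atomic swaps
        // and store-conditionals need their own commands; everything
        // else is a plain write.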
        MemCmd command =
            req->isSwap() ? MemCmd::SwapReq :
            (req->isLLSC() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
        PacketPtr data_pkt;
        PacketPtr snd_data_pkt = NULL;

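        // The sender state travels with the packet(s) so the response
        // path can find the store queue entry and instruction that
        // issued this access.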
        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;

        if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {

            // Build a single data packet if the store isn't split.
            data_pkt = new Packet(req, command);
            data_pkt->dataStatic(inst->memData);
            data_pkt->senderState = state;
        } else {
            // Create two packets if the store is split in two.
            data_pkt = new Packet(sreqLow, command);
            snd_data_pkt = new Packet(sreqHigh, command);

            data_pkt->dataStatic(inst->memData);
            snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());

            data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;

            // Can delete the main request now.
            delete req;
            req = sreqLow;
        }

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->pcState(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            assert(!storeQueue[storeWBIdx].isSplit);
            // Disable recording the result temporarily. Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult(false);
            bool success = TheISA::handleLockedWrite(inst.get(), req);
            inst->recordResult(true);

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. "
                        "Instantly completing it.\n",
                        inst->seqNum);
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                cpu->schedule(wb, curTick() + 1);
                if (cpu->checker) {
                    // Make sure to set the LLSC data for verification
                    // if checker is loaded.
                    inst->reqToVerify->setExtraData(0);
                    inst->completeAcc(data_pkt);
                }
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        bool split =
            TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;

        ThreadContext *thread = cpu->tcBase(lsqID);

        if (req->isMmappedIpr()) {
            assert(!inst->isStoreConditional());
            TheISA::handleIprWrite(thread, data_pkt);
            delete data_pkt;
            if (split) {
                assert(snd_data_pkt->req->isMmappedIpr());
                TheISA::handleIprWrite(thread, snd_data_pkt);
                delete snd_data_pkt;
                delete sreqLow;
                delete sreqHigh;
            }
            delete state;
            delete req;
            completeStore(storeWBIdx);
            incrStIdx(storeWBIdx);
        } else if (!sendStore(data_pkt)) {
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], "
                    "will retry later\n",
                    inst->seqNum);

            // Need to store the second packet, if split.
            if (split) {
                state->pktToSend = true;
                state->pendingPacket = snd_data_pkt;
            }
        } else {

            // If split, try to send the second packet too.
            if (split) {
                assert(snd_data_pkt);

                // Ensure there are enough ports to use.
                if (usedPorts < cachePorts) {
                    ++usedPorts;
                    if (sendStore(snd_data_pkt)) {
                        storePostSend(snd_data_pkt);
                    } else {
                        DPRINTF(IEW, "D-Cache became blocked when writing"
                                " [sn:%lli] second packet, will retry later\n",
                                inst->seqNum);
                    }
                } else {

                    // Store the packet for when there are free ports.
                    assert(pendingPkt == NULL);
                    pendingPkt = snd_data_pkt;
                    hasPendingPkt = true;
                }
            } else {

                // Not a split store.
                storePostSend(data_pkt);
            }
        }
    }

    // Reset the number of used cache ports for the next cycle.
    // @todo: Verify that zeroing it here is the right policy.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n", mshrSeqNums.size());
    }
}*/

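// Squashes everything younger than squashed_num from both queues.
// Loads are walked from the tail and freed outright; the store walk
// stops at the first entry already marked canWB, since those stores
// are committed and must still write back.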
template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit, "Load Instruction PC %s squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->pcState(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit, "Store Instruction PC %s squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->pcState(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // This should not happen; the stall should have been cleared
        // by the stalling load already.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = false;

        // Must delete request now that it wasn't handed off to
        // memory. This is quite ugly. @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;
        if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
            delete storeQueue[store_idx].sreqLow;
            delete storeQueue[store_idx].sreqHigh;

            storeQueue[store_idx].sreqLow = NULL;
            storeQueue[store_idx].sreqHigh = NULL;
        }

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

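// Bookkeeping after a store packet has been handed to the cache:
// clears any load stall this store was responsible for, marks
// non-conditional stores completed (their result is already known),
// and records an in-flight store when TSO ordering is enforced.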
template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();

        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
    }

    if (needsTSO) {
        storeInFlight = true;
    }

    incrStIdx(storeWBIdx);
}

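// Completes a load (or a failed store conditional, via its writeback
// event) when the response arrives: completeAcc() copies the data
// into the destination registers, then the instruction is handed to
// commit.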
template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit.
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();

    // See if this load changed the PC.
    iewStage->checkMisprediction(inst);
}

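// Retires a store once its writeback is done. Entries are only freed
// from the head, so a completed entry in the middle of the queue
// stays allocated until every older store has also completed.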
template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    if (needsTSO) {
        storeInFlight = false;
    }

    // Tell the checker we've completed this instruction. Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
}

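// Attempts to send a store packet to the data cache. On failure the
// packet is parked in retryPkt and the unit marks itself blocked
// until recvRetry() is called.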
template <class Impl>
bool
LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
{
    if (!dcachePort->sendTimingReq(data_pkt)) {
        // Need to handle becoming blocked on a store.
        isStoreBlocked = true;
        ++lsqCacheBlocked;
        assert(retryPkt == NULL);
        retryPkt = data_pkt;
        lsq->setRetryTid(lsqID);
        return false;
    }
    return true;
}

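// Called when the cache signals it can accept requests again.
// Resends retryPkt; for split stores, the bookkeeping in
// storePostSend() is only performed once the final packet is out.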
template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Receiving retry: store blocked\n");
        assert(retryPkt != NULL);

        LSQSenderState *state =
            dynamic_cast<LSQSenderState *>(retryPkt->senderState);

        if (dcachePort->sendTimingReq(retryPkt)) {
            // Don't finish the store unless this is the last packet.
            if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
                state->pendingPacket == retryPkt) {
                state->pktToSend = false;
                storePostSend(retryPkt);
            }
            retryPkt = NULL;
            isStoreBlocked = false;
            lsq->setRetryTid(InvalidThreadID);

            // Send any outstanding packet.
            if (TheISA::HasUnalignedMemAcc && state->pktToSend) {
                assert(state->pendingPacket);
                if (sendStore(state->pendingPacket)) {
                    storePostSend(state->pendingPacket);
                }
            }
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
            lsq->setRetryTid(lsqID);
        }
    } else if (isLoadBlocked) {
        DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
                "no need to resend packet.\n");
    } else {
        DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
    }
}

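// The queues are circular buffers, so the index helpers below wrap
// around at the queue size. For example, with SQEntries == 8,
// incrStIdx turns 7 into 0 and decrStIdx turns 0 back into 7.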
template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx)
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx)
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx)
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx)
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

template <class Impl>
void
LSQUnit<Impl>::dumpInsts()
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        cprintf("%s ", loadQueue[load_idx]->pcState());

        incrLdIdx(load_idx);
    }
    // Terminate the load queue listing so the store queue header
    // starts on its own line.
    cprintf("\n");

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        cprintf("%s ", storeQueue[store_idx].inst->pcState());

        incrStIdx(store_idx);
    }

    cprintf("\n");
}