// lsq_unit_impl.hh
/*
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#include "config/use_checker.hh"

#include "cpu/o3/lsq_unit.hh"
#include "base/str.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

#if USE_CHECKER
#include "cpu/checker/cpu.hh"
#endif

template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(&mainEventQueue), inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
    this->setFlags(Event::AutoDelete);
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    if (!lsqPtr->isSwitchedOut()) {
        lsqPtr->writeback(inst, pkt);
    }
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description()
{
    return "Store writeback event";
}

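// completeDataAccess() is the timing-mode completion path: the D-cache port
// hands back the response packet, and the LSQSenderState attached at send
// time identifies the instruction and, via noWB, whether writeback() should
// still run (writebackStores() sets noWB for ordinary stores).  Responses for
// squashed instructions or for a switched-out unit are dropped after
// notifying IEW through decrWb().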
template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli]\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli]\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    if (isSwitchedOut() || inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        delete state;
        delete pkt;
        return;
    } else {
        if (!state->noWB) {
            writeback(inst, pkt);
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    delete state;
    delete pkt;
}

template <class Impl>
Tick
LSQUnit<Impl>::DcachePort::recvAtomic(PacketPtr pkt)
{
    panic("O3CPU model does not work with atomic mode!");
    return curTick;
}

template <class Impl>
void
LSQUnit<Impl>::DcachePort::recvFunctional(PacketPtr pkt)
{
    panic("O3CPU doesn't expect recvFunctional callback!");
}

template <class Impl>
void
LSQUnit<Impl>::DcachePort::recvStatusChange(Status status)
{
    if (status == RangeChange)
        return;

    panic("O3CPU doesn't expect recvStatusChange callback!");
}

template <class Impl>
bool
LSQUnit<Impl>::DcachePort::recvTiming(PacketPtr pkt)
{
    lsq->completeDataAccess(pkt);
    return true;
}

template <class Impl>
void
LSQUnit<Impl>::DcachePort::recvRetry()
{
    lsq->recvRetry();
}

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false)
{
}

template<class Impl>
void
LSQUnit<Impl>::init(Params *params, unsigned maxLQEntries,
                    unsigned maxSQEntries, unsigned id)
{
    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n", id);

    switchedOut = false;

    lsqID = id;

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;
    cachePorts = params->cachePorts;

    mem = params->mem;

    memDepViolator = NULL;

    blockedLoadSeqNum = 0;
}

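// setCPU() creates the unit's D-cache port and peers it with the port
// returned by the memory object, so that timing requests sent through
// dcachePort come back via DcachePort::recvTiming()/recvRetry().  When the
// checker CPU is compiled in, it is pointed at the same port so it can
// verify memory instructions.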
template<class Impl>
void
LSQUnit<Impl>::setCPU(O3CPU *cpu_ptr)
{
    cpu = cpu_ptr;
    dcachePort = new DcachePort(cpu, this);

    Port *mem_dport = mem->getPort("");
    dcachePort->setPeer(mem_dport);
    mem_dport->setPeer(dcachePort);

#if USE_CHECKER
    if (cpu->checker) {
        cpu->checker->setDcachePort(dcachePort);
    }
#endif
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread." + to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the instruction is squashed");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::switchOut()
{
    switchedOut = true;
    for (int i = 0; i < loadQueue.size(); ++i)
        loadQueue[i] = NULL;

    assert(storesToWB == 0);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    switchedOut = false;
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;
}

template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }
}

template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }
}

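// Instructions enter the LSQ from dispatch in program order.  A store
// remembers the current loadTail in lqIdx, which marks where any loads
// younger than it will be placed (executeStore() scans from there).  A load
// records the current storeTail in sqIdx (or -1 if the store queue is
// empty), which presumably bounds the store-forwarding search performed in
// lsq_unit.hh's access path.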
template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %#x, idx:%i [sn:%lli]\n",
            load_inst->readPC(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %#x, idx:%i [sn:%lli]\n",
            store_inst->readPC(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

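// Worked example of the sentinel accounting: with maxLQEntries == 32 the
// constructor sets LQEntries = 33, and insertLoad() asserts that
// (loadTail + 1) % LQEntries != loadHead, so at most 32 loads can ever be
// resident.  With 10 loads and 4 stores outstanding (and SQEntries == 33 as
// well), free_lq_entries == 23 and free_sq_entries == 29, so this returns
// min(23, 29) - 1 == 22: the number of additional memory instructions
// dispatch may safely send, whichever queue they land in.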
template <class Impl>
unsigned
LSQUnit<Impl>::numFreeEntries()
{
    unsigned free_lq_entries = LQEntries - loads;
    unsigned free_sq_entries = SQEntries - stores;

    // Both the LQ and SQ entries have an extra dummy entry to differentiate
    // empty/full conditions. Subtract 1 from the free entries.
    if (free_lq_entries < free_sq_entries) {
        return free_lq_entries - 1;
    } else {
        return free_sq_entries - 1;
    }
}

template <class Impl>
int
LSQUnit<Impl>::numLoadsReady()
{
    int load_idx = loadHead;
    int retval = 0;

    while (load_idx != loadTail) {
        assert(loadQueue[load_idx]);

        if (loadQueue[load_idx]->readyToIssue()) {
            ++retval;
        }

        // Advance the index; without this the loop never terminates.
        incrLdIdx(load_idx);
    }

    return retval;
}

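// executeLoad() only starts the access: initiateAcc() computes the effective
// address and kicks off the (timing) request.  The data arrives later, either
// through DcachePort::recvTiming() -> completeDataAccess() -> writeback(), or
// through a WritebackEvent scheduled elsewhere in the LSQ (e.g. when the
// value can be supplied by an older store).  Only a faulting load is sent
// straight to commit here.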
template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %#x, [sn:%lli]\n",
            inst->readPC(), inst->seqNum);

    load_fault = inst->initiateAcc();

    // If the instruction faulted, then we need to send it along to commit
    // without the instruction completing.
    if (load_fault != NoFault) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    }

    return load_fault;
}

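// executeStore() computes the store's address and data via initiateAcc() but
// does not touch memory; the actual write happens in writebackStores() after
// commit.  It also performs the memory-ordering check: every load younger
// than this store (from lqIdx up to loadTail) is compared against the store's
// effective address.  The comparison drops the low 8 address bits, so it
// conservatively treats any two accesses in the same 256-byte region as a
// match; the in-code @todo notes that this magic number should be fixed.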
template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %#x [sn:%lli]\n",
            store_inst->readPC(), store_inst->seqNum);

    // Check the recently completed loads to see if any match this store's
    // address. If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit, "Fault on Store PC %#x, [sn:%lli],Size = 0\n",
                store_inst->readPC(), store_inst->seqNum);

        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    if (!memDepViolator) {
        while (load_idx != loadTail) {
            // Really only need to check loads that have actually executed.
            // It's safe to check all loads because effAddr is set to
            // InvalAddr when the dyn inst is created.

            // @todo: For now this is extra conservative, detecting a
            // violation if the addresses match assuming all accesses
            // are quad word accesses.

            // @todo: Fix this, magic number being used here
            if ((loadQueue[load_idx]->effAddr >> 8) ==
                (store_inst->effAddr >> 8)) {
                // A load incorrectly passed this store. Squash and refetch.
                // For now return a fault to show that it was unsuccessful.
                memDepViolator = loadQueue[load_idx];

                return genMachineCheckFault();
            }

            incrLdIdx(load_idx);
        }

        // If we've reached this point, there was no violation.
        memDepViolator = NULL;
    }

    return store_fault;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %#x\n",
            loadQueue[loadHead]->readPC());

    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%#x [sn:%lli]\n",
                    storeQueue[store_idx].inst->readPC(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

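// writebackStores() walks storeWBIdx from the oldest store that has been
// marked canWB (by commitStores(), or by executeStore() for store
// conditionals) toward storeTail, issuing at most cachePorts writes per
// cycle.  A failed sendTiming() stashes the packet in retryPkt and sets
// isStoreBlocked until recvRetry() arrives.  The LOCKED/UNCACHEABLE block
// below is an acknowledged hack that resolves store-conditional results in
// the LSQ instead of in the memory system.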
template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           usedPorts < cachePorts) {

        if (isStoreBlocked) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[64];
        memcpy(inst->memData, (uint8_t *)&storeQueue[storeWBIdx].data,
               req->getSize());

        PacketPtr data_pkt = new Packet(req, Packet::WriteReq, Packet::Broadcast);
        data_pkt->dataStatic(inst->memData);

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;
        data_pkt->senderState = state;

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%#x "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, storeQueue[storeWBIdx].inst->readPC(),
                req->getPaddr(), *(inst->memData),
                storeQueue[storeWBIdx].inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (req->getFlags() & LOCKED) {
            if (req->getFlags() & UNCACHEABLE) {
                req->setScResult(2);
            } else {
                if (cpu->lockFlag) {
                    req->setScResult(1);
                } else {
                    req->setScResult(0);
                    // Hack: Instantly complete this store.
                    completeDataAccess(data_pkt);
                    incrStIdx(storeWBIdx);
                    continue;
                }
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        if (!dcachePort->sendTiming(data_pkt)) {
            // Need to handle becoming blocked on a store.
            isStoreBlocked = true;
            ++lsqCacheBlocked;
            assert(retryPkt == NULL);
            retryPkt = data_pkt;
        } else {
            storePostSend(data_pkt);
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n", mshrSeqNums.size());
    }
}*/

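// squash() walks both queues from the tail toward the head, removing every
// instruction younger than squashed_num.  Stores already marked canWB have
// been committed and must not be squashed, so the store walk stops as soon
// as it reaches one.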
template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]!"
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit, "Load Instruction PC %#x squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->readPC(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit, "Store Instruction PC %#x squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->readPC(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen. It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

template <class Impl>
void
LSQUnit<Impl>::storePostSend(Packet *pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();
#if USE_CHECKER
        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
#endif
    }

    if (pkt->result != Packet::Success) {
        DPRINTF(LSQUnit, "D-Cache Write Miss on idx:%i!\n",
                storeWBIdx);

        DPRINTF(Activity, "Active st accessing mem miss [sn:%lli]\n",
                storeQueue[storeWBIdx].inst->seqNum);

        //mshrSeqNums.push_back(storeQueue[storeWBIdx].inst->seqNum);

        //DPRINTF(LSQUnit, "Added MSHR. count = %i\n",mshrSeqNums.size());

        // @todo: Increment stat here.
    } else {
        DPRINTF(LSQUnit, "D-Cache: Write Hit on idx:%i !\n",
                storeWBIdx);

        DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n",
                storeQueue[storeWBIdx].inst->seqNum);
    }

    incrStIdx(storeWBIdx);
}

template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();
}

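// completeStore() runs when a store's response returns (or immediately for
// zero-size and failed store-conditional stores).  Entries are marked
// completed individually, but storeHead only advances over a contiguous run
// of completed entries, so stores still leave the queue in program order
// even when their responses arrive out of order.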
template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    // Tell the checker we've completed this instruction. Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
#if USE_CHECKER
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
#endif
}

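// recvRetry() is the other half of the blocked-store handshake: when the
// cache rejected the packet in writebackStores(), it was held in retryPkt
// and is resent here once the port signals that it can accept requests
// again.  Blocked loads need no resend because they squash themselves and
// everything younger, as the DPRINTF below notes.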
template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        assert(retryPkt != NULL);

        if (dcachePort->sendTiming(retryPkt)) {
            storePostSend(retryPkt);
            retryPkt = NULL;
            isStoreBlocked = false;
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
        }
    } else if (isLoadBlocked) {
        DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
                "no need to resend packet.\n");
    } else {
        DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
    }
}

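// The index helpers implement the circular-queue arithmetic used throughout
// this file: incrementing wraps from (QEntries - 1) back to 0, and
// decrementing wraps from 0 back to (QEntries - 1).  For example, with
// SQEntries == 33, incrStIdx(32) yields 0 and decrStIdx(0) yields 32.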
template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx)
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx)
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx)
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx)
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

template <class Impl>
void
LSQUnit<Impl>::dumpInsts()
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        cprintf("%#x ", loadQueue[load_idx]->readPC());

        incrLdIdx(load_idx);
    }

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        cprintf("%#x ", storeQueue[store_idx].inst->readPC());

        incrStIdx(store_idx);
    }

    cprintf("\n");
}