lsq_unit_impl.hh (revision 2698:d5f35d41e017; changes from 2693:18c6be231eb1)
1/*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Kevin Lim
29 * Korey Sewell
30 */
31
32#include "cpu/checker/cpu.hh"
33#include "cpu/o3/lsq_unit.hh"
34#include "base/str.hh"
35#include "mem/request.hh"
36
37template<class Impl>
38LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
39 LSQUnit *lsq_ptr)
40 : Event(&mainEventQueue), inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
41{
42 this->setFlags(Event::AutoDelete);
43}
44
45template<class Impl>
46void
47LSQUnit<Impl>::WritebackEvent::process()
48{
49 if (!lsqPtr->isSwitchedOut()) {
50 lsqPtr->writeback(inst, pkt);
51 }
52 delete pkt;
53}
54
55template<class Impl>
56const char *
57LSQUnit<Impl>::WritebackEvent::description()
58{
59 return "Store writeback event";
60}
61
62template<class Impl>
63void
64LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
65{
66 LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
67 DynInstPtr inst = state->inst;
68 DPRINTF(IEW, "Writeback event [sn:%lli]\n", inst->seqNum);
69 DPRINTF(Activity, "Activity: Writeback event [sn:%lli]\n", inst->seqNum);
70
71 //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);
72
73 if (isSwitchedOut() || inst->isSquashed()) {
74 delete state;
75 delete pkt;
76 return;
77 } else {
78 if (!state->noWB) {
79 writeback(inst, pkt);
80 }
81
82 if (inst->isStore()) {
83 completeStore(state->idx);
84 }
85 }
86
87 delete state;
88 delete pkt;
89}
90
91template <class Impl>
92Tick
93LSQUnit<Impl>::DcachePort::recvAtomic(PacketPtr pkt)
94{
95 panic("O3CPU model does not work with atomic mode!");
96 return curTick;
97}
98
99template <class Impl>
100void
101LSQUnit<Impl>::DcachePort::recvFunctional(PacketPtr pkt)
102{
103 panic("O3CPU doesn't expect recvFunctional callback!");
104}
105
106template <class Impl>
107void
108LSQUnit<Impl>::DcachePort::recvStatusChange(Status status)
109{
110 if (status == RangeChange)
111 return;
112
113 panic("O3CPU doesn't expect recvStatusChange callback!");
114}
115
116template <class Impl>
117bool
118LSQUnit<Impl>::DcachePort::recvTiming(PacketPtr pkt)
119{
120 lsq->completeDataAccess(pkt);
121 return true;
122}
123
124template <class Impl>
125void
126LSQUnit<Impl>::DcachePort::recvRetry()
127{
128 lsq->recvRetry();
129}
130
131template <class Impl>
132LSQUnit<Impl>::LSQUnit()
133 : loads(0), stores(0), storesToWB(0), stalled(false),
134 isStoreBlocked(false), isLoadBlocked(false),
135 loadBlockedHandled(false)
136{
137}
138
139template<class Impl>
140void
141LSQUnit<Impl>::init(Params *params, unsigned maxLQEntries,
142 unsigned maxSQEntries, unsigned id)
143{
144 DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id);
145
146 switchedOut = false;
147
148 lsqID = id;
149
150 // Add 1 for the sentinel entry (they are circular queues).
151 LQEntries = maxLQEntries + 1;
152 SQEntries = maxSQEntries + 1;
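    // With the extra sentinel slot, head == tail means the queue is empty
    // and (tail + 1) % entries == head means it is full (see the asserts
    // in insertLoad()/insertStore()), so the requested number of real
    // entries still fits.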
153
154 loadQueue.resize(LQEntries);
155 storeQueue.resize(SQEntries);
156
157 loadHead = loadTail = 0;
158
159 storeHead = storeWBIdx = storeTail = 0;
160
161 usedPorts = 0;
162 cachePorts = params->cachePorts;
163
164 mem = params->mem;
165
166 memDepViolator = NULL;
167
168 blockedLoadSeqNum = 0;
169}
170
171template<class Impl>
172void
173LSQUnit<Impl>::setCPU(FullCPU *cpu_ptr)
174{
175 cpu = cpu_ptr;
176 dcachePort = new DcachePort(cpu, this);
177
178 Port *mem_dport = mem->getPort("");
179 dcachePort->setPeer(mem_dport);
180 mem_dport->setPeer(dcachePort);
181
182 if (cpu->checker) {
183 cpu->checker->setDcachePort(dcachePort);
184 }
185}
186
187template<class Impl>
188std::string
189LSQUnit<Impl>::name() const
190{
191 if (Impl::MaxThreads == 1) {
192 return iewStage->name() + ".lsq";
193 } else {
194 return iewStage->name() + ".lsq.thread." + to_string(lsqID);
195 }
196}
197
198template<class Impl>
199void
200LSQUnit<Impl>::clearLQ()
201{
202 loadQueue.clear();
203}
204
205template<class Impl>
206void
207LSQUnit<Impl>::clearSQ()
208{
209 storeQueue.clear();
210}
211
212template<class Impl>
213void
214LSQUnit<Impl>::switchOut()
215{
216 switchedOut = true;
217 for (int i = 0; i < loadQueue.size(); ++i)
218 loadQueue[i] = NULL;
219
220 assert(storesToWB == 0);
221}
222
223template<class Impl>
224void
225LSQUnit<Impl>::takeOverFrom()
226{
227 switchedOut = false;
228 loads = stores = storesToWB = 0;
229
230 loadHead = loadTail = 0;
231
232 storeHead = storeWBIdx = storeTail = 0;
233
234 usedPorts = 0;
235
236 memDepViolator = NULL;
237
238 blockedLoadSeqNum = 0;
239
240 stalled = false;
241 isLoadBlocked = false;
242 loadBlockedHandled = false;
243}
244
245template<class Impl>
246void
247LSQUnit<Impl>::resizeLQ(unsigned size)
248{
249 unsigned size_plus_sentinel = size + 1;
250 assert(size_plus_sentinel >= LQEntries);
251
252 if (size_plus_sentinel > LQEntries) {
253 while (size_plus_sentinel > loadQueue.size()) {
254 DynInstPtr dummy;
255 loadQueue.push_back(dummy);
256 LQEntries++;
257 }
258 } else {
259 LQEntries = size_plus_sentinel;
260 }
261
262}
263
264template<class Impl>
265void
266LSQUnit<Impl>::resizeSQ(unsigned size)
267{
268 unsigned size_plus_sentinel = size + 1;
269 if (size_plus_sentinel > SQEntries) {
270 while (size_plus_sentinel > storeQueue.size()) {
271 SQEntry dummy;
272 storeQueue.push_back(dummy);
273 SQEntries++;
274 }
275 } else {
276 SQEntries = size_plus_sentinel;
277 }
278}
279
280template <class Impl>
281void
282LSQUnit<Impl>::insert(DynInstPtr &inst)
283{
284 assert(inst->isMemRef());
285
286 assert(inst->isLoad() || inst->isStore());
287
288 if (inst->isLoad()) {
289 insertLoad(inst);
290 } else {
291 insertStore(inst);
292 }
293
294 inst->setInLSQ();
295}
296
297template <class Impl>
298void
299LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
300{
301 assert((loadTail + 1) % LQEntries != loadHead);
302 assert(loads < LQEntries);
303
304 DPRINTF(LSQUnit, "Inserting load PC %#x, idx:%i [sn:%lli]\n",
305 load_inst->readPC(), loadTail, load_inst->seqNum);
306
307 load_inst->lqIdx = loadTail;
308
309 if (stores == 0) {
310 load_inst->sqIdx = -1;
311 } else {
312 load_inst->sqIdx = storeTail;
313 }
314
315 loadQueue[loadTail] = load_inst;
316
317 incrLdIdx(loadTail);
318
319 ++loads;
320}
321
322template <class Impl>
323void
324LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
325{
326 // Make sure it is not full before inserting an instruction.
327 assert((storeTail + 1) % SQEntries != storeHead);
328 assert(stores < SQEntries);
329
330 DPRINTF(LSQUnit, "Inserting store PC %#x, idx:%i [sn:%lli]\n",
331 store_inst->readPC(), storeTail, store_inst->seqNum);
332
333 store_inst->sqIdx = storeTail;
334 store_inst->lqIdx = loadTail;
335
336 storeQueue[storeTail] = SQEntry(store_inst);
337
338 incrStIdx(storeTail);
339
340 ++stores;
341}
342
343template <class Impl>
344typename Impl::DynInstPtr
345LSQUnit<Impl>::getMemDepViolator()
346{
347 DynInstPtr temp = memDepViolator;
348
349 memDepViolator = NULL;
350
351 return temp;
352}
353
354template <class Impl>
355unsigned
356LSQUnit<Impl>::numFreeEntries()
357{
358 unsigned free_lq_entries = LQEntries - loads;
359 unsigned free_sq_entries = SQEntries - stores;
360
361 // Both the LQ and SQ entries have an extra dummy entry to differentiate
362 // empty/full conditions. Subtract 1 from the free entries.
363 if (free_lq_entries < free_sq_entries) {
364 return free_lq_entries - 1;
365 } else {
366 return free_sq_entries - 1;
367 }
368}
369
370template <class Impl>
371int
372LSQUnit<Impl>::numLoadsReady()
373{
374 int load_idx = loadHead;
375 int retval = 0;
376
377 while (load_idx != loadTail) {
378 assert(loadQueue[load_idx]);
379
380 if (loadQueue[load_idx]->readyToIssue()) {
381 ++retval;
382 }
383 }
384
385 return retval;
386}
387
388template <class Impl>
389Fault
390LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
391{
392 // Execute a specific load.
393 Fault load_fault = NoFault;
394
395 DPRINTF(LSQUnit, "Executing load PC %#x, [sn:%lli]\n",
396 inst->readPC(),inst->seqNum);
397
398 load_fault = inst->initiateAcc();
399
400 // If the instruction faulted, then we need to send it along to commit
401 // without the instruction completing.
402 if (load_fault != NoFault) {
403 // Send this instruction to commit, also make sure iew stage
404 // realizes there is activity.
405 iewStage->instToCommit(inst);
406 iewStage->activityThisCycle();
407 }
408
409 return load_fault;
410}
411
412template <class Impl>
413Fault
414LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
415{
416 using namespace TheISA;
417 // Make sure that a store exists.
418 assert(stores != 0);
419
420 int store_idx = store_inst->sqIdx;
421
422 DPRINTF(LSQUnit, "Executing store PC %#x [sn:%lli]\n",
423 store_inst->readPC(), store_inst->seqNum);
424
425 // Check the recently completed loads to see if any match this store's
426 // address. If so, then we have a memory ordering violation.
427 int load_idx = store_inst->lqIdx;
428
429 Fault store_fault = store_inst->initiateAcc();
430
431 if (storeQueue[store_idx].size == 0) {
432 DPRINTF(LSQUnit,"Fault on Store PC %#x, [sn:%lli],Size = 0\n",
433 store_inst->readPC(),store_inst->seqNum);
434
435 return store_fault;
436 }
437
438 assert(store_fault == NoFault);
439
440 if (store_inst->isStoreConditional()) {
441 // Store conditionals need to set themselves as able to
442 // writeback if we haven't had a fault by here.
443 storeQueue[store_idx].canWB = true;
444
445 ++storesToWB;
446 }
447
448 if (!memDepViolator) {
449 while (load_idx != loadTail) {
450 // Really only need to check loads that have actually executed
451 // It's safe to check all loads because effAddr is set to
452 // InvalAddr when the dyn inst is created.
453
454 // @todo: For now this is extra conservative, detecting a
455 // violation if the addresses match assuming all accesses
456 // are quad word accesses.
457
458 // @todo: Fix this, magic number being used here
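    // Shifting by 8 compares 256-byte-aligned blocks, so any load whose
    // address lands in the same 256-byte region as this store is flagged,
    // whether or not the accesses actually overlap.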
459 if ((loadQueue[load_idx]->effAddr >> 8) ==
460 (store_inst->effAddr >> 8)) {
461 // A load incorrectly passed this store. Squash and refetch.
462 // For now return a fault to show that it was unsuccessful.
463 memDepViolator = loadQueue[load_idx];
464
465 return genMachineCheckFault();
466 }
467
468 incrLdIdx(load_idx);
469 }
470
471 // If we've reached this point, there was no violation.
472 memDepViolator = NULL;
473 }
474
475 return store_fault;
476}
477
478template <class Impl>
479void
480LSQUnit<Impl>::commitLoad()
481{
482 assert(loadQueue[loadHead]);
483
484 DPRINTF(LSQUnit, "Committing head load instruction, PC %#x\n",
485 loadQueue[loadHead]->readPC());
486
487 loadQueue[loadHead] = NULL;
488
489 incrLdIdx(loadHead);
490
491 --loads;
492}
493
494template <class Impl>
495void
496LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
497{
498 assert(loads == 0 || loadQueue[loadHead]);
499
500 while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
501 commitLoad();
502 }
503}
504
505template <class Impl>
506void
507LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
508{
509 assert(stores == 0 || storeQueue[storeHead].inst);
510
511 int store_idx = storeHead;
512
513 while (store_idx != storeTail) {
514 assert(storeQueue[store_idx].inst);
515 // Mark any stores that are now committed and have not yet
516 // been marked as able to write back.
517 if (!storeQueue[store_idx].canWB) {
518 if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
519 break;
520 }
521 DPRINTF(LSQUnit, "Marking store as able to write back, PC "
522 "%#x [sn:%lli]\n",
523 storeQueue[store_idx].inst->readPC(),
524 storeQueue[store_idx].inst->seqNum);
525
526 storeQueue[store_idx].canWB = true;
527
528 ++storesToWB;
529 }
530
531 incrStIdx(store_idx);
532 }
533}
534
535template <class Impl>
536void
537LSQUnit<Impl>::writebackStores()
538{
539 while (storesToWB > 0 &&
540 storeWBIdx != storeTail &&
541 storeQueue[storeWBIdx].inst &&
542 storeQueue[storeWBIdx].canWB &&
543 usedPorts < cachePorts) {
544
545 if (isStoreBlocked) {
546 DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
547 " is blocked!\n");
548 break;
549 }
550
551 // Store didn't write any data so no need to write it back to
552 // memory.
553 if (storeQueue[storeWBIdx].size == 0) {
554 completeStore(storeWBIdx);
555
556 incrStIdx(storeWBIdx);
557
558 continue;
559 }
560
561 ++usedPorts;
562
563 if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
564 incrStIdx(storeWBIdx);
565
566 continue;
567 }
568
569 assert(storeQueue[storeWBIdx].req);
570 assert(!storeQueue[storeWBIdx].committed);
571
572 DynInstPtr inst = storeQueue[storeWBIdx].inst;
573
574 Request *req = storeQueue[storeWBIdx].req;
575 storeQueue[storeWBIdx].committed = true;
576
577 assert(!inst->memData);
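        // Fixed 64-byte staging buffer; only req->getSize() bytes of it
        // are filled by the memcpy below.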
578 inst->memData = new uint8_t[64];
579 memcpy(inst->memData, (uint8_t *)&storeQueue[storeWBIdx].data,
580 req->getSize());
581
582 PacketPtr data_pkt = new Packet(req, Packet::WriteReq, Packet::Broadcast);
583 data_pkt->dataStatic(inst->memData);
584
585 LSQSenderState *state = new LSQSenderState;
586 state->isLoad = false;
587 state->idx = storeWBIdx;
588 state->inst = inst;
589 data_pkt->senderState = state;
590
591 DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%#x "
592 "to Addr:%#x, data:%#x [sn:%lli]\n",
593 storeWBIdx, storeQueue[storeWBIdx].inst->readPC(),
594 req->getPaddr(), *(inst->memData),
595 storeQueue[storeWBIdx].inst->seqNum);
596
597 // @todo: Remove this SC hack once the memory system handles it.
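        // For store conditionals (LOCKED requests): an SC to uncacheable
        // space gets result 2, an SC whose lock flag is still set succeeds
        // (result 1), and a failed SC (result 0) is completed immediately
        // below without being sent to memory.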
598 if (req->getFlags() & LOCKED) {
599 if (req->getFlags() & UNCACHEABLE) {
600 req->setScResult(2);
601 } else {
602 if (cpu->lockFlag) {
603 req->setScResult(1);
604 } else {
605 req->setScResult(0);
606 // Hack: Instantly complete this store.
607 completeDataAccess(data_pkt);
608 incrStIdx(storeWBIdx);
609 continue;
610 }
611 }
612 } else {
613 // Non-store conditionals do not need a writeback.
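            // completeDataAccess() checks noWB and skips the writeback()
            // call for them; a store conditional, by contrast, still needs
            // to write back its success/failure result.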
614 state->noWB = true;
615 }
616
617 if (!dcachePort->sendTiming(data_pkt)) {
618 // Need to handle becoming blocked on a store.
619 isStoreBlocked = true;
620
621 assert(retryPkt == NULL);
622 retryPkt = data_pkt;
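            // The rejected packet is parked in retryPkt; when the cache
            // unblocks it calls recvRetry(), which resends the packet and
            // clears isStoreBlocked (see recvRetry() below).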
623 } else {
624 storePostSend(data_pkt);
625 }
626 }
627
628 // Not sure this should set it to 0.
629 usedPorts = 0;
630
631 assert(stores >= 0 && storesToWB >= 0);
632}
633
634/*template <class Impl>
635void
636LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
637{
638 list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
639 mshrSeqNums.end(),
640 seqNum);
641
642 if (mshr_it != mshrSeqNums.end()) {
643 mshrSeqNums.erase(mshr_it);
644 DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
645 }
646}*/
647
648template <class Impl>
649void
650LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
651{
652 DPRINTF(LSQUnit, "Squashing until [sn:%lli]!"
653 "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);
654
655 int load_idx = loadTail;
656 decrLdIdx(load_idx);
657
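    // Walk the load queue from the tail (youngest) toward the head,
    // squashing every load younger than squashed_num and stopping at the
    // first one old enough to survive.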
658 while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
659 DPRINTF(LSQUnit,"Load Instruction PC %#x squashed, "
660 "[sn:%lli]\n",
661 loadQueue[load_idx]->readPC(),
662 loadQueue[load_idx]->seqNum);
663
664 if (isStalled() && load_idx == stallingLoadIdx) {
665 stalled = false;
666 stallingStoreIsn = 0;
667 stallingLoadIdx = 0;
668 }
669
670 // Clear the smart pointer to make sure it is decremented.
671 loadQueue[load_idx]->squashed = true;
672 loadQueue[load_idx] = NULL;
673 --loads;
674
675 // Inefficient!
676 loadTail = load_idx;
677
678 decrLdIdx(load_idx);
679 }
680
681 if (isLoadBlocked) {
682 if (squashed_num < blockedLoadSeqNum) {
683 isLoadBlocked = false;
684 loadBlockedHandled = false;
685 blockedLoadSeqNum = 0;
686 }
687 }
688
689 int store_idx = storeTail;
690 decrStIdx(store_idx);
691
692 while (stores != 0 &&
693 storeQueue[store_idx].inst->seqNum > squashed_num) {
694 // Instructions marked as can WB are already committed.
695 if (storeQueue[store_idx].canWB) {
696 break;
697 }
698
699 DPRINTF(LSQUnit,"Store Instruction PC %#x squashed, "
700 "idx:%i [sn:%lli]\n",
701 storeQueue[store_idx].inst->readPC(),
702 store_idx, storeQueue[store_idx].inst->seqNum);
703
704 // I don't think this can happen. It should have been cleared
705 // by the stalling load.
706 if (isStalled() &&
707 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
708 panic("Is stalled should have been cleared by stalling load!\n");
709 stalled = false;
710 stallingStoreIsn = 0;
711 }
712
713 // Clear the smart pointer to make sure it is decremented.
714 storeQueue[store_idx].inst->squashed = true;
715 storeQueue[store_idx].inst = NULL;
716 storeQueue[store_idx].canWB = 0;
717
718 storeQueue[store_idx].req = NULL;
719 --stores;
720
721 // Inefficient!
722 storeTail = store_idx;
723
724 decrStIdx(store_idx);
725 }
726}
727
728template <class Impl>
729void
730LSQUnit<Impl>::storePostSend(Packet *pkt)
731{
732 if (isStalled() &&
733 storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
734 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
735 "load idx:%i\n",
736 stallingStoreIsn, stallingLoadIdx);
737 stalled = false;
738 stallingStoreIsn = 0;
739 iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
740 }
741
742 if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
743 // The store is basically completed at this time. This
744 // only works so long as the checker doesn't try to
745 // verify the value in memory for stores.
746 storeQueue[storeWBIdx].inst->setCompleted();
747 if (cpu->checker) {
748 cpu->checker->tick(storeQueue[storeWBIdx].inst);
749 }
750 }
751
752 if (pkt->result != Packet::Success) {
753 DPRINTF(LSQUnit,"D-Cache Write Miss on idx:%i!\n",
754 storeWBIdx);
755
756 DPRINTF(Activity, "Active st accessing mem miss [sn:%lli]\n",
757 storeQueue[storeWBIdx].inst->seqNum);
758
759 //mshrSeqNums.push_back(storeQueue[storeWBIdx].inst->seqNum);
760
761 //DPRINTF(LSQUnit, "Added MSHR. count = %i\n",mshrSeqNums.size());
762
763 // @todo: Increment stat here.
764 } else {
765 DPRINTF(LSQUnit,"D-Cache: Write Hit on idx:%i !\n",
766 storeWBIdx);
767
768 DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n",
769 storeQueue[storeWBIdx].inst->seqNum);
770 }
771
772 incrStIdx(storeWBIdx);
773}
774
775template <class Impl>
776void
777LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
778{
779 iewStage->wakeCPU();
780
781 // Squashed instructions do not need to complete their access.
782 if (inst->isSquashed()) {
783 assert(!inst->isStore());
784 return;
785 }
786
787 if (!inst->isExecuted()) {
788 inst->setExecuted();
789
790 // Complete access to copy data to proper place.
791 inst->completeAcc(pkt);
792 }
793
794 // Need to insert instruction into queue to commit
795 iewStage->instToCommit(inst);
796
797 iewStage->activityThisCycle();
798}
799
800template <class Impl>
801void
802LSQUnit<Impl>::completeStore(int store_idx)
803{
804 assert(storeQueue[store_idx].inst);
805 storeQueue[store_idx].completed = true;
806 --storesToWB;
807 // A bit conservative because a store completion may not free up entries,
808 // but hopefully avoids two store completions in one cycle from making
809 // the CPU tick twice.
810 cpu->activityThisCycle();
811
812 if (store_idx == storeHead) {
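        // Stores may complete out of order; the head only advances over a
        // contiguous run of completed entries, so SQ slots are reclaimed
        // in program order.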
813 do {
814 incrStIdx(storeHead);
815
816 --stores;
817 } while (storeQueue[storeHead].completed &&
818 storeHead != storeTail);
819
820 iewStage->updateLSQNextCycle = true;
821 }
822
823 DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
824 "idx:%i\n",
825 storeQueue[store_idx].inst->seqNum, store_idx, storeHead);
826
827 if (isStalled() &&
828 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
829 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
830 "load idx:%i\n",
831 stallingStoreIsn, stallingLoadIdx);
832 stalled = false;
833 stallingStoreIsn = 0;
834 iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
835 }
836
837 storeQueue[store_idx].inst->setCompleted();
838
839 // Tell the checker we've completed this instruction. Some stores
840 // may get reported twice to the checker, but the checker can
841 // handle that case.
842 if (cpu->checker) {
843 cpu->checker->tick(storeQueue[store_idx].inst);
844 }
845}
846
847template <class Impl>
848void
849LSQUnit<Impl>::recvRetry()
850{
851 if (isStoreBlocked) {
852 assert(retryPkt != NULL);
853
854 if (dcachePort->sendTiming(retryPkt)) {
855 storePostSend(retryPkt);
856 retryPkt = NULL;
857 isStoreBlocked = false;
858 } else {
859 // Still blocked!
860 }
861 } else if (isLoadBlocked) {
862 DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
863 "no need to resend packet.\n");
864 } else {
865 DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
866 }
867}
868
869template <class Impl>
870inline void
871LSQUnit<Impl>::incrStIdx(int &store_idx)
872{
873 if (++store_idx >= SQEntries)
874 store_idx = 0;
875}
876
877template <class Impl>
878inline void
879LSQUnit<Impl>::decrStIdx(int &store_idx)
880{
881 if (--store_idx < 0)
882 store_idx += SQEntries;
883}
884
885template <class Impl>
886inline void
887LSQUnit<Impl>::incrLdIdx(int &load_idx)
888{
889 if (++load_idx >= LQEntries)
890 load_idx = 0;
891}
892
893template <class Impl>
894inline void
895LSQUnit<Impl>::decrLdIdx(int &load_idx)
896{
897 if (--load_idx < 0)
898 load_idx += LQEntries;
899}
900
901template <class Impl>
902void
903LSQUnit<Impl>::dumpInsts()
904{
905 cprintf("Load store queue: Dumping instructions.\n");
906 cprintf("Load queue size: %i\n", loads);
907 cprintf("Load queue: ");
908
909 int load_idx = loadHead;
910
911 while (load_idx != loadTail && loadQueue[load_idx]) {
912 cprintf("%#x ", loadQueue[load_idx]->readPC());
913
914 incrLdIdx(load_idx);
915 }
916
917 cprintf("Store queue size: %i\n", stores);
918 cprintf("Store queue: ");
919
920 int store_idx = storeHead;
921
922 while (store_idx != storeTail && storeQueue[store_idx].inst) {
923 cprintf("%#x ", storeQueue[store_idx].inst->readPC());
924
925 incrStIdx(store_idx);
926 }
927
928 cprintf("\n");
929}