lsq_unit_impl.hh (revisions 2871:7ed5c9ef3eb6 and 2907:7b0ababb4166)
1/*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Kevin Lim
29 * Korey Sewell
30 */
31
32#include "config/use_checker.hh"
33
34#include "cpu/o3/lsq.hh"
34#include "cpu/o3/lsq_unit.hh"
35#include "base/str.hh"
36#include "mem/packet.hh"
37#include "mem/request.hh"
38
39#if USE_CHECKER
40#include "cpu/checker/cpu.hh"
41#endif
42
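// Deferred writeback helper: when the event fires it hands the packet's data
// to the instruction via writeback(), unless the unit has been switched out.
// The event is AutoDelete, so it frees itself after process(); the packet is
// deleted explicitly there.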
43template<class Impl>
44LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
45 LSQUnit *lsq_ptr)
46 : Event(&mainEventQueue), inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
47{
48 this->setFlags(Event::AutoDelete);
49}
50
51template<class Impl>
52void
53LSQUnit<Impl>::WritebackEvent::process()
54{
55 if (!lsqPtr->isSwitchedOut()) {
56 lsqPtr->writeback(inst, pkt);
57 }
58 delete pkt;
59}
60
61template<class Impl>
62const char *
63LSQUnit<Impl>::WritebackEvent::description()
64{
65 return "Store writeback event";
66}
67
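// Entry point for responses returning from the D-cache port.  Responses for a
// switched-out unit or a squashed instruction are dropped (and the pending
// writeback is released via decrWb()); otherwise the data is written back
// and, for stores, the corresponding SQ entry is completed.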
68template<class Impl>
69void
70LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
71{
72 LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
73 DynInstPtr inst = state->inst;
74 DPRINTF(IEW, "Writeback event [sn:%lli]\n", inst->seqNum);
75 DPRINTF(Activity, "Activity: Writeback event [sn:%lli]\n", inst->seqNum);
76
77 //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);
78
79 if (isSwitchedOut() || inst->isSquashed()) {
80 iewStage->decrWb(inst->seqNum);
81 delete state;
82 delete pkt;
83 return;
84 } else {
85 if (!state->noWB) {
86 writeback(inst, pkt);
87 }
88
89 if (inst->isStore()) {
90 completeStore(state->idx);
91 }
92 }
93
94 delete state;
95 delete pkt;
96}
97
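// The DcachePort callbacks below are the CPU-side end of the data-cache port.
// Only timing-mode traffic is expected: atomic and functional accesses panic,
// recvTiming hands the response straight to completeDataAccess(), and
// recvRetry re-attempts a previously blocked store.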
98template <class Impl>
99Tick
100LSQUnit<Impl>::DcachePort::recvAtomic(PacketPtr pkt)
101{
102 panic("O3CPU model does not work with atomic mode!");
103 return curTick;
104}
105
106template <class Impl>
107void
108LSQUnit<Impl>::DcachePort::recvFunctional(PacketPtr pkt)
109{
110 panic("O3CPU doesn't expect recvFunctional callback!");
111}
112
113template <class Impl>
114void
115LSQUnit<Impl>::DcachePort::recvStatusChange(Status status)
116{
117 if (status == RangeChange)
118 return;
119
120 panic("O3CPU doesn't expect recvStatusChange callback!");
121}
122
123template <class Impl>
124bool
125LSQUnit<Impl>::DcachePort::recvTiming(PacketPtr pkt)
126{
127 lsq->completeDataAccess(pkt);
128 return true;
129}
130
131template <class Impl>
132void
133LSQUnit<Impl>::DcachePort::recvRetry()
134{
135 lsq->recvRetry();
136}
137
138template <class Impl>
139LSQUnit<Impl>::LSQUnit()
140 : loads(0), stores(0), storesToWB(0), stalled(false),
141 isStoreBlocked(false), isLoadBlocked(false),
142 loadBlockedHandled(false)
143{
144}
145
146template<class Impl>
147void
109LSQUnit<Impl>::init(Params *params, LSQ *lsq_ptr, unsigned maxLQEntries,
149 unsigned maxSQEntries, unsigned id)
150{
151 DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id);
152
153 switchedOut = false;
154
116 lsq = lsq_ptr;
117
155 lsqID = id;
156
157 // Add 1 for the sentinel entry (they are circular queues).
158 LQEntries = maxLQEntries + 1;
159 SQEntries = maxSQEntries + 1;
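    // With the sentinel slot the queues are empty when head == tail and full
    // when (tail + 1) % entries == head, so the head and tail indices alone
    // distinguish the two cases (see the asserts in insertLoad()/insertStore()).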
160
161 loadQueue.resize(LQEntries);
162 storeQueue.resize(SQEntries);
163
164 loadHead = loadTail = 0;
165
166 storeHead = storeWBIdx = storeTail = 0;
167
168 usedPorts = 0;
169 cachePorts = params->cachePorts;
170
171 mem = params->mem;
172
173 memDepViolator = NULL;
174
175 blockedLoadSeqNum = 0;
176}
177
178template<class Impl>
179void
180LSQUnit<Impl>::setCPU(O3CPU *cpu_ptr)
181{
182 cpu = cpu_ptr;
183 dcachePort = new DcachePort(cpu, this);
184
185#if USE_CHECKER
186 if (cpu->checker) {
187 cpu->checker->setDcachePort(dcachePort);
188 }
189#endif
190}
191
192template<class Impl>
193std::string
194LSQUnit<Impl>::name() const
195{
196 if (Impl::MaxThreads == 1) {
197 return iewStage->name() + ".lsq";
198 } else {
199 return iewStage->name() + ".lsq.thread." + to_string(lsqID);
200 }
201}
202
203template<class Impl>
204void
205LSQUnit<Impl>::regStats()
206{
207 lsqForwLoads
208 .name(name() + ".forwLoads")
209 .desc("Number of loads that had data forwarded from stores");
210
211 invAddrLoads
212 .name(name() + ".invAddrLoads")
213 .desc("Number of loads ignored due to an invalid address");
214
215 lsqSquashedLoads
216 .name(name() + ".squashedLoads")
217 .desc("Number of loads squashed");
218
219 lsqIgnoredResponses
220 .name(name() + ".ignoredResponses")
221 .desc("Number of memory responses ignored because the instruction is squashed");
222
223 lsqSquashedStores
224 .name(name() + ".squashedStores")
225 .desc("Number of stores squashed");
226
227 invAddrSwpfs
228 .name(name() + ".invAddrSwpfs")
229 .desc("Number of software prefetches ignored due to an invalid address");
230
231 lsqBlockedLoads
232 .name(name() + ".blockedLoads")
233 .desc("Number of blocked loads due to partial load-store forwarding");
234
235 lsqRescheduledLoads
236 .name(name() + ".rescheduledLoads")
237 .desc("Number of loads that were rescheduled");
238
239 lsqCacheBlocked
240 .name(name() + ".cacheBlocked")
241 .desc("Number of times an access to memory failed due to the cache being blocked");
242}
243
244template<class Impl>
245void
246LSQUnit<Impl>::clearLQ()
247{
248 loadQueue.clear();
249}
250
251template<class Impl>
252void
253LSQUnit<Impl>::clearSQ()
254{
255 storeQueue.clear();
256}
257
258template<class Impl>
259void
260LSQUnit<Impl>::switchOut()
261{
262 switchedOut = true;
263 for (int i = 0; i < loadQueue.size(); ++i)
264 loadQueue[i] = NULL;
265
266 assert(storesToWB == 0);
267}
268
269template<class Impl>
270void
271LSQUnit<Impl>::takeOverFrom()
272{
273 switchedOut = false;
274 loads = stores = storesToWB = 0;
275
276 loadHead = loadTail = 0;
277
278 storeHead = storeWBIdx = storeTail = 0;
279
280 usedPorts = 0;
281
282 memDepViolator = NULL;
283
284 blockedLoadSeqNum = 0;
285
286 stalled = false;
287 isLoadBlocked = false;
288 loadBlockedHandled = false;
289}
290
291template<class Impl>
292void
293LSQUnit<Impl>::resizeLQ(unsigned size)
294{
295 unsigned size_plus_sentinel = size + 1;
296 assert(size_plus_sentinel >= LQEntries);
297
298 if (size_plus_sentinel > LQEntries) {
299 while (size_plus_sentinel > loadQueue.size()) {
300 DynInstPtr dummy;
301 loadQueue.push_back(dummy);
302 LQEntries++;
303 }
304 } else {
305 LQEntries = size_plus_sentinel;
306 }
307
308}
309
310template<class Impl>
311void
312LSQUnit<Impl>::resizeSQ(unsigned size)
313{
314 unsigned size_plus_sentinel = size + 1;
315 if (size_plus_sentinel > SQEntries) {
316 while (size_plus_sentinel > storeQueue.size()) {
317 SQEntry dummy;
318 storeQueue.push_back(dummy);
319 SQEntries++;
320 }
321 } else {
322 SQEntries = size_plus_sentinel;
323 }
324}
325
326template <class Impl>
327void
328LSQUnit<Impl>::insert(DynInstPtr &inst)
329{
330 assert(inst->isMemRef());
331
332 assert(inst->isLoad() || inst->isStore());
333
334 if (inst->isLoad()) {
335 insertLoad(inst);
336 } else {
337 insertStore(inst);
338 }
339
340 inst->setInLSQ();
341}
342
343template <class Impl>
344void
345LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
346{
347 assert((loadTail + 1) % LQEntries != loadHead);
348 assert(loads < LQEntries);
349
350 DPRINTF(LSQUnit, "Inserting load PC %#x, idx:%i [sn:%lli]\n",
351 load_inst->readPC(), loadTail, load_inst->seqNum);
352
353 load_inst->lqIdx = loadTail;
354
355 if (stores == 0) {
356 load_inst->sqIdx = -1;
357 } else {
358 load_inst->sqIdx = storeTail;
359 }
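    // lqIdx/sqIdx snapshot the queue tails at insert time; sqIdx (or -1 if
    // the SQ is empty) bounds the set of older stores this load has to be
    // checked against when it later executes.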
360
361 loadQueue[loadTail] = load_inst;
362
363 incrLdIdx(loadTail);
364
365 ++loads;
366}
367
368template <class Impl>
369void
370LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
371{
372 // Make sure it is not full before inserting an instruction.
373 assert((storeTail + 1) % SQEntries != storeHead);
374 assert(stores < SQEntries);
375
376 DPRINTF(LSQUnit, "Inserting store PC %#x, idx:%i [sn:%lli]\n",
377 store_inst->readPC(), storeTail, store_inst->seqNum);
378
379 store_inst->sqIdx = storeTail;
380 store_inst->lqIdx = loadTail;
381
382 storeQueue[storeTail] = SQEntry(store_inst);
383
384 incrStIdx(storeTail);
385
386 ++stores;
387}
388
389template <class Impl>
390typename Impl::DynInstPtr
391LSQUnit<Impl>::getMemDepViolator()
392{
393 DynInstPtr temp = memDepViolator;
394
395 memDepViolator = NULL;
396
397 return temp;
398}
399
400template <class Impl>
401unsigned
402LSQUnit<Impl>::numFreeEntries()
403{
404 unsigned free_lq_entries = LQEntries - loads;
405 unsigned free_sq_entries = SQEntries - stores;
406
407 // Both the LQ and SQ entries have an extra dummy entry to differentiate
408 // empty/full conditions. Subtract 1 from the free entries.
409 if (free_lq_entries < free_sq_entries) {
410 return free_lq_entries - 1;
411 } else {
412 return free_sq_entries - 1;
413 }
414}
415
416template <class Impl>
417int
418LSQUnit<Impl>::numLoadsReady()
419{
420 int load_idx = loadHead;
421 int retval = 0;
422
423 while (load_idx != loadTail) {
424 assert(loadQueue[load_idx]);
425
426 if (loadQueue[load_idx]->readyToIssue()) {
427 ++retval;
428        }
        // Advance to the next LQ entry; otherwise this loop never terminates.
        incrLdIdx(load_idx);
429 }
430
431 return retval;
432}
433
434template <class Impl>
435Fault
436LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
437{
438 // Execute a specific load.
439 Fault load_fault = NoFault;
440
441 DPRINTF(LSQUnit, "Executing load PC %#x, [sn:%lli]\n",
442 inst->readPC(),inst->seqNum);
443
444 load_fault = inst->initiateAcc();
445
446 // If the instruction faulted, then we need to send it along to commit
447 // without the instruction completing.
448 if (load_fault != NoFault) {
449 // Send this instruction to commit, also make sure iew stage
450 // realizes there is activity.
451 iewStage->instToCommit(inst);
452 iewStage->activityThisCycle();
453 }
454
455 return load_fault;
456}
457
458template <class Impl>
459Fault
460LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
461{
462 using namespace TheISA;
463 // Make sure that a store exists.
464 assert(stores != 0);
465
466 int store_idx = store_inst->sqIdx;
467
468 DPRINTF(LSQUnit, "Executing store PC %#x [sn:%lli]\n",
469 store_inst->readPC(), store_inst->seqNum);
470
471 // Check the recently completed loads to see if any match this store's
472 // address. If so, then we have a memory ordering violation.
473 int load_idx = store_inst->lqIdx;
474
475 Fault store_fault = store_inst->initiateAcc();
476
477 if (storeQueue[store_idx].size == 0) {
478 DPRINTF(LSQUnit,"Fault on Store PC %#x, [sn:%lli],Size = 0\n",
479 store_inst->readPC(),store_inst->seqNum);
480
481 return store_fault;
482 }
483
484 assert(store_fault == NoFault);
485
486 if (store_inst->isStoreConditional()) {
487 // Store conditionals need to set themselves as able to
488 // writeback if we haven't had a fault by here.
489 storeQueue[store_idx].canWB = true;
490
491 ++storesToWB;
492 }
493
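    // Walk the LQ starting at this store's lqIdx, i.e. over loads younger
    // than the store.  Any such load that has already executed to a matching
    // address read stale data, so it is recorded as the violator and the
    // pipeline squashes and refetches from it.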
494 if (!memDepViolator) {
495 while (load_idx != loadTail) {
496 // Really only need to check loads that have actually executed
497 // It's safe to check all loads because effAddr is set to
498 // InvalAddr when the dyn inst is created.
499
500            // @todo: For now this check is overly conservative: shifting
501            // both effective addresses right by 8 bits flags a violation
502            // for any two accesses to the same 256-byte region.
503
504            // @todo: Fix this; the shift amount is a magic number.
505 if ((loadQueue[load_idx]->effAddr >> 8) ==
506 (store_inst->effAddr >> 8)) {
507 // A load incorrectly passed this store. Squash and refetch.
508 // For now return a fault to show that it was unsuccessful.
509 memDepViolator = loadQueue[load_idx];
510
511 return genMachineCheckFault();
512 }
513
514 incrLdIdx(load_idx);
515 }
516
517 // If we've reached this point, there was no violation.
518 memDepViolator = NULL;
519 }
520
521 return store_fault;
522}
523
524template <class Impl>
525void
526LSQUnit<Impl>::commitLoad()
527{
528 assert(loadQueue[loadHead]);
529
530 DPRINTF(LSQUnit, "Committing head load instruction, PC %#x\n",
531 loadQueue[loadHead]->readPC());
532
533 loadQueue[loadHead] = NULL;
534
535 incrLdIdx(loadHead);
536
537 --loads;
538}
539
540template <class Impl>
541void
542LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
543{
544 assert(loads == 0 || loadQueue[loadHead]);
545
546 while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
547 commitLoad();
548 }
549}
550
551template <class Impl>
552void
553LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
554{
555 assert(stores == 0 || storeQueue[storeHead].inst);
556
557 int store_idx = storeHead;
558
559 while (store_idx != storeTail) {
560 assert(storeQueue[store_idx].inst);
561 // Mark any stores that are now committed and have not yet
562 // been marked as able to write back.
563 if (!storeQueue[store_idx].canWB) {
564 if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
565 break;
566 }
567 DPRINTF(LSQUnit, "Marking store as able to write back, PC "
568 "%#x [sn:%lli]\n",
569 storeQueue[store_idx].inst->readPC(),
570 storeQueue[store_idx].inst->seqNum);
571
572 storeQueue[store_idx].canWB = true;
573
574 ++storesToWB;
575 }
576
577 incrStIdx(store_idx);
578 }
579}
580
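// Each cycle, committed (canWB) stores are sent to the D-cache in program
// order, one cache port per store, until the ports run out or the cache
// blocks.  A store whose sendTiming() fails is stashed in retryPkt and
// re-sent from recvRetry().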
581template <class Impl>
582void
583LSQUnit<Impl>::writebackStores()
584{
585 while (storesToWB > 0 &&
586 storeWBIdx != storeTail &&
587 storeQueue[storeWBIdx].inst &&
588 storeQueue[storeWBIdx].canWB &&
589 usedPorts < cachePorts) {
590
551 if (isStoreBlocked || lsq->cacheBlocked()) {
592 DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
593 " is blocked!\n");
594 break;
595 }
596
597 // Store didn't write any data so no need to write it back to
598 // memory.
599 if (storeQueue[storeWBIdx].size == 0) {
600 completeStore(storeWBIdx);
601
602 incrStIdx(storeWBIdx);
603
604 continue;
605 }
606
607 ++usedPorts;
608
609 if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
610 incrStIdx(storeWBIdx);
611
612 continue;
613 }
614
615 assert(storeQueue[storeWBIdx].req);
616 assert(!storeQueue[storeWBIdx].committed);
617
618 DynInstPtr inst = storeQueue[storeWBIdx].inst;
619
620 Request *req = storeQueue[storeWBIdx].req;
621 storeQueue[storeWBIdx].committed = true;
622
623 assert(!inst->memData);
624 inst->memData = new uint8_t[64];
625 memcpy(inst->memData, (uint8_t *)&storeQueue[storeWBIdx].data,
626 req->getSize());
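        // The 64-byte buffer is a fixed upper bound on the store's size here;
        // only req->getSize() bytes of it are filled and sent.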
627
628 PacketPtr data_pkt = new Packet(req, Packet::WriteReq, Packet::Broadcast);
629 data_pkt->dataStatic(inst->memData);
630
631 LSQSenderState *state = new LSQSenderState;
632 state->isLoad = false;
633 state->idx = storeWBIdx;
634 state->inst = inst;
635 data_pkt->senderState = state;
636
637 DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%#x "
638 "to Addr:%#x, data:%#x [sn:%lli]\n",
639 storeWBIdx, storeQueue[storeWBIdx].inst->readPC(),
640 req->getPaddr(), *(inst->memData),
641 storeQueue[storeWBIdx].inst->seqNum);
642
643 // @todo: Remove this SC hack once the memory system handles it.
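        // cpu->lockFlag tracks the load-locked reservation: if it is still
        // set the SC succeeds (result 1); if it has been lost the SC fails
        // (result 0) and the store completes immediately without touching
        // memory.  Uncacheable SCs (result 2) are left to the memory system.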
644 if (req->getFlags() & LOCKED) {
645 if (req->getFlags() & UNCACHEABLE) {
646 req->setScResult(2);
647 } else {
648 if (cpu->lockFlag) {
649 req->setScResult(1);
650 } else {
651 req->setScResult(0);
652 // Hack: Instantly complete this store.
653 completeDataAccess(data_pkt);
654 incrStIdx(storeWBIdx);
655 continue;
656 }
657 }
658 } else {
659 // Non-store conditionals do not need a writeback.
660 state->noWB = true;
661 }
662
663 if (!dcachePort->sendTiming(data_pkt)) {
664 // Need to handle becoming blocked on a store.
665 isStoreBlocked = true;
666 ++lsqCacheBlocked;
667 assert(retryPkt == NULL);
668 retryPkt = data_pkt;
669 } else {
670 storePostSend(data_pkt);
671 }
672 }
673
674 // Not sure this should set it to 0.
675 usedPorts = 0;
676
677 assert(stores >= 0 && storesToWB >= 0);
678}
679
680/*template <class Impl>
681void
682LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
683{
684 list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
685 mshrSeqNums.end(),
686 seqNum);
687
688 if (mshr_it != mshrSeqNums.end()) {
689 mshrSeqNums.erase(mshr_it);
690 DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
691 }
692}*/
693
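// Squash everything younger than squashed_num from both queues, walking back
// from the tails.  Loads are always removed; the SQ walk stops at the first
// store already marked canWB, since such stores are committed and must be
// allowed to drain.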
694template <class Impl>
695void
696LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
697{
698 DPRINTF(LSQUnit, "Squashing until [sn:%lli]!"
699 "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);
700
701 int load_idx = loadTail;
702 decrLdIdx(load_idx);
703
704 while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
705 DPRINTF(LSQUnit,"Load Instruction PC %#x squashed, "
706 "[sn:%lli]\n",
707 loadQueue[load_idx]->readPC(),
708 loadQueue[load_idx]->seqNum);
709
710 if (isStalled() && load_idx == stallingLoadIdx) {
711 stalled = false;
712 stallingStoreIsn = 0;
713 stallingLoadIdx = 0;
714 }
715
716 // Clear the smart pointer to make sure it is decremented.
717 loadQueue[load_idx]->setSquashed();
718 loadQueue[load_idx] = NULL;
719 --loads;
720
721 // Inefficient!
722 loadTail = load_idx;
723
724 decrLdIdx(load_idx);
725 ++lsqSquashedLoads;
726 }
727
728 if (isLoadBlocked) {
729 if (squashed_num < blockedLoadSeqNum) {
730 isLoadBlocked = false;
731 loadBlockedHandled = false;
732 blockedLoadSeqNum = 0;
733 }
734 }
735
736 int store_idx = storeTail;
737 decrStIdx(store_idx);
738
739 while (stores != 0 &&
740 storeQueue[store_idx].inst->seqNum > squashed_num) {
741 // Instructions marked as can WB are already committed.
742 if (storeQueue[store_idx].canWB) {
743 break;
744 }
745
746 DPRINTF(LSQUnit,"Store Instruction PC %#x squashed, "
747 "idx:%i [sn:%lli]\n",
748 storeQueue[store_idx].inst->readPC(),
749 store_idx, storeQueue[store_idx].inst->seqNum);
750
751 // I don't think this can happen. It should have been cleared
752 // by the stalling load.
753 if (isStalled() &&
754 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
755 panic("Is stalled should have been cleared by stalling load!\n");
756 stalled = false;
757 stallingStoreIsn = 0;
758 }
759
760 // Clear the smart pointer to make sure it is decremented.
761 storeQueue[store_idx].inst->setSquashed();
762 storeQueue[store_idx].inst = NULL;
763 storeQueue[store_idx].canWB = 0;
764
765 storeQueue[store_idx].req = NULL;
766 --stores;
767
768 // Inefficient!
769 storeTail = store_idx;
770
771 decrStIdx(store_idx);
772 ++lsqSquashedStores;
773 }
774}
775
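// Bookkeeping after a store packet has been handed to the D-cache: release
// any load that was stalled behind this store, and mark non-store-conditional
// stores completed right away, since nothing depends on their response.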
776template <class Impl>
777void
778LSQUnit<Impl>::storePostSend(Packet *pkt)
779{
780 if (isStalled() &&
781 storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
782 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
783 "load idx:%i\n",
784 stallingStoreIsn, stallingLoadIdx);
785 stalled = false;
786 stallingStoreIsn = 0;
787 iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
788 }
789
790 if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
791 // The store is basically completed at this time. This
792 // only works so long as the checker doesn't try to
793 // verify the value in memory for stores.
794 storeQueue[storeWBIdx].inst->setCompleted();
795#if USE_CHECKER
796 if (cpu->checker) {
797 cpu->checker->verify(storeQueue[storeWBIdx].inst);
798 }
799#endif
800 }
801
802 if (pkt->result != Packet::Success) {
803 DPRINTF(LSQUnit,"D-Cache Write Miss on idx:%i!\n",
804 storeWBIdx);
805
806 DPRINTF(Activity, "Active st accessing mem miss [sn:%lli]\n",
807 storeQueue[storeWBIdx].inst->seqNum);
808
809 //mshrSeqNums.push_back(storeQueue[storeWBIdx].inst->seqNum);
810
811 //DPRINTF(LSQUnit, "Added MSHR. count = %i\n",mshrSeqNums.size());
812
813 // @todo: Increment stat here.
814 } else {
815 DPRINTF(LSQUnit,"D-Cache: Write Hit on idx:%i !\n",
816 storeWBIdx);
817
818 DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n",
819 storeQueue[storeWBIdx].inst->seqNum);
820 }
821
822 incrStIdx(storeWBIdx);
823}
824
825template <class Impl>
826void
827LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
828{
829 iewStage->wakeCPU();
830
831 // Squashed instructions do not need to complete their access.
832 if (inst->isSquashed()) {
833 assert(!inst->isStore());
834 ++lsqIgnoredResponses;
835 return;
836 }
837
838 if (!inst->isExecuted()) {
839 inst->setExecuted();
840
841 // Complete access to copy data to proper place.
842 inst->completeAcc(pkt);
843 }
844
845 // Need to insert instruction into queue to commit
846 iewStage->instToCommit(inst);
847
848 iewStage->activityThisCycle();
849}
850
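// Mark an SQ entry as completed; once the head entry is completed, retire it
// (and any completed entries immediately behind it) to free store-queue
// space, and tell IEW to pick up the new occupancy next cycle.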
851template <class Impl>
852void
853LSQUnit<Impl>::completeStore(int store_idx)
854{
855 assert(storeQueue[store_idx].inst);
856 storeQueue[store_idx].completed = true;
857 --storesToWB;
858 // A bit conservative because a store completion may not free up entries,
859 // but hopefully avoids two store completions in one cycle from making
860 // the CPU tick twice.
861 cpu->activityThisCycle();
862
863 if (store_idx == storeHead) {
864 do {
865 incrStIdx(storeHead);
866
867 --stores;
868 } while (storeQueue[storeHead].completed &&
869 storeHead != storeTail);
870
871 iewStage->updateLSQNextCycle = true;
872 }
873
874 DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
875 "idx:%i\n",
876 storeQueue[store_idx].inst->seqNum, store_idx, storeHead);
877
878 if (isStalled() &&
879 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
880 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
881 "load idx:%i\n",
882 stallingStoreIsn, stallingLoadIdx);
883 stalled = false;
884 stallingStoreIsn = 0;
885 iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
886 }
887
888 storeQueue[store_idx].inst->setCompleted();
889
890 // Tell the checker we've completed this instruction. Some stores
891 // may get reported twice to the checker, but the checker can
892 // handle that case.
893#if USE_CHECKER
894 if (cpu->checker) {
895 cpu->checker->verify(storeQueue[store_idx].inst);
896 }
897#endif
898}
899
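// The D-cache signalled that it can accept requests again.  Resend the
// blocked store packet if there is one; a blocked load needs no resend
// because it already squashed itself and will be re-executed.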
900template <class Impl>
901void
902LSQUnit<Impl>::recvRetry()
903{
904 if (isStoreBlocked) {
905 assert(retryPkt != NULL);
906
907 if (dcachePort->sendTiming(retryPkt)) {
908 storePostSend(retryPkt);
909 retryPkt = NULL;
910 isStoreBlocked = false;
911 } else {
912 // Still blocked!
913 ++lsqCacheBlocked;
874 lsq->setRetryTid(lsqID);
914 }
915 } else if (isLoadBlocked) {
916 DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
917 "no need to resend packet.\n");
918 } else {
919 DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
920 }
921}
922
923template <class Impl>
924inline void
925LSQUnit<Impl>::incrStIdx(int &store_idx)
926{
927 if (++store_idx >= SQEntries)
928 store_idx = 0;
929}
930
931template <class Impl>
932inline void
933LSQUnit<Impl>::decrStIdx(int &store_idx)
934{
935 if (--store_idx < 0)
936 store_idx += SQEntries;
937}
938
939template <class Impl>
940inline void
941LSQUnit<Impl>::incrLdIdx(int &load_idx)
942{
943 if (++load_idx >= LQEntries)
944 load_idx = 0;
945}
946
947template <class Impl>
948inline void
949LSQUnit<Impl>::decrLdIdx(int &load_idx)
950{
951 if (--load_idx < 0)
952 load_idx += LQEntries;
953}
954
955template <class Impl>
956void
957LSQUnit<Impl>::dumpInsts()
958{
959 cprintf("Load store queue: Dumping instructions.\n");
960 cprintf("Load queue size: %i\n", loads);
961 cprintf("Load queue: ");
962
963 int load_idx = loadHead;
964
965 while (load_idx != loadTail && loadQueue[load_idx]) {
966 cprintf("%#x ", loadQueue[load_idx]->readPC());
967
968 incrLdIdx(load_idx);
969 }
970
971 cprintf("Store queue size: %i\n", stores);
972 cprintf("Store queue: ");
973
974 int store_idx = storeHead;
975
976 while (store_idx != storeTail && storeQueue[store_idx].inst) {
977 cprintf("%#x ", storeQueue[store_idx].inst->readPC());
978
979 incrStIdx(store_idx);
980 }
981
982 cprintf("\n");
983}