lsq_unit_impl.hh (7616:1a0ab2308bbe)

/*
 * Copyright (c) 2010 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#include "arch/locked_mem.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#include "config/use_checker.hh"
#include "cpu/o3/lsq.hh"
#include "cpu/o3/lsq_unit.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

#if USE_CHECKER
#include "cpu/checker/cpu.hh"
#endif

template <class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
    this->setFlags(Event::AutoDelete);
}

template <class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    if (!lsqPtr->isSwitchedOut()) {
        lsqPtr->writeback(inst, pkt);
    }

    if (pkt->senderState)
        delete pkt->senderState;

    delete pkt->req;
    delete pkt;
}

template <class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description() const
{
    return "Store writeback";
}

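// The AutoDelete flag set in the constructor lets the event queue reclaim
// each WritebackEvent after process() runs, so the failed-store-conditional
// path in writebackStores() below can schedule a writeback one tick later
// and forget about it.
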
template <class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli]\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli]\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    assert(!pkt->wasNacked());

    // If this is a split access, wait until all packets are received.
    if (TheISA::HasUnalignedMemAcc && !state->complete()) {
        delete pkt->req;
        delete pkt;
        return;
    }

    if (isSwitchedOut() || inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
    } else {
        if (!state->noWB) {
            if (!TheISA::HasUnalignedMemAcc || !state->isSplit ||
                !state->isLoad) {
                writeback(inst, pkt);
            } else {
                writeback(inst, state->mainPkt);
            }
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
        delete state->mainPkt->req;
        delete state->mainPkt;
    }
    delete state;
    delete pkt->req;
    delete pkt;
}

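// Ownership note: each response packet and its request are freed here by
// the unit that allocated them.  For a split load, the partial packets are
// freed as they arrive, mainPkt (and its request) only once both halves
// are in, and the shared LSQSenderState last, after the writeback has been
// handed off.
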
template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false), hasPendingPkt(false)
{
}

template <class Impl>
void
LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
        LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
        unsigned id)
{
    cpu = cpu_ptr;
    iewStage = iew_ptr;

    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n", id);

    switchedOut = false;

    lsq = lsq_ptr;

    lsqID = id;

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;
    cachePorts = params->cachePorts;

    retryPkt = NULL;
    memDepViolator = NULL;

    blockedLoadSeqNum = 0;
}

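// Sizing sketch: because one slot is reserved as a sentinel, a unit built
// with, say, maxLQEntries = 32 allocates 33 slots.  "head == tail" then
// always means empty and "(tail + 1) % LQEntries == head" means full, so
// the two states stay distinguishable (see the asserts in insertLoad() and
// insertStore() below).
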
template <class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread." + to_string(lsqID);
    }
}

template <class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the "
              "instruction is squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an "
              "invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store "
              "forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the "
              "cache being blocked");
}

template <class Impl>
void
LSQUnit<Impl>::setDcachePort(Port *dcache_port)
{
    dcachePort = dcache_port;

#if USE_CHECKER
    if (cpu->checker) {
        cpu->checker->setDcachePort(dcachePort);
    }
#endif
}

template <class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template <class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

template <class Impl>
void
LSQUnit<Impl>::switchOut()
{
    switchedOut = true;
    for (int i = 0; i < loadQueue.size(); ++i) {
        assert(!loadQueue[i]);
        loadQueue[i] = NULL;
    }

    assert(storesToWB == 0);
}

template <class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    switchedOut = false;
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;
}

template <class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }
}

template <class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %#x, idx:%i [sn:%lli]\n",
            load_inst->readPC(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

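// The sqIdx recorded above snapshots the store-queue tail at insertion:
// every store already in the SQ is older than this load, and anything
// inserted at or after that index is younger, which gives later
// store-to-load address checks a natural starting point.
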
template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %#x, idx:%i [sn:%lli]\n",
            store_inst->readPC(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeEntries()
{
    unsigned free_lq_entries = LQEntries - loads;
    unsigned free_sq_entries = SQEntries - stores;

    // Both the LQ and SQ entries have an extra dummy entry to
    // differentiate empty/full conditions.  Subtract 1 from the free
    // entries.
    if (free_lq_entries < free_sq_entries) {
        return free_lq_entries - 1;
    } else {
        return free_sq_entries - 1;
    }
}

template <class Impl>
int
LSQUnit<Impl>::numLoadsReady()
{
    int load_idx = loadHead;
    int retval = 0;

    while (load_idx != loadTail) {
        assert(loadQueue[load_idx]);

        if (loadQueue[load_idx]->readyToIssue()) {
            ++retval;
        }

        // Advance to the next LQ entry.
        incrLdIdx(load_idx);
    }

    return retval;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    using namespace TheISA;
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %#x, [sn:%lli]\n",
            inst->readPC(), inst->seqNum);

    assert(!inst->isSquashed());

    load_fault = inst->initiateAcc();

    // If the instruction faulted or predicated false, then we need to send
    // it along to commit without the instruction completing.
    if (load_fault != NoFault || inst->readPredicate() == false) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        // Mark it as executed unless it is an uncached load that
        // needs to hit the head of commit.
        DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
                inst->seqNum,
                (load_fault != NoFault ? "fault" : "predication"));
        if (!(inst->hasRequest() && inst->uncacheable()) ||
            inst->isAtCommit()) {
            inst->setExecuted();
        }
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    } else if (!loadBlocked()) {
        assert(inst->effAddrValid);
        int load_idx = inst->lqIdx;
        incrLdIdx(load_idx);
        while (load_idx != loadTail) {
            // Really only need to check loads that have actually executed.

            // @todo: For now this is extra conservative, detecting a
            // violation if the addresses match assuming all accesses
            // are quad word accesses.

            // @todo: Fix this, magic number being used here

            // @todo: Uncachable load is not executed until it reaches
            // the head of the ROB.  Once this if checks only the executed
            // loads (as noted above), this check can be removed.
            if (loadQueue[load_idx]->effAddrValid &&
                ((loadQueue[load_idx]->effAddr >> 8)
                 == (inst->effAddr >> 8)) &&
                !loadQueue[load_idx]->uncacheable()) {
                // A load incorrectly passed this load.  Squash and refetch.
                // For now return a fault to show that it was unsuccessful.
                DynInstPtr violator = loadQueue[load_idx];
                if (!memDepViolator ||
                    (violator->seqNum < memDepViolator->seqNum)) {
                    memDepViolator = violator;
                } else {
                    break;
                }

                ++lsqMemOrderViolation;

                return genMachineCheckFault();
            }

            incrLdIdx(load_idx);
        }
    }

    return load_fault;
}
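
// The check above compares (effAddr >> 8), i.e. 256-byte regions -- the
// "magic number" the @todo refers to.  For illustration: accesses at
// 0x1000 and 0x10f8 both map to region 0x10 and would be flagged even
// though their bytes never overlap.  That is the conservative direction:
// a false match costs a squash and refetch, never correctness.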

template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %#x [sn:%lli]\n",
            store_inst->readPC(), store_inst->seqNum);

    assert(!store_inst->isSquashed());

    // Check the recently completed loads to see if any match this store's
    // address.  If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit, "Fault on Store PC %#x, [sn:%lli], Size = 0\n",
                store_inst->readPC(), store_inst->seqNum);

        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    assert(store_inst->effAddrValid);
    while (load_idx != loadTail) {
        // Really only need to check loads that have actually executed.
        // It's safe to check all loads because effAddr is set to
        // InvalAddr when the dyn inst is created.

        // @todo: For now this is extra conservative, detecting a
        // violation if the addresses match assuming all accesses
        // are quad word accesses.

        // @todo: Fix this, magic number being used here

        // @todo: Uncachable load is not executed until it reaches
        // the head of the ROB.  Once this if checks only the executed
        // loads (as noted above), this check can be removed.
        if (loadQueue[load_idx]->effAddrValid &&
            ((loadQueue[load_idx]->effAddr >> 8)
             == (store_inst->effAddr >> 8)) &&
            !loadQueue[load_idx]->uncacheable()) {
            // A load incorrectly passed this store.  Squash and refetch.
            // For now return a fault to show that it was unsuccessful.
            DynInstPtr violator = loadQueue[load_idx];
            if (!memDepViolator ||
                (violator->seqNum < memDepViolator->seqNum)) {
                memDepViolator = violator;
            } else {
                break;
            }

            ++lsqMemOrderViolation;

            return genMachineCheckFault();
        }

        incrLdIdx(load_idx);
    }

    return store_fault;
}
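
// Note the asymmetry with commit: ordinary stores only become eligible to
// write back when commitStores() marks them canWB, but store conditionals
// are marked canWB at execute time above, as soon as they are known to be
// fault-free.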

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %#x\n",
            loadQueue[loadHead]->readPC());

    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%#x [sn:%lli]\n",
                    storeQueue[store_idx].inst->readPC(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackPendingStore()
{
    if (hasPendingPkt) {
        assert(pendingPkt != NULL);

        // If the cache is blocked, this will store the packet for retry.
        if (sendStore(pendingPkt)) {
            storePostSend(pendingPkt);
        }
        pendingPkt = NULL;
        hasPendingPkt = false;
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    // First writeback the second packet from any split store that didn't
    // complete last cycle because there weren't enough cache ports
    // available.
    if (TheISA::HasUnalignedMemAcc) {
        writebackPendingStore();
    }

    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           usedPorts < cachePorts) {

        if (isStoreBlocked || lsq->cacheBlocked()) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
            assert(storeQueue[storeWBIdx].sreqLow);
            assert(storeQueue[storeWBIdx].sreqHigh);
        }

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[64];

        memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());

        MemCmd command =
            req->isSwap() ? MemCmd::SwapReq :
            (req->isLLSC() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
        PacketPtr data_pkt;
        PacketPtr snd_data_pkt = NULL;

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;

        if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {

            // Build a single data packet if the store isn't split.
            data_pkt = new Packet(req, command, Packet::Broadcast);
            data_pkt->dataStatic(inst->memData);
            data_pkt->senderState = state;
        } else {
            RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
            RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;

            // Create two packets if the store is split in two.
            data_pkt = new Packet(sreqLow, command, Packet::Broadcast);
            snd_data_pkt = new Packet(sreqHigh, command, Packet::Broadcast);

            data_pkt->dataStatic(inst->memData);
            snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());

            data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;

            // Can delete the main request now.
            delete req;
            req = sreqLow;
        }

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%#x "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->readPC(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            assert(!storeQueue[storeWBIdx].isSplit);
            // Disable recording the result temporarily.  Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult = false;
            bool success = TheISA::handleLockedWrite(inst.get(), req);
            inst->recordResult = true;

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed.  "
                        "Instantly completing it.\n",
                        inst->seqNum);
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                cpu->schedule(wb, curTick + 1);
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        if (!sendStore(data_pkt)) {
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], "
                    "will retry later\n",
                    inst->seqNum);

            // Need to store the second packet, if split.
            if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
                state->pktToSend = true;
                state->pendingPacket = snd_data_pkt;
            }
        } else {

            // If split, try to send the second packet too.
            if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
                assert(snd_data_pkt);

                // Ensure there are enough ports to use.
                if (usedPorts < cachePorts) {
                    ++usedPorts;
                    if (sendStore(snd_data_pkt)) {
                        storePostSend(snd_data_pkt);
                    } else {
                        DPRINTF(IEW, "D-Cache became blocked when writing"
                                " [sn:%lli] second packet, will retry later\n",
                                inst->seqNum);
                    }
                } else {

                    // Store the packet for when there's free ports.
                    assert(pendingPkt == NULL);
                    pendingPkt = snd_data_pkt;
                    hasPendingPkt = true;
                }
            } else {

                // Not a split store.
                storePostSend(data_pkt);
            }
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}
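
// Split-store sketch: for an access that straddles a block boundary --
// say an 8-byte store whose last 4 bytes spill onto the next block --
// sreqLow covers the first 4 bytes and sreqHigh the remaining 4, and the
// second packet's static data pointer is simply offset by
// sreqLow->getSize() into the same memData buffer, so no copy is needed.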

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n", mshrSeqNums.size());
    }
}*/

template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit, "Load Instruction PC %#x squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->readPC(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit, "Store Instruction PC %#x squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->readPC(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen. It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;
        if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
            delete storeQueue[store_idx].sreqLow;
            delete storeQueue[store_idx].sreqHigh;

            storeQueue[store_idx].sreqLow = NULL;
            storeQueue[store_idx].sreqHigh = NULL;
        }

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}
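
// Both walks above run from the tail toward the head, youngest first, so
// each can stop at the first instruction old enough to survive the squash.
// The store walk additionally stops at any entry already marked canWB:
// those stores are committed and must drain to memory regardless.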

template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time.  This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();
#if USE_CHECKER
        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
#endif
    }

    incrStIdx(storeWBIdx);
}

template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit.
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();

    // See if this load changed the PC.
    iewStage->checkMisprediction(inst);
}

template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    // Tell the checker we've completed this instruction.  Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
#if USE_CHECKER
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
#endif
}
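
// Completions can arrive out of order: an entry that finishes behind an
// older incomplete store just sets its completed flag, and the do-while
// above only advances storeHead across a contiguous run of completed
// entries once the head entry itself completes.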

template <class Impl>
bool
LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
{
    if (!dcachePort->sendTiming(data_pkt)) {
        // Need to handle becoming blocked on a store.
        isStoreBlocked = true;
        ++lsqCacheBlocked;
        assert(retryPkt == NULL);
        retryPkt = data_pkt;
        lsq->setRetryTid(lsqID);
        return false;
    }
    return true;
}

template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Receiving retry: store blocked\n");
        assert(retryPkt != NULL);

        if (dcachePort->sendTiming(retryPkt)) {
            LSQSenderState *state =
                dynamic_cast<LSQSenderState *>(retryPkt->senderState);

            // Don't finish the store unless this is the last packet.
            if (!TheISA::HasUnalignedMemAcc || !state->pktToSend) {
                storePostSend(retryPkt);
            }
            retryPkt = NULL;
            isStoreBlocked = false;
            lsq->setRetryTid(InvalidThreadID);

            // Send any outstanding packet.
            if (TheISA::HasUnalignedMemAcc && state->pktToSend) {
                assert(state->pendingPacket);
                if (sendStore(state->pendingPacket)) {
                    storePostSend(state->pendingPacket);
                }
            }
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
            lsq->setRetryTid(lsqID);
        }
    } else if (isLoadBlocked) {
        DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
                "no need to resend packet.\n");
    } else {
        DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
    }
}
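
// Retry protocol sketch: the unit keeps at most one blocked packet
// (retryPkt).  When the port signals a retry and the resend succeeds,
// storePostSend is deferred for a split store whose second half is still
// pending, so the SQ entry is not marked sent with half its data queued.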

template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx)
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx)
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx)
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx)
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}
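
// Wraparound example: with SQEntries == 9 (8 usable slots plus the
// sentinel), incrStIdx takes index 8 back to 0 and decrStIdx takes 0 back
// to 8, keeping every head/tail index in [0, SQEntries).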

template <class Impl>
void
LSQUnit<Impl>::dumpInsts()
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        cprintf("%#x ", loadQueue[load_idx]->readPC());

        incrLdIdx(load_idx);
    }

    cprintf("\n");
    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        cprintf("%#x ", storeQueue[store_idx].inst->readPC());

        incrStIdx(store_idx);
    }

    cprintf("\n");
}