lsq_unit_impl.hh (2674:6d4afef73a20) lsq_unit_impl.hh (2678:1f86b91dc3bb)
1/*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include "cpu/checker/cpu.hh"
30#include "cpu/o3/lsq_unit.hh"
31#include "base/str.hh"
32#include "mem/request.hh"
33
34template<class Impl>
1/*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include "cpu/checker/cpu.hh"
30#include "cpu/o3/lsq_unit.hh"
31#include "base/str.hh"
32#include "mem/request.hh"
33
34template<class Impl>
35void
36LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
35LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
36 LSQUnit *lsq_ptr)
37 : Event(&mainEventQueue), inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
37{
38{
38/*
39 DPRINTF(IEW, "Load writeback event [sn:%lli]\n", inst->seqNum);
40 DPRINTF(Activity, "Activity: Ld Writeback event [sn:%lli]\n", inst->seqNum);
39 this->setFlags(Event::AutoDelete);
40}
41
41
42 //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);
43
44 if (iewStage->isSwitchedOut()) {
45 inst = NULL;
46 return;
47 } else if (inst->isSquashed()) {
48 iewStage->wakeCPU();
49 inst = NULL;
50 return;
42template<class Impl>
43void
44LSQUnit<Impl>::WritebackEvent::process()
45{
46 if (!lsqPtr->isSwitchedOut()) {
47 lsqPtr->writeback(inst, pkt);
51 }
48 }
49 delete pkt;
50}
52
51
53 iewStage->wakeCPU();
54
55 if (!inst->isExecuted()) {
56 inst->setExecuted();
57
58 // Complete access to copy data to proper place.
59 inst->completeAcc();
60 }
61
62 // Need to insert instruction into queue to commit
63 iewStage->instToCommit(inst);
64
65 iewStage->activityThisCycle();
66
67 inst = NULL;
68*/
52template<class Impl>
53const char *
54LSQUnit<Impl>::WritebackEvent::description()
55{
56 return "Store writeback event";
69}
70
71template<class Impl>
72void
57}
58
59template<class Impl>
60void
73LSQUnit<Impl>::completeStoreDataAccess(DynInstPtr &inst)
61LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
74{
62{
75/*
76 DPRINTF(LSQ, "Cache miss complete for store idx:%i\n", storeIdx);
77 DPRINTF(Activity, "Activity: st writeback event idx:%i\n", storeIdx);
63 LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
64 DynInstPtr inst = state->inst;
65 DPRINTF(IEW, "Writeback event [sn:%lli]\n", inst->seqNum);
66// DPRINTF(Activity, "Activity: Ld Writeback event [sn:%lli]\n", inst->seqNum);
78
67
79 //lsqPtr->removeMSHR(lsqPtr->storeQueue[storeIdx].inst->seqNum);
68 //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);
80
69
81 if (lsqPtr->isSwitchedOut()) {
82 if (wbEvent)
83 delete wbEvent;
84
70 if (isSwitchedOut() || inst->isSquashed()) {
71 delete state;
72 delete pkt;
85 return;
73 return;
74 } else {
75 if (!state->noWB) {
76 writeback(inst, pkt);
77 }
78
79 if (inst->isStore()) {
80 completeStore(state->idx);
81 }
86 }
87
82 }
83
88 lsqPtr->cpu->wakeCPU();
89
90 if (wb)
91 lsqPtr->completeDataAccess(storeIdx);
92 lsqPtr->completeStore(storeIdx);
93*/
84 delete state;
85 delete pkt;
94}
95
template <class Impl>
Tick
LSQUnit<Impl>::DcachePort::recvAtomic(PacketPtr pkt)
{
    // The O3 LSQ only supports timing accesses; an atomic access reaching
    // this port is a configuration error, so fail loudly.
    panic("O3CPU model does not work with atomic mode!");
    return curTick;  // Unreachable; only satisfies the Tick return type.
}
103
template <class Impl>
void
LSQUnit<Impl>::DcachePort::recvFunctional(PacketPtr pkt)
{
    // Functional accesses are not expected on the LSQ's d-cache port;
    // treat any such callback as a fatal configuration error.
    panic("O3CPU doesn't expect recvFunctional callback!");
}
110
template <class Impl>
void
LSQUnit<Impl>::DcachePort::recvStatusChange(Status status)
{
    // Address-range changes are benign and ignored; any other status
    // change is unexpected for this port.
    if (status == RangeChange)
        return;

    panic("O3CPU doesn't expect recvStatusChange callback!");
}
120
template <class Impl>
bool
LSQUnit<Impl>::DcachePort::recvTiming(PacketPtr pkt)
{
    // Hand the timing response back to the owning LSQ unit, which
    // completes the corresponding load/store.  Always accepts the packet.
    lsq->completeDataAccess(pkt);
    return true;
}
128
template <class Impl>
void
LSQUnit<Impl>::DcachePort::recvRetry()
{
    // Retry is unimplemented: a retry should only arrive after a rejected
    // sendTiming(), and that path currently aborts simulation rather than
    // buffering and resending the packet (see disabled sketch below).
    panic("Retry unsupported for now!");
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
/*
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        cpu->dcache_pkt = NULL;
    }
*/
}
146
147template <class Impl>
148LSQUnit<Impl>::LSQUnit()
86}
87
88template <class Impl>
89Tick
90LSQUnit<Impl>::DcachePort::recvAtomic(PacketPtr pkt)
91{
92 panic("O3CPU model does not work with atomic mode!");
93 return curTick;
94}
95
96template <class Impl>
97void
98LSQUnit<Impl>::DcachePort::recvFunctional(PacketPtr pkt)
99{
100 panic("O3CPU doesn't expect recvFunctional callback!");
101}
102
103template <class Impl>
104void
105LSQUnit<Impl>::DcachePort::recvStatusChange(Status status)
106{
107 if (status == RangeChange)
108 return;
109
110 panic("O3CPU doesn't expect recvStatusChange callback!");
111}
112
113template <class Impl>
114bool
115LSQUnit<Impl>::DcachePort::recvTiming(PacketPtr pkt)
116{
117 lsq->completeDataAccess(pkt);
118 return true;
119}
120
121template <class Impl>
122void
123LSQUnit<Impl>::DcachePort::recvRetry()
124{
125 panic("Retry unsupported for now!");
126 // we shouldn't get a retry unless we have a packet that we're
127 // waiting to transmit
128/*
129 assert(cpu->dcache_pkt != NULL);
130 assert(cpu->_status == DcacheRetry);
131 PacketPtr tmp = cpu->dcache_pkt;
132 if (sendTiming(tmp)) {
133 cpu->_status = DcacheWaitResponse;
134 cpu->dcache_pkt = NULL;
135 }
136*/
137}
138
139template <class Impl>
140LSQUnit<Impl>::LSQUnit()
149 : loads(0), stores(0), storesToWB(0), stalled(false), isLoadBlocked(false),
141 : loads(0), stores(0), storesToWB(0), stalled(false),
142 isStoreBlocked(false), isLoadBlocked(false),
150 loadBlockedHandled(false)
151{
152}
153
154template<class Impl>
155void
156LSQUnit<Impl>::init(Params *params, unsigned maxLQEntries,
157 unsigned maxSQEntries, unsigned id)
158{
159 DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id);
160
161 switchedOut = false;
162
163 lsqID = id;
164
165 // Add 1 for the sentinel entry (they are circular queues).
166 LQEntries = maxLQEntries + 1;
167 SQEntries = maxSQEntries + 1;
168
169 loadQueue.resize(LQEntries);
170 storeQueue.resize(SQEntries);
171
172 loadHead = loadTail = 0;
173
174 storeHead = storeWBIdx = storeTail = 0;
175
176 usedPorts = 0;
177 cachePorts = params->cachePorts;
178
143 loadBlockedHandled(false)
144{
145}
146
147template<class Impl>
148void
149LSQUnit<Impl>::init(Params *params, unsigned maxLQEntries,
150 unsigned maxSQEntries, unsigned id)
151{
152 DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id);
153
154 switchedOut = false;
155
156 lsqID = id;
157
158 // Add 1 for the sentinel entry (they are circular queues).
159 LQEntries = maxLQEntries + 1;
160 SQEntries = maxSQEntries + 1;
161
162 loadQueue.resize(LQEntries);
163 storeQueue.resize(SQEntries);
164
165 loadHead = loadTail = 0;
166
167 storeHead = storeWBIdx = storeTail = 0;
168
169 usedPorts = 0;
170 cachePorts = params->cachePorts;
171
179 Port *mem_dport = params->mem->getPort("");
180 dcachePort->setPeer(mem_dport);
181 mem_dport->setPeer(dcachePort);
172 mem = params->mem;
182
183 memDepViolator = NULL;
184
185 blockedLoadSeqNum = 0;
186}
187
188template<class Impl>
189void
190LSQUnit<Impl>::setCPU(FullCPU *cpu_ptr)
191{
192 cpu = cpu_ptr;
193 dcachePort = new DcachePort(cpu, this);
173
174 memDepViolator = NULL;
175
176 blockedLoadSeqNum = 0;
177}
178
179template<class Impl>
180void
181LSQUnit<Impl>::setCPU(FullCPU *cpu_ptr)
182{
183 cpu = cpu_ptr;
184 dcachePort = new DcachePort(cpu, this);
185
186 Port *mem_dport = mem->getPort("");
187 dcachePort->setPeer(mem_dport);
188 mem_dport->setPeer(dcachePort);
194}
195
template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    // Single-threaded builds omit the per-thread suffix so object names
    // stay stable; SMT builds append this unit's thread id.
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread." + to_string(lsqID);
    }
}
206
template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    // Drop every load-queue entry (releases the DynInst smart pointers).
    loadQueue.clear();
}
213
template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    // Drop every store-queue entry.
    storeQueue.clear();
}
220
#if 0
// Dead code: page-table hookup is currently compiled out.
template<class Impl>
void
LSQUnit<Impl>::setPageTable(PageTable *pt_ptr)
{
    DPRINTF(LSQUnit, "Setting the page table pointer.\n");
    pTable = pt_ptr;
}
#endif
230
template<class Impl>
void
LSQUnit<Impl>::switchOut()
{
    switchedOut = true;
    // Release all load instructions so their DynInsts can be freed.
    for (int i = 0; i < loadQueue.size(); ++i)
        loadQueue[i] = NULL;

    // All stores must already have been written back before switching out.
    assert(storesToWB == 0);
}
241
template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    // Reset all transient state when this CPU model resumes execution:
    // both queues are logically empty again.
    switchedOut = false;
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;
}
263
template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    // +1 for the sentinel entry that distinguishes full from empty in
    // the circular queue.
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        // Growing: append empty slots until the backing vector is big
        // enough.
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        // Shrinking only lowers the logical limit; storage is untouched.
        LQEntries = size_plus_sentinel;
    }

}
282
template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    // +1 for the circular queue's sentinel slot (see resizeLQ).
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        // Growing: append empty entries until the backing vector fits.
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        // Shrinking only lowers the logical limit; storage is untouched.
        SQEntries = size_plus_sentinel;
    }
}
298
template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    // Route the memory instruction to the proper queue and mark it as
    // resident in the LSQ.
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}
315
template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    // The circular queue keeps one sentinel slot, so it must not be full.
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %#x, idx:%i [sn:%lli]\n",
            load_inst->readPC(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    // Record which stores are older than this load: the SQ tail at
    // insert time, or -1 if no stores are outstanding.
    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}
340
template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %#x, idx:%i [sn:%lli]\n",
            store_inst->readPC(), storeTail, store_inst->seqNum);

    // Remember the store's own SQ slot and the LQ tail at insert time
    // (i.e. which loads are older than this store).
    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}
361
362template <class Impl>
363typename Impl::DynInstPtr
364LSQUnit<Impl>::getMemDepViolator()
365{
366 DynInstPtr temp = memDepViolator;
367
368 memDepViolator = NULL;
369
370 return temp;
371}
372
373template <class Impl>
374unsigned
375LSQUnit<Impl>::numFreeEntries()
376{
377 unsigned free_lq_entries = LQEntries - loads;
378 unsigned free_sq_entries = SQEntries - stores;
379
380 // Both the LQ and SQ entries have an extra dummy entry to differentiate
381 // empty/full conditions. Subtract 1 from the free entries.
382 if (free_lq_entries < free_sq_entries) {
383 return free_lq_entries - 1;
384 } else {
385 return free_sq_entries - 1;
386 }
387}
388
389template <class Impl>
390int
391LSQUnit<Impl>::numLoadsReady()
392{
393 int load_idx = loadHead;
394 int retval = 0;
395
396 while (load_idx != loadTail) {
397 assert(loadQueue[load_idx]);
398
399 if (loadQueue[load_idx]->readyToIssue()) {
400 ++retval;
401 }
402 }
403
404 return retval;
405}
406
template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %#x, [sn:%lli]\n",
            inst->readPC(),inst->seqNum);

    // Kick off the memory access via the instruction itself.
    load_fault = inst->initiateAcc();

    // If the instruction faulted, then we need to send it along to commit
    // without the instruction completing.
    if (load_fault != NoFault) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    }

    return load_fault;
}
430
431template <class Impl>
432Fault
433LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
434{
435 using namespace TheISA;
436 // Make sure that a store exists.
437 assert(stores != 0);
438
439 int store_idx = store_inst->sqIdx;
440
441 DPRINTF(LSQUnit, "Executing store PC %#x [sn:%lli]\n",
442 store_inst->readPC(), store_inst->seqNum);
443
444 // Check the recently completed loads to see if any match this store's
445 // address. If so, then we have a memory ordering violation.
446 int load_idx = store_inst->lqIdx;
447
448 Fault store_fault = store_inst->initiateAcc();
189}
190
191template<class Impl>
192std::string
193LSQUnit<Impl>::name() const
194{
195 if (Impl::MaxThreads == 1) {
196 return iewStage->name() + ".lsq";
197 } else {
198 return iewStage->name() + ".lsq.thread." + to_string(lsqID);
199 }
200}
201
202template<class Impl>
203void
204LSQUnit<Impl>::clearLQ()
205{
206 loadQueue.clear();
207}
208
209template<class Impl>
210void
211LSQUnit<Impl>::clearSQ()
212{
213 storeQueue.clear();
214}
215
216#if 0
217template<class Impl>
218void
219LSQUnit<Impl>::setPageTable(PageTable *pt_ptr)
220{
221 DPRINTF(LSQUnit, "Setting the page table pointer.\n");
222 pTable = pt_ptr;
223}
224#endif
225
226template<class Impl>
227void
228LSQUnit<Impl>::switchOut()
229{
230 switchedOut = true;
231 for (int i = 0; i < loadQueue.size(); ++i)
232 loadQueue[i] = NULL;
233
234 assert(storesToWB == 0);
235}
236
237template<class Impl>
238void
239LSQUnit<Impl>::takeOverFrom()
240{
241 switchedOut = false;
242 loads = stores = storesToWB = 0;
243
244 loadHead = loadTail = 0;
245
246 storeHead = storeWBIdx = storeTail = 0;
247
248 usedPorts = 0;
249
250 memDepViolator = NULL;
251
252 blockedLoadSeqNum = 0;
253
254 stalled = false;
255 isLoadBlocked = false;
256 loadBlockedHandled = false;
257}
258
259template<class Impl>
260void
261LSQUnit<Impl>::resizeLQ(unsigned size)
262{
263 unsigned size_plus_sentinel = size + 1;
264 assert(size_plus_sentinel >= LQEntries);
265
266 if (size_plus_sentinel > LQEntries) {
267 while (size_plus_sentinel > loadQueue.size()) {
268 DynInstPtr dummy;
269 loadQueue.push_back(dummy);
270 LQEntries++;
271 }
272 } else {
273 LQEntries = size_plus_sentinel;
274 }
275
276}
277
278template<class Impl>
279void
280LSQUnit<Impl>::resizeSQ(unsigned size)
281{
282 unsigned size_plus_sentinel = size + 1;
283 if (size_plus_sentinel > SQEntries) {
284 while (size_plus_sentinel > storeQueue.size()) {
285 SQEntry dummy;
286 storeQueue.push_back(dummy);
287 SQEntries++;
288 }
289 } else {
290 SQEntries = size_plus_sentinel;
291 }
292}
293
294template <class Impl>
295void
296LSQUnit<Impl>::insert(DynInstPtr &inst)
297{
298 assert(inst->isMemRef());
299
300 assert(inst->isLoad() || inst->isStore());
301
302 if (inst->isLoad()) {
303 insertLoad(inst);
304 } else {
305 insertStore(inst);
306 }
307
308 inst->setInLSQ();
309}
310
311template <class Impl>
312void
313LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
314{
315 assert((loadTail + 1) % LQEntries != loadHead);
316 assert(loads < LQEntries);
317
318 DPRINTF(LSQUnit, "Inserting load PC %#x, idx:%i [sn:%lli]\n",
319 load_inst->readPC(), loadTail, load_inst->seqNum);
320
321 load_inst->lqIdx = loadTail;
322
323 if (stores == 0) {
324 load_inst->sqIdx = -1;
325 } else {
326 load_inst->sqIdx = storeTail;
327 }
328
329 loadQueue[loadTail] = load_inst;
330
331 incrLdIdx(loadTail);
332
333 ++loads;
334}
335
336template <class Impl>
337void
338LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
339{
340 // Make sure it is not full before inserting an instruction.
341 assert((storeTail + 1) % SQEntries != storeHead);
342 assert(stores < SQEntries);
343
344 DPRINTF(LSQUnit, "Inserting store PC %#x, idx:%i [sn:%lli]\n",
345 store_inst->readPC(), storeTail, store_inst->seqNum);
346
347 store_inst->sqIdx = storeTail;
348 store_inst->lqIdx = loadTail;
349
350 storeQueue[storeTail] = SQEntry(store_inst);
351
352 incrStIdx(storeTail);
353
354 ++stores;
355}
356
357template <class Impl>
358typename Impl::DynInstPtr
359LSQUnit<Impl>::getMemDepViolator()
360{
361 DynInstPtr temp = memDepViolator;
362
363 memDepViolator = NULL;
364
365 return temp;
366}
367
368template <class Impl>
369unsigned
370LSQUnit<Impl>::numFreeEntries()
371{
372 unsigned free_lq_entries = LQEntries - loads;
373 unsigned free_sq_entries = SQEntries - stores;
374
375 // Both the LQ and SQ entries have an extra dummy entry to differentiate
376 // empty/full conditions. Subtract 1 from the free entries.
377 if (free_lq_entries < free_sq_entries) {
378 return free_lq_entries - 1;
379 } else {
380 return free_sq_entries - 1;
381 }
382}
383
384template <class Impl>
385int
386LSQUnit<Impl>::numLoadsReady()
387{
388 int load_idx = loadHead;
389 int retval = 0;
390
391 while (load_idx != loadTail) {
392 assert(loadQueue[load_idx]);
393
394 if (loadQueue[load_idx]->readyToIssue()) {
395 ++retval;
396 }
397 }
398
399 return retval;
400}
401
template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %#x, [sn:%lli]\n",
            inst->readPC(),inst->seqNum);

    // Kick off the memory access via the instruction itself.
    load_fault = inst->initiateAcc();

    // If the instruction faulted, then we need to send it along to commit
    // without the instruction completing.
    if (load_fault != NoFault) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    }

    return load_fault;
}
425
426template <class Impl>
427Fault
428LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
429{
430 using namespace TheISA;
431 // Make sure that a store exists.
432 assert(stores != 0);
433
434 int store_idx = store_inst->sqIdx;
435
436 DPRINTF(LSQUnit, "Executing store PC %#x [sn:%lli]\n",
437 store_inst->readPC(), store_inst->seqNum);
438
439 // Check the recently completed loads to see if any match this store's
440 // address. If so, then we have a memory ordering violation.
441 int load_idx = store_inst->lqIdx;
442
443 Fault store_fault = store_inst->initiateAcc();
449// Fault store_fault = store_inst->execute();
450
451 if (storeQueue[store_idx].size == 0) {
452 DPRINTF(LSQUnit,"Fault on Store PC %#x, [sn:%lli],Size = 0\n",
453 store_inst->readPC(),store_inst->seqNum);
454
455 return store_fault;
456 }
457
458 assert(store_fault == NoFault);
459
460 if (store_inst->isStoreConditional()) {
461 // Store conditionals need to set themselves as able to
462 // writeback if we haven't had a fault by here.
463 storeQueue[store_idx].canWB = true;
464
465 ++storesToWB;
466 }
467
468 if (!memDepViolator) {
469 while (load_idx != loadTail) {
470 // Really only need to check loads that have actually executed
471 // It's safe to check all loads because effAddr is set to
472 // InvalAddr when the dyn inst is created.
473
474 // @todo: For now this is extra conservative, detecting a
475 // violation if the addresses match assuming all accesses
476 // are quad word accesses.
477
478 // @todo: Fix this, magic number being used here
479 if ((loadQueue[load_idx]->effAddr >> 8) ==
480 (store_inst->effAddr >> 8)) {
481 // A load incorrectly passed this store. Squash and refetch.
482 // For now return a fault to show that it was unsuccessful.
483 memDepViolator = loadQueue[load_idx];
484
485 return genMachineCheckFault();
486 }
487
488 incrLdIdx(load_idx);
489 }
490
491 // If we've reached this point, there was no violation.
492 memDepViolator = NULL;
493 }
494
495 return store_fault;
496}
497
template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    // Retire the load at the head of the load queue.
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %#x\n",
            loadQueue[loadHead]->readPC());

    // Clear the smart pointer so the DynInst can be freed, then advance
    // the circular head index.
    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}
513
template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    // Retire, in program order, every load at or older than youngest_inst.
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}
524
template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    // Walk from the head of the store queue and flag every committed
    // store (seqNum <= youngest_inst) as eligible for writeback.
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            // Stores sit in program order, so stop at the first store
            // younger than the commit point.
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%#x [sn:%lli]\n",
                    storeQueue[store_idx].inst->readPC(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}
554
555template <class Impl>
556void
557LSQUnit<Impl>::writebackStores()
558{
559 while (storesToWB > 0 &&
560 storeWBIdx != storeTail &&
561 storeQueue[storeWBIdx].inst &&
562 storeQueue[storeWBIdx].canWB &&
563 usedPorts < cachePorts) {
564
444
445 if (storeQueue[store_idx].size == 0) {
446 DPRINTF(LSQUnit,"Fault on Store PC %#x, [sn:%lli],Size = 0\n",
447 store_inst->readPC(),store_inst->seqNum);
448
449 return store_fault;
450 }
451
452 assert(store_fault == NoFault);
453
454 if (store_inst->isStoreConditional()) {
455 // Store conditionals need to set themselves as able to
456 // writeback if we haven't had a fault by here.
457 storeQueue[store_idx].canWB = true;
458
459 ++storesToWB;
460 }
461
462 if (!memDepViolator) {
463 while (load_idx != loadTail) {
464 // Really only need to check loads that have actually executed
465 // It's safe to check all loads because effAddr is set to
466 // InvalAddr when the dyn inst is created.
467
468 // @todo: For now this is extra conservative, detecting a
469 // violation if the addresses match assuming all accesses
470 // are quad word accesses.
471
472 // @todo: Fix this, magic number being used here
473 if ((loadQueue[load_idx]->effAddr >> 8) ==
474 (store_inst->effAddr >> 8)) {
475 // A load incorrectly passed this store. Squash and refetch.
476 // For now return a fault to show that it was unsuccessful.
477 memDepViolator = loadQueue[load_idx];
478
479 return genMachineCheckFault();
480 }
481
482 incrLdIdx(load_idx);
483 }
484
485 // If we've reached this point, there was no violation.
486 memDepViolator = NULL;
487 }
488
489 return store_fault;
490}
491
492template <class Impl>
493void
494LSQUnit<Impl>::commitLoad()
495{
496 assert(loadQueue[loadHead]);
497
498 DPRINTF(LSQUnit, "Committing head load instruction, PC %#x\n",
499 loadQueue[loadHead]->readPC());
500
501 loadQueue[loadHead] = NULL;
502
503 incrLdIdx(loadHead);
504
505 --loads;
506}
507
508template <class Impl>
509void
510LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
511{
512 assert(loads == 0 || loadQueue[loadHead]);
513
514 while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
515 commitLoad();
516 }
517}
518
template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    // Walk from the head of the store queue and flag every committed
    // store (seqNum <= youngest_inst) as eligible for writeback.
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            // Stores sit in program order, so stop at the first store
            // younger than the commit point.
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%#x [sn:%lli]\n",
                    storeQueue[store_idx].inst->readPC(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}
548
549template <class Impl>
550void
551LSQUnit<Impl>::writebackStores()
552{
553 while (storesToWB > 0 &&
554 storeWBIdx != storeTail &&
555 storeQueue[storeWBIdx].inst &&
556 storeQueue[storeWBIdx].canWB &&
557 usedPorts < cachePorts) {
558
559 if (isStoreBlocked) {
560 DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
561 " is blocked!\n");
562 break;
563 }
564
565 // Store didn't write any data so no need to write it back to
566 // memory.
567 if (storeQueue[storeWBIdx].size == 0) {
568 completeStore(storeWBIdx);
569
570 incrStIdx(storeWBIdx);
571
572 continue;
573 }
565 // Store didn't write any data so no need to write it back to
566 // memory.
567 if (storeQueue[storeWBIdx].size == 0) {
568 completeStore(storeWBIdx);
569
570 incrStIdx(storeWBIdx);
571
572 continue;
573 }
574/*
575 if (dcacheInterface && dcacheInterface->isBlocked()) {
576 DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
577 " is blocked!\n");
578 break;
579 }
580*/
574
581 ++usedPorts;
582
583 if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
584 incrStIdx(storeWBIdx);
585
586 continue;
587 }
588
589 assert(storeQueue[storeWBIdx].req);
590 assert(!storeQueue[storeWBIdx].committed);
591
592 DynInstPtr inst = storeQueue[storeWBIdx].inst;
593
594 Request *req = storeQueue[storeWBIdx].req;
595 storeQueue[storeWBIdx].committed = true;
596
597 assert(!inst->memData);
598 inst->memData = new uint8_t[64];
575 ++usedPorts;
576
577 if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
578 incrStIdx(storeWBIdx);
579
580 continue;
581 }
582
583 assert(storeQueue[storeWBIdx].req);
584 assert(!storeQueue[storeWBIdx].committed);
585
586 DynInstPtr inst = storeQueue[storeWBIdx].inst;
587
588 Request *req = storeQueue[storeWBIdx].req;
589 storeQueue[storeWBIdx].committed = true;
590
591 assert(!inst->memData);
592 inst->memData = new uint8_t[64];
599 memcpy(inst->memData, (uint8_t *)&storeQueue[storeWBIdx].data, req->getSize());
593 memcpy(inst->memData, (uint8_t *)&storeQueue[storeWBIdx].data,
594 req->getSize());
600
601 PacketPtr data_pkt = new Packet(req, Packet::WriteReq, Packet::Broadcast);
602 data_pkt->dataStatic(inst->memData);
603
595
596 PacketPtr data_pkt = new Packet(req, Packet::WriteReq, Packet::Broadcast);
597 data_pkt->dataStatic(inst->memData);
598
599 LSQSenderState *state = new LSQSenderState;
600 state->isLoad = false;
601 state->idx = storeWBIdx;
602 state->inst = inst;
603 data_pkt->senderState = state;
604
604 DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%#x "
605 "to Addr:%#x, data:%#x [sn:%lli]\n",
606 storeWBIdx, storeQueue[storeWBIdx].inst->readPC(),
607 req->getPaddr(), *(inst->memData),
608 storeQueue[storeWBIdx].inst->seqNum);
609
610 if (!dcachePort->sendTiming(data_pkt)) {
611 // Need to handle becoming blocked on a store.
605 DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%#x "
606 "to Addr:%#x, data:%#x [sn:%lli]\n",
607 storeWBIdx, storeQueue[storeWBIdx].inst->readPC(),
608 req->getPaddr(), *(inst->memData),
609 storeQueue[storeWBIdx].inst->seqNum);
610
611 if (!dcachePort->sendTiming(data_pkt)) {
612 // Need to handle becoming blocked on a store.
613 isStoreBlocked = true;
612 } else {
614 } else {
613 /*
614 StoreCompletionEvent *store_event = new
615 StoreCompletionEvent(storeWBIdx, NULL, this);
616 */
617 if (isStalled() &&
618 storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
619 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
620 "load idx:%i\n",
621 stallingStoreIsn, stallingLoadIdx);
622 stalled = false;
623 stallingStoreIsn = 0;
624 iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
625 }
615 if (isStalled() &&
616 storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
617 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
618 "load idx:%i\n",
619 stallingStoreIsn, stallingLoadIdx);
620 stalled = false;
621 stallingStoreIsn = 0;
622 iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
623 }
626/*
627 typename LdWritebackEvent *wb = NULL;
628 if (req->flags & LOCKED) {
629 // Stx_C should not generate a system port transaction
630 // if it misses in the cache, but that might be hard
631 // to accomplish without explicit cache support.
632 wb = new typename
633 LdWritebackEvent(storeQueue[storeWBIdx].inst,
634 iewStage);
635 store_event->wbEvent = wb;
624
625 if (!(req->getFlags() & LOCKED)) {
626 assert(!storeQueue[storeWBIdx].inst->isStoreConditional());
627 // Non-store conditionals do not need a writeback.
628 state->noWB = true;
636 }
629 }
637*/
630
638 if (data_pkt->result != Packet::Success) {
639 DPRINTF(LSQUnit,"D-Cache Write Miss on idx:%i!\n",
640 storeWBIdx);
641
642 DPRINTF(Activity, "Active st accessing mem miss [sn:%lli]\n",
643 storeQueue[storeWBIdx].inst->seqNum);
644
645 //mshrSeqNums.push_back(storeQueue[storeWBIdx].inst->seqNum);
646
647 //DPRINTF(LSQUnit, "Added MSHR. count = %i\n",mshrSeqNums.size());
648
649 // @todo: Increment stat here.
650 } else {
651 DPRINTF(LSQUnit,"D-Cache: Write Hit on idx:%i !\n",
652 storeWBIdx);
653
654 DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n",
655 storeQueue[storeWBIdx].inst->seqNum);
656 }
657
658 incrStIdx(storeWBIdx);
659 }
660 }
661
662 // Not sure this should set it to 0.
663 usedPorts = 0;
664
665 assert(stores >= 0 && storesToWB >= 0);
666}
667
668/*template <class Impl>
669void
670LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
671{
672 list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
673 mshrSeqNums.end(),
674 seqNum);
675
676 if (mshr_it != mshrSeqNums.end()) {
677 mshrSeqNums.erase(mshr_it);
678 DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
679 }
680}*/
681
// Squash every in-flight load and store younger than squashed_num.
// Both queues are walked from the tail (youngest entry) toward the head;
// stores already marked canWB are committed and are left untouched.
682 template <class Impl>
683 void
684 LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
685 {
686 DPRINTF(LSQUnit, "Squashing until [sn:%lli]!"
687 "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);
688
// Start at the youngest load: the slot just before the tail.
689 int load_idx = loadTail;
690 decrLdIdx(load_idx);
691
692 while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
693 DPRINTF(LSQUnit,"Load Instruction PC %#x squashed, "
694 "[sn:%lli]\n",
695 loadQueue[load_idx]->readPC(),
696 loadQueue[load_idx]->seqNum);
697
// If the squashed load is the one a store was stalled on, the stall
// bookkeeping goes away with it.
698 if (isStalled() && load_idx == stallingLoadIdx) {
699 stalled = false;
700 stallingStoreIsn = 0;
701 stallingLoadIdx = 0;
702 }
703
704 // Clear the smart pointer to make sure it is decremented.
705 loadQueue[load_idx]->squashed = true;
706 loadQueue[load_idx] = NULL;
707 --loads;
708
709 // Inefficient!
710 loadTail = load_idx;
711
712 decrLdIdx(load_idx);
713 }
714
// Forget a blocked load if it is younger than the squash point.
715 if (isLoadBlocked) {
716 if (squashed_num < blockedLoadSeqNum) {
717 isLoadBlocked = false;
718 loadBlockedHandled = false;
719 blockedLoadSeqNum = 0;
720 }
721 }
722
// Same tail-to-head walk over the store queue.
723 int store_idx = storeTail;
724 decrStIdx(store_idx);
725
726 while (stores != 0 &&
727 storeQueue[store_idx].inst->seqNum > squashed_num) {
728 // Instructions marked as can WB are already committed.
729 if (storeQueue[store_idx].canWB) {
730 break;
731 }
732
733 DPRINTF(LSQUnit,"Store Instruction PC %#x squashed, "
734 "idx:%i [sn:%lli]\n",
735 storeQueue[store_idx].inst->readPC(),
736 store_idx, storeQueue[store_idx].inst->seqNum);
737
738 // I don't think this can happen. It should have been cleared
739 // by the stalling load.
740 if (isStalled() &&
741 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
// NOTE(review): the two assignments below look unreachable if panic()
// never returns -- presumably dead code; confirm panic() semantics.
742 panic("Is stalled should have been cleared by stalling load!\n");
743 stalled = false;
744 stallingStoreIsn = 0;
745 }
746
747 // Clear the smart pointer to make sure it is decremented.
748 storeQueue[store_idx].inst->squashed = true;
749 storeQueue[store_idx].inst = NULL;
750 storeQueue[store_idx].canWB = 0;
751
// Drop the memory request as well.
752 storeQueue[store_idx].req = NULL;
753 --stores;
754
755 // Inefficient!
756 storeTail = store_idx;
757
758 decrStIdx(store_idx);
759 }
760 }
761
762template <class Impl>
763void
631 if (data_pkt->result != Packet::Success) {
632 DPRINTF(LSQUnit,"D-Cache Write Miss on idx:%i!\n",
633 storeWBIdx);
634
635 DPRINTF(Activity, "Active st accessing mem miss [sn:%lli]\n",
636 storeQueue[storeWBIdx].inst->seqNum);
637
638 //mshrSeqNums.push_back(storeQueue[storeWBIdx].inst->seqNum);
639
640 //DPRINTF(LSQUnit, "Added MSHR. count = %i\n",mshrSeqNums.size());
641
642 // @todo: Increment stat here.
643 } else {
644 DPRINTF(LSQUnit,"D-Cache: Write Hit on idx:%i !\n",
645 storeWBIdx);
646
647 DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n",
648 storeQueue[storeWBIdx].inst->seqNum);
649 }
650
651 incrStIdx(storeWBIdx);
652 }
653 }
654
655 // Not sure this should set it to 0.
656 usedPorts = 0;
657
658 assert(stores >= 0 && storesToWB >= 0);
659}
660
661/*template <class Impl>
662void
663LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
664{
665 list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
666 mshrSeqNums.end(),
667 seqNum);
668
669 if (mshr_it != mshrSeqNums.end()) {
670 mshrSeqNums.erase(mshr_it);
671 DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
672 }
673}*/
674
// Squash every in-flight load and store younger than squashed_num.
// Both queues are walked from the tail (youngest entry) toward the head;
// stores already marked canWB are committed and are left untouched.
675 template <class Impl>
676 void
677 LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
678 {
679 DPRINTF(LSQUnit, "Squashing until [sn:%lli]!"
680 "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);
681
// Start at the youngest load: the slot just before the tail.
682 int load_idx = loadTail;
683 decrLdIdx(load_idx);
684
685 while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
686 DPRINTF(LSQUnit,"Load Instruction PC %#x squashed, "
687 "[sn:%lli]\n",
688 loadQueue[load_idx]->readPC(),
689 loadQueue[load_idx]->seqNum);
690
// If the squashed load is the one a store was stalled on, the stall
// bookkeeping goes away with it.
691 if (isStalled() && load_idx == stallingLoadIdx) {
692 stalled = false;
693 stallingStoreIsn = 0;
694 stallingLoadIdx = 0;
695 }
696
697 // Clear the smart pointer to make sure it is decremented.
698 loadQueue[load_idx]->squashed = true;
699 loadQueue[load_idx] = NULL;
700 --loads;
701
702 // Inefficient!
703 loadTail = load_idx;
704
705 decrLdIdx(load_idx);
706 }
707
// Forget a blocked load if it is younger than the squash point.
708 if (isLoadBlocked) {
709 if (squashed_num < blockedLoadSeqNum) {
710 isLoadBlocked = false;
711 loadBlockedHandled = false;
712 blockedLoadSeqNum = 0;
713 }
714 }
715
// Same tail-to-head walk over the store queue.
716 int store_idx = storeTail;
717 decrStIdx(store_idx);
718
719 while (stores != 0 &&
720 storeQueue[store_idx].inst->seqNum > squashed_num) {
721 // Instructions marked as can WB are already committed.
722 if (storeQueue[store_idx].canWB) {
723 break;
724 }
725
726 DPRINTF(LSQUnit,"Store Instruction PC %#x squashed, "
727 "idx:%i [sn:%lli]\n",
728 storeQueue[store_idx].inst->readPC(),
729 store_idx, storeQueue[store_idx].inst->seqNum);
730
731 // I don't think this can happen. It should have been cleared
732 // by the stalling load.
733 if (isStalled() &&
734 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
// NOTE(review): the two assignments below look unreachable if panic()
// never returns -- presumably dead code; confirm panic() semantics.
735 panic("Is stalled should have been cleared by stalling load!\n");
736 stalled = false;
737 stallingStoreIsn = 0;
738 }
739
740 // Clear the smart pointer to make sure it is decremented.
741 storeQueue[store_idx].inst->squashed = true;
742 storeQueue[store_idx].inst = NULL;
743 storeQueue[store_idx].canWB = 0;
744
// Drop the memory request as well.
745 storeQueue[store_idx].req = NULL;
746 --stores;
747
748 // Inefficient!
749 storeTail = store_idx;
750
751 decrStIdx(store_idx);
752 }
753 }
754
// Writeback handler for an instruction whose D-cache access has returned:
// finish the access (copying data into place) and hand the instruction to
// the IEW stage for commit.  Squashed instructions are dropped early.
755 template <class Impl>
756 void
757 LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
758 {
// Wake the CPU so this cycle's activity is registered.
759 iewStage->wakeCPU();
760
761 // Squashed instructions do not need to complete their access.
// (A squashed instruction arriving here must not be a store.)
762 if (inst->isSquashed()) {
763 assert(!inst->isStore());
764 return;
765 }
766
// Complete the access only once: skip if already marked executed.
767 if (!inst->isExecuted()) {
768 inst->setExecuted();
769
770 // Complete access to copy data to proper place.
771 inst->completeAcc(pkt);
772 }
773
774 // Need to insert instruction into queue to commit
775 iewStage->instToCommit(inst);
776
777 iewStage->activityThisCycle();
778 }
779
// Mark the store at store_idx as complete.  If it is the queue head,
// retire it -- together with any contiguous run of already-completed
// stores behind it -- from the store queue, release any load stalled on
// this store, and report completion to the checker CPU if one exists.
780 template <class Impl>
781 void
764LSQUnit<Impl>::completeStore(int store_idx)
765 {
766 assert(storeQueue[store_idx].inst);
767 storeQueue[store_idx].completed = true;
768 --storesToWB;
769 // A bit conservative because a store completion may not free up entries,
770 // but hopefully avoids two store completions in one cycle from making
771 // the CPU tick twice.
772 cpu->activityThisCycle();
773
// Advance the head past every completed store so the freed entries are
// accounted for; the LSQ is told to update next cycle.
774 if (store_idx == storeHead) {
775 do {
776 incrStIdx(storeHead);
777
778 --stores;
779 } while (storeQueue[storeHead].completed &&
780 storeHead != storeTail);
781
782 iewStage->updateLSQNextCycle = true;
783 }
784
785 DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
786 "idx:%i\n",
787 storeQueue[store_idx].inst->seqNum, store_idx, storeHead);
788
// If a load was stalled waiting on this store, clear the stall and
// replay that load.
789 if (isStalled() &&
790 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
791 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
792 "load idx:%i\n",
793 stallingStoreIsn, stallingLoadIdx);
794 stalled = false;
795 stallingStoreIsn = 0;
796 iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
797 }
798
799 storeQueue[store_idx].inst->setCompleted();
800
801 // Tell the checker we've completed this instruction. Some stores
802 // may get reported twice to the checker, but the checker can
803 // handle that case.
804 if (cpu->checker) {
805 cpu->checker->tick(storeQueue[store_idx].inst);
806 }
807 }
808
809template <class Impl>
810inline void
811LSQUnit<Impl>::incrStIdx(int &store_idx)
812{
813 if (++store_idx >= SQEntries)
814 store_idx = 0;
815}
816
817template <class Impl>
818inline void
819LSQUnit<Impl>::decrStIdx(int &store_idx)
820{
821 if (--store_idx < 0)
822 store_idx += SQEntries;
823}
824
825template <class Impl>
826inline void
827LSQUnit<Impl>::incrLdIdx(int &load_idx)
828{
829 if (++load_idx >= LQEntries)
830 load_idx = 0;
831}
832
833template <class Impl>
834inline void
835LSQUnit<Impl>::decrLdIdx(int &load_idx)
836{
837 if (--load_idx < 0)
838 load_idx += LQEntries;
839}
840
841template <class Impl>
842void
843LSQUnit<Impl>::dumpInsts()
844{
845 cprintf("Load store queue: Dumping instructions.\n");
846 cprintf("Load queue size: %i\n", loads);
847 cprintf("Load queue: ");
848
849 int load_idx = loadHead;
850
851 while (load_idx != loadTail && loadQueue[load_idx]) {
852 cprintf("%#x ", loadQueue[load_idx]->readPC());
853
854 incrLdIdx(load_idx);
855 }
856
857 cprintf("Store queue size: %i\n", stores);
858 cprintf("Store queue: ");
859
860 int store_idx = storeHead;
861
862 while (store_idx != storeTail && storeQueue[store_idx].inst) {
863 cprintf("%#x ", storeQueue[store_idx].inst->readPC());
864
865 incrStIdx(store_idx);
866 }
867
868 cprintf("\n");
869}
// Mark the store at store_idx as complete.  If it is the queue head,
// retire it -- together with any contiguous run of already-completed
// stores behind it -- from the store queue, release any load stalled on
// this store, and report completion to the checker CPU if one exists.
782LSQUnit<Impl>::completeStore(int store_idx)
783 {
784 assert(storeQueue[store_idx].inst);
785 storeQueue[store_idx].completed = true;
786 --storesToWB;
787 // A bit conservative because a store completion may not free up entries,
788 // but hopefully avoids two store completions in one cycle from making
789 // the CPU tick twice.
790 cpu->activityThisCycle();
791
// Advance the head past every completed store so the freed entries are
// accounted for; the LSQ is told to update next cycle.
792 if (store_idx == storeHead) {
793 do {
794 incrStIdx(storeHead);
795
796 --stores;
797 } while (storeQueue[storeHead].completed &&
798 storeHead != storeTail);
799
800 iewStage->updateLSQNextCycle = true;
801 }
802
803 DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
804 "idx:%i\n",
805 storeQueue[store_idx].inst->seqNum, store_idx, storeHead);
806
// If a load was stalled waiting on this store, clear the stall and
// replay that load.
807 if (isStalled() &&
808 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
809 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
810 "load idx:%i\n",
811 stallingStoreIsn, stallingLoadIdx);
812 stalled = false;
813 stallingStoreIsn = 0;
814 iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
815 }
816
817 storeQueue[store_idx].inst->setCompleted();
818
819 // Tell the checker we've completed this instruction. Some stores
820 // may get reported twice to the checker, but the checker can
821 // handle that case.
822 if (cpu->checker) {
823 cpu->checker->tick(storeQueue[store_idx].inst);
824 }
825 }
826
827template <class Impl>
828inline void
829LSQUnit<Impl>::incrStIdx(int &store_idx)
830{
831 if (++store_idx >= SQEntries)
832 store_idx = 0;
833}
834
835template <class Impl>
836inline void
837LSQUnit<Impl>::decrStIdx(int &store_idx)
838{
839 if (--store_idx < 0)
840 store_idx += SQEntries;
841}
842
843template <class Impl>
844inline void
845LSQUnit<Impl>::incrLdIdx(int &load_idx)
846{
847 if (++load_idx >= LQEntries)
848 load_idx = 0;
849}
850
851template <class Impl>
852inline void
853LSQUnit<Impl>::decrLdIdx(int &load_idx)
854{
855 if (--load_idx < 0)
856 load_idx += LQEntries;
857}
858
859template <class Impl>
860void
861LSQUnit<Impl>::dumpInsts()
862{
863 cprintf("Load store queue: Dumping instructions.\n");
864 cprintf("Load queue size: %i\n", loads);
865 cprintf("Load queue: ");
866
867 int load_idx = loadHead;
868
869 while (load_idx != loadTail && loadQueue[load_idx]) {
870 cprintf("%#x ", loadQueue[load_idx]->readPC());
871
872 incrLdIdx(load_idx);
873 }
874
875 cprintf("Store queue size: %i\n", stores);
876 cprintf("Store queue: ");
877
878 int store_idx = storeHead;
879
880 while (store_idx != storeTail && storeQueue[store_idx].inst) {
881 cprintf("%#x ", storeQueue[store_idx].inst->readPC());
882
883 incrStIdx(store_idx);
884 }
885
886 cprintf("\n");
887}