lsq_unit_impl.hh (10023:91faf6649de0) lsq_unit_impl.hh (10030:b531e328342d)
1
2/*
3 * Copyright (c) 2010-2013 ARM Limited
4 * All rights reserved
5 *
6 * The license below extends only to copyright in the software and shall
7 * not be construed as granting a license to any other intellectual
8 * property including but not limited to intellectual property relating
9 * to a hardware implementation of the functionality of the software
10 * licensed hereunder. You may use the software subject to the license
11 * terms below provided that you ensure that this notice is replicated
12 * unmodified and in its entirety in all distributions of the software,
13 * modified or unmodified, in source code or in binary form.
14 *
15 * Copyright (c) 2004-2005 The Regents of The University of Michigan
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Kevin Lim
42 * Korey Sewell
43 */
44
45#ifndef __CPU_O3_LSQ_UNIT_IMPL_HH__
46#define __CPU_O3_LSQ_UNIT_IMPL_HH__
47
48#include "arch/generic/debugfaults.hh"
49#include "arch/locked_mem.hh"
50#include "base/str.hh"
51#include "config/the_isa.hh"
52#include "cpu/checker/cpu.hh"
53#include "cpu/o3/lsq.hh"
54#include "cpu/o3/lsq_unit.hh"
55#include "debug/Activity.hh"
56#include "debug/IEW.hh"
57#include "debug/LSQUnit.hh"
58#include "debug/O3PipeView.hh"
59#include "mem/packet.hh"
60#include "mem/request.hh"
61
62template<class Impl>
63LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
64 LSQUnit *lsq_ptr)
65 : Event(Default_Pri, AutoDelete),
66 inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
67{
68}
69
70template<class Impl>
71void
72LSQUnit<Impl>::WritebackEvent::process()
73{
74 assert(!lsqPtr->cpu->switchedOut());
75
76 lsqPtr->writeback(inst, pkt);
77
78 if (pkt->senderState)
79 delete pkt->senderState;
80
81 delete pkt->req;
82 delete pkt;
83}
84
85template<class Impl>
86const char *
87LSQUnit<Impl>::WritebackEvent::description() const
88{
89 return "Store writeback";
90}
91
92template<class Impl>
93void
94LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
95{
96 LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
97 DynInstPtr inst = state->inst;
98 DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum);
99 DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum);
100
101 //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);
102
103 // If this is a split access, wait until all packets are received.
104 if (TheISA::HasUnalignedMemAcc && !state->complete()) {
105 delete pkt->req;
106 delete pkt;
107 return;
108 }
109
110 assert(!cpu->switchedOut());
111 if (inst->isSquashed()) {
112 iewStage->decrWb(inst->seqNum);
113 } else {
114 if (!state->noWB) {
115 if (!TheISA::HasUnalignedMemAcc || !state->isSplit ||
116 !state->isLoad) {
117 writeback(inst, pkt);
118 } else {
119 writeback(inst, state->mainPkt);
120 }
121 }
122
123 if (inst->isStore()) {
124 completeStore(state->idx);
125 }
126 }
127
128 if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
129 delete state->mainPkt->req;
130 delete state->mainPkt;
131 }
132
133 pkt->req->setAccessLatency();
134 cpu->ppDataAccessComplete->notify(std::make_pair(inst, pkt));
135
136 delete state;
137 delete pkt->req;
138 delete pkt;
139}
140
141template <class Impl>
142LSQUnit<Impl>::LSQUnit()
143 : loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
144 isStoreBlocked(false), isLoadBlocked(false),
145 loadBlockedHandled(false), storeInFlight(false), hasPendingPkt(false)
146{
147}
148
149template<class Impl>
150void
151LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
152 LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
153 unsigned id)
154{
155 cpu = cpu_ptr;
156 iewStage = iew_ptr;
157
158 DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id);
159
160 lsq = lsq_ptr;
161
162 lsqID = id;
163
164 // Add 1 for the sentinel entry (they are circular queues).
165 LQEntries = maxLQEntries + 1;
166 SQEntries = maxSQEntries + 1;
167
168 //Due to uint8_t index in LSQSenderState
169 assert(LQEntries <= 256);
170 assert(SQEntries <= 256);
171
172 loadQueue.resize(LQEntries);
173 storeQueue.resize(SQEntries);
174
175 depCheckShift = params->LSQDepCheckShift;
176 checkLoads = params->LSQCheckLoads;
177 cachePorts = params->cachePorts;
178 needsTSO = params->needsTSO;
179
180 resetState();
181}
182
183
184template<class Impl>
185void
186LSQUnit<Impl>::resetState()
187{
188 loads = stores = storesToWB = 0;
189
190 loadHead = loadTail = 0;
191
192 storeHead = storeWBIdx = storeTail = 0;
193
194 usedPorts = 0;
195
196 retryPkt = NULL;
197 memDepViolator = NULL;
198
199 blockedLoadSeqNum = 0;
200
201 stalled = false;
202 isLoadBlocked = false;
203 loadBlockedHandled = false;
204
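// Mask off the block offset bits so addresses can be compared at
// cache-block granularity (used by the snoop checks below).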
205 cacheBlockMask = ~(cpu->cacheLineSize() - 1);
206}
207
208template<class Impl>
209std::string
210LSQUnit<Impl>::name() const
211{
212 if (Impl::MaxThreads == 1) {
213 return iewStage->name() + ".lsq";
214 } else {
215 return iewStage->name() + ".lsq.thread" + to_string(lsqID);
216 }
217}
218
219template<class Impl>
220void
221LSQUnit<Impl>::regStats()
222{
223 lsqForwLoads
224 .name(name() + ".forwLoads")
225 .desc("Number of loads that had data forwarded from stores");
226
227 invAddrLoads
228 .name(name() + ".invAddrLoads")
229 .desc("Number of loads ignored due to an invalid address");
230
231 lsqSquashedLoads
232 .name(name() + ".squashedLoads")
233 .desc("Number of loads squashed");
234
235 lsqIgnoredResponses
236 .name(name() + ".ignoredResponses")
237 .desc("Number of memory responses ignored because the instruction is squashed");
238
239 lsqMemOrderViolation
240 .name(name() + ".memOrderViolation")
241 .desc("Number of memory ordering violations");
242
243 lsqSquashedStores
244 .name(name() + ".squashedStores")
245 .desc("Number of stores squashed");
246
247 invAddrSwpfs
248 .name(name() + ".invAddrSwpfs")
249 .desc("Number of software prefetches ignored due to an invalid address");
250
251 lsqBlockedLoads
252 .name(name() + ".blockedLoads")
253 .desc("Number of blocked loads due to partial load-store forwarding");
254
255 lsqRescheduledLoads
256 .name(name() + ".rescheduledLoads")
257 .desc("Number of loads that were rescheduled");
258
259 lsqCacheBlocked
260 .name(name() + ".cacheBlocked")
261 .desc("Number of times an access to memory failed due to the cache being blocked");
262}
263
264template<class Impl>
265void
266LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port)
267{
268 dcachePort = dcache_port;
269}
270
271template<class Impl>
272void
273LSQUnit<Impl>::clearLQ()
274{
275 loadQueue.clear();
276}
277
278template<class Impl>
279void
280LSQUnit<Impl>::clearSQ()
281{
282 storeQueue.clear();
283}
284
285template<class Impl>
286void
287LSQUnit<Impl>::drainSanityCheck() const
288{
289 for (int i = 0; i < loadQueue.size(); ++i)
290 assert(!loadQueue[i]);
291
292 assert(storesToWB == 0);
293 assert(!retryPkt);
294}
295
296template<class Impl>
297void
298LSQUnit<Impl>::takeOverFrom()
299{
300 resetState();
301}
302
303template<class Impl>
304void
305LSQUnit<Impl>::resizeLQ(unsigned size)
306{
307 unsigned size_plus_sentinel = size + 1;
308 assert(size_plus_sentinel >= LQEntries);
309
310 if (size_plus_sentinel > LQEntries) {
311 while (size_plus_sentinel > loadQueue.size()) {
312 DynInstPtr dummy;
313 loadQueue.push_back(dummy);
314 LQEntries++;
315 }
316 } else {
317 LQEntries = size_plus_sentinel;
318 }
319
320 assert(LQEntries <= 256);
321}
322
323template<class Impl>
324void
325LSQUnit<Impl>::resizeSQ(unsigned size)
326{
327 unsigned size_plus_sentinel = size + 1;
328 if (size_plus_sentinel > SQEntries) {
329 while (size_plus_sentinel > storeQueue.size()) {
330 SQEntry dummy;
331 storeQueue.push_back(dummy);
332 SQEntries++;
333 }
334 } else {
335 SQEntries = size_plus_sentinel;
336 }
337
338 assert(SQEntries <= 256);
339}
340
341template <class Impl>
342void
343LSQUnit<Impl>::insert(DynInstPtr &inst)
344{
345 assert(inst->isMemRef());
346
347 assert(inst->isLoad() || inst->isStore());
348
349 if (inst->isLoad()) {
350 insertLoad(inst);
351 } else {
352 insertStore(inst);
353 }
354
355 inst->setInLSQ();
356}
357
358template <class Impl>
359void
360LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
361{
362 assert((loadTail + 1) % LQEntries != loadHead);
363 assert(loads < LQEntries);
364
365 DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
366 load_inst->pcState(), loadTail, load_inst->seqNum);
367
368 load_inst->lqIdx = loadTail;
369
370 if (stores == 0) {
371 load_inst->sqIdx = -1;
372 } else {
373 load_inst->sqIdx = storeTail;
374 }
375
376 loadQueue[loadTail] = load_inst;
377
378 incrLdIdx(loadTail);
379
380 ++loads;
381}
382
383template <class Impl>
384void
385LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
386{
387 // Make sure it is not full before inserting an instruction.
388 assert((storeTail + 1) % SQEntries != storeHead);
389 assert(stores < SQEntries);
390
391 DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
392 store_inst->pcState(), storeTail, store_inst->seqNum);
393
394 store_inst->sqIdx = storeTail;
395 store_inst->lqIdx = loadTail;
396
397 storeQueue[storeTail] = SQEntry(store_inst);
398
399 incrStIdx(storeTail);
400
401 ++stores;
402}
403
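// Return the load that caused the most recent memory-order violation,
// clearing the record so it is only reported once.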
404template <class Impl>
405typename Impl::DynInstPtr
406LSQUnit<Impl>::getMemDepViolator()
407{
408 DynInstPtr temp = memDepViolator;
409
410 memDepViolator = NULL;
411
412 return temp;
413}
414
415template <class Impl>
416unsigned
417LSQUnit<Impl>::numFreeEntries()
418{
419 unsigned free_lq_entries = LQEntries - loads;
420 unsigned free_sq_entries = SQEntries - stores;
421
422 // Both the LQ and SQ entries have an extra dummy entry to differentiate
423 // empty/full conditions. Subtract 1 from the free entries.
424 if (free_lq_entries < free_sq_entries) {
425 return free_lq_entries - 1;
426 } else {
427 return free_sq_entries - 1;
428 }
429}
430
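// An external snoop has been observed: release LL/SC monitors for the
// snooped block and flag any overlapping loads so they can be squashed
// or re-executed.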
431template <class Impl>
432void
433LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
434{
435 int load_idx = loadHead;
436 DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());
437
438 // Unlock the cpu-local monitor when the CPU sees a snoop to a locked
439 // address. The CPU can speculatively execute an LL operation after a pending
440 // SC operation in the pipeline and that can make the cache monitor the CPU
441 // is connected to valid while it really shouldn't be.
442 for (int x = 0; x < cpu->numContexts(); x++) {
443 ThreadContext *tc = cpu->getContext(x);
444 bool no_squash = cpu->thread[x]->noSquashFromTC;
445 cpu->thread[x]->noSquashFromTC = true;
446 TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask);
447 cpu->thread[x]->noSquashFromTC = no_squash;
448 }
449
450 Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;
451
452 DynInstPtr ld_inst = loadQueue[load_idx];
453 if (ld_inst) {
454 Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
455 // Check that this snoop didn't just invalidate our lock flag
456 if (ld_inst->effAddrValid() && load_addr == invalidate_addr &&
457 ld_inst->memReqFlags & Request::LLSC)
458 TheISA::handleLockedSnoopHit(ld_inst.get());
459 }
460
461 // If this is the only load in the LSQ we don't care
462 if (load_idx == loadTail)
463 return;
464
465 incrLdIdx(load_idx);
466
467 while (load_idx != loadTail) {
468 DynInstPtr ld_inst = loadQueue[load_idx];
469
470 if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
471 incrLdIdx(load_idx);
472 continue;
473 }
474
475 Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
476 DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
477 ld_inst->seqNum, load_addr, invalidate_addr);
478
479 if (load_addr == invalidate_addr) {
480 if (ld_inst->possibleLoadViolation()) {
481 DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n",
482 pkt->getAddr(), ld_inst->seqNum);
483
484 // Mark the load for re-execution
485 ld_inst->fault = new ReExec;
486 } else {
487 DPRINTF(LSQUnit, "HitExternal Snoop for addr %#x [sn:%lli]\n",
488 pkt->getAddr(), ld_inst->seqNum);
489
490 // Make sure that we don't lose a snoop hitting a LOCKED
491 // address since the LOCK* flags don't get updated until
492 // commit.
493 if (ld_inst->memReqFlags & Request::LLSC)
494 TheISA::handleLockedSnoopHit(ld_inst.get());
495
496 // If an older load checks this and it's true
497 // then we might have missed the snoop
498 // in which case we need to invalidate to be sure
499 ld_inst->hitExternalSnoop(true);
500 }
501 }
502 incrLdIdx(load_idx);
503 }
504 return;
505}
486
487template <class Impl>
488Fault
489LSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst)
490{
491 Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
492 Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;
493
494 /** @todo in theory you only need to check an instruction that has executed
495 * however, there isn't a good way in the pipeline at the moment to check
496 * all instructions that will execute before the store writes back. Thus,
497 * like the implementation that came before it, we're overly conservative.
498 */
499 while (load_idx != loadTail) {
500 DynInstPtr ld_inst = loadQueue[load_idx];
501 if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
502 incrLdIdx(load_idx);
503 continue;
504 }
505
506 Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
507 Addr ld_eff_addr2 =
508 (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;
509
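// Check whether the two accesses overlap at depCheckShift granularity.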
510 if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
511 if (inst->isLoad()) {
512 // If this load is to the same block as an external snoop
513 // invalidate that we've observed then the load needs to be
514 // squashed as it could have newer data
515 if (ld_inst->hitExternalSnoop()) {
516 if (!memDepViolator ||
517 ld_inst->seqNum < memDepViolator->seqNum) {
518 DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
519 "and [sn:%lli] at address %#x\n",
520 inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
521 memDepViolator = ld_inst;
522
523 ++lsqMemOrderViolation;
524
525 return new GenericISA::M5PanicFault(
526 "Detected fault with inst [sn:%lli] and "
527 "[sn:%lli] at address %#x\n",
528 inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
529 }
530 }
531
532 // Otherwise, mark the load as having a possible load violation, and
533 // if we see a snoop before it's committed, we need to squash it.
534 ld_inst->possibleLoadViolation(true);
535 DPRINTF(LSQUnit, "Found possible load violation at addr: %#x"
536 " between instructions [sn:%lli] and [sn:%lli]\n",
537 inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
538 } else {
539 // A load/store incorrectly passed this store.
540 // Check if we already have a violator, or if it's newer
541 // squash and refetch.
542 if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
543 break;
544
545 DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and "
546 "[sn:%lli] at address %#x\n",
547 inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
548 memDepViolator = ld_inst;
549
550 ++lsqMemOrderViolation;
551
552 return new GenericISA::M5PanicFault("Detected fault with "
553 "inst [sn:%lli] and [sn:%lli] at address %#x\n",
554 inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
555 }
556 }
557
558 incrLdIdx(load_idx);
559 }
560 return NoFault;
561}
562
563
564
565
566template <class Impl>
567Fault
568LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
569{
570 using namespace TheISA;
571 // Execute a specific load.
572 Fault load_fault = NoFault;
573
574 DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
575 inst->pcState(), inst->seqNum);
576
577 assert(!inst->isSquashed());
578
579 load_fault = inst->initiateAcc();
580
581 if (inst->isTranslationDelayed() &&
582 load_fault == NoFault)
583 return load_fault;
584
585 // If the instruction faulted or predicated false, then we need to send it
586 // along to commit without the instruction completing.
587 if (load_fault != NoFault || inst->readPredicate() == false) {
588 // Send this instruction to commit, also make sure iew stage
589 // realizes there is activity.
590 // Mark it as executed unless it is an uncached load that
591 // needs to hit the head of commit.
592 if (inst->readPredicate() == false)
593 inst->forwardOldRegs();
594 DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
595 inst->seqNum,
596 (load_fault != NoFault ? "fault" : "predication"));
597 if (!(inst->hasRequest() && inst->uncacheable()) ||
598 inst->isAtCommit()) {
599 inst->setExecuted();
600 }
601 iewStage->instToCommit(inst);
602 iewStage->activityThisCycle();
603 } else if (!loadBlocked()) {
604 assert(inst->effAddrValid());
605 int load_idx = inst->lqIdx;
606 incrLdIdx(load_idx);
607
608 if (checkLoads)
609 return checkViolations(load_idx, inst);
610 }
611
612 return load_fault;
613}
614
615template <class Impl>
616Fault
617LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
618{
619 using namespace TheISA;
620 // Make sure that a store exists.
621 assert(stores != 0);
622
623 int store_idx = store_inst->sqIdx;
624
625 DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n",
626 store_inst->pcState(), store_inst->seqNum);
627
628 assert(!store_inst->isSquashed());
629
630 // Check the recently completed loads to see if any match this store's
631 // address. If so, then we have a memory ordering violation.
632 int load_idx = store_inst->lqIdx;
633
634 Fault store_fault = store_inst->initiateAcc();
635
636 if (store_inst->isTranslationDelayed() &&
637 store_fault == NoFault)
638 return store_fault;
639
640 if (store_inst->readPredicate() == false)
641 store_inst->forwardOldRegs();
642
643 if (storeQueue[store_idx].size == 0) {
644 DPRINTF(LSQUnit,"Fault on Store PC %s, [sn:%lli], Size = 0\n",
645 store_inst->pcState(), store_inst->seqNum);
646
647 return store_fault;
648 } else if (store_inst->readPredicate() == false) {
649 DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
650 store_inst->seqNum);
651 return store_fault;
652 }
653
654 assert(store_fault == NoFault);
655
656 if (store_inst->isStoreConditional()) {
657 // Store conditionals need to set themselves as able to
658 // writeback if we haven't had a fault by here.
659 storeQueue[store_idx].canWB = true;
660
661 ++storesToWB;
662 }
663
664 return checkViolations(load_idx, store_inst);
665
666}
667
668template <class Impl>
669void
670LSQUnit<Impl>::commitLoad()
671{
672 assert(loadQueue[loadHead]);
673
674 DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n",
675 loadQueue[loadHead]->pcState());
676
677 loadQueue[loadHead] = NULL;
678
679 incrLdIdx(loadHead);
680
681 --loads;
682}
683
684template <class Impl>
685void
686LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
687{
688 assert(loads == 0 || loadQueue[loadHead]);
689
690 while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
691 commitLoad();
692 }
693}
694
695template <class Impl>
696void
697LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
698{
699 assert(stores == 0 || storeQueue[storeHead].inst);
700
701 int store_idx = storeHead;
702
703 while (store_idx != storeTail) {
704 assert(storeQueue[store_idx].inst);
705 // Mark any stores that are now committed and have not yet
706 // been marked as able to write back.
707 if (!storeQueue[store_idx].canWB) {
708 if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
709 break;
710 }
711 DPRINTF(LSQUnit, "Marking store as able to write back, PC "
712 "%s [sn:%lli]\n",
713 storeQueue[store_idx].inst->pcState(),
714 storeQueue[store_idx].inst->seqNum);
715
716 storeQueue[store_idx].canWB = true;
717
718 ++storesToWB;
719 }
720
721 incrStIdx(store_idx);
722 }
723}
724
725template <class Impl>
726void
727LSQUnit<Impl>::writebackPendingStore()
728{
729 if (hasPendingPkt) {
730 assert(pendingPkt != NULL);
731
732 // If the cache is blocked, this will store the packet for retry.
733 if (sendStore(pendingPkt)) {
734 storePostSend(pendingPkt);
735 }
736 pendingPkt = NULL;
737 hasPendingPkt = false;
738 }
739}
740
741template <class Impl>
742void
743LSQUnit<Impl>::writebackStores()
744{
745 // First writeback the second packet from any split store that didn't
746 // complete last cycle because there weren't enough cache ports available.
747 if (TheISA::HasUnalignedMemAcc) {
748 writebackPendingStore();
749 }
750
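// Issue writebacks in program order while stores are ready to go, cache
// ports remain this cycle, and (when TSO is required) no other store is
// already in flight.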
751 while (storesToWB > 0 &&
752 storeWBIdx != storeTail &&
753 storeQueue[storeWBIdx].inst &&
754 storeQueue[storeWBIdx].canWB &&
755 ((!needsTSO) || (!storeInFlight)) &&
756 usedPorts < cachePorts) {
757
758 if (isStoreBlocked || lsq->cacheBlocked()) {
759 DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
760 " is blocked!\n");
761 break;
762 }
763
764 // Store didn't write any data so no need to write it back to
765 // memory.
766 if (storeQueue[storeWBIdx].size == 0) {
767 completeStore(storeWBIdx);
768
769 incrStIdx(storeWBIdx);
770
771 continue;
772 }
773
774 ++usedPorts;
775
776 if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
777 incrStIdx(storeWBIdx);
778
779 continue;
780 }
781
782 assert(storeQueue[storeWBIdx].req);
783 assert(!storeQueue[storeWBIdx].committed);
784
785 if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
786 assert(storeQueue[storeWBIdx].sreqLow);
787 assert(storeQueue[storeWBIdx].sreqHigh);
788 }
789
790 DynInstPtr inst = storeQueue[storeWBIdx].inst;
791
792 Request *req = storeQueue[storeWBIdx].req;
793 RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
794 RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;
795
796 storeQueue[storeWBIdx].committed = true;
797
798 assert(!inst->memData);
799 inst->memData = new uint8_t[64];
800
801 memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());
802
803 MemCmd command =
804 req->isSwap() ? MemCmd::SwapReq :
805 (req->isLLSC() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
806 PacketPtr data_pkt;
807 PacketPtr snd_data_pkt = NULL;
808
809 LSQSenderState *state = new LSQSenderState;
810 state->isLoad = false;
811 state->idx = storeWBIdx;
812 state->inst = inst;
813
814 if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {
815
816 // Build a single data packet if the store isn't split.
817 data_pkt = new Packet(req, command);
818 data_pkt->dataStatic(inst->memData);
819 data_pkt->senderState = state;
820 } else {
821 // Create two packets if the store is split in two.
822 data_pkt = new Packet(sreqLow, command);
823 snd_data_pkt = new Packet(sreqHigh, command);
824
825 data_pkt->dataStatic(inst->memData);
826 snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());
827
828 data_pkt->senderState = state;
829 snd_data_pkt->senderState = state;
830
831 state->isSplit = true;
832 state->outstanding = 2;
833
834 // Can delete the main request now.
835 delete req;
836 req = sreqLow;
837 }
838
839 DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
840 "to Addr:%#x, data:%#x [sn:%lli]\n",
841 storeWBIdx, inst->pcState(),
842 req->getPaddr(), (int)*(inst->memData),
843 inst->seqNum);
844
845 // @todo: Remove this SC hack once the memory system handles it.
846 if (inst->isStoreConditional()) {
847 assert(!storeQueue[storeWBIdx].isSplit);
848 // Disable recording the result temporarily. Writing to
849 // misc regs normally updates the result, but this is not
850 // the desired behavior when handling store conditionals.
851 inst->recordResult(false);
852 bool success = TheISA::handleLockedWrite(inst.get(), req, cacheBlockMask);
853 inst->recordResult(true);
854
855 if (!success) {
856 // Instantly complete this store.
857 DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. "
858 "Instantly completing it.\n",
859 inst->seqNum);
860 WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
861 cpu->schedule(wb, curTick() + 1);
862 if (cpu->checker) {
863 // Make sure to set the LLSC data for verification
864 // if checker is loaded
865 inst->reqToVerify->setExtraData(0);
866 inst->completeAcc(data_pkt);
867 }
868 completeStore(storeWBIdx);
869 incrStIdx(storeWBIdx);
870 continue;
871 }
872 } else {
873 // Non-store conditionals do not need a writeback.
874 state->noWB = true;
875 }
876
877 bool split =
878 TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;
879
880 ThreadContext *thread = cpu->tcBase(lsqID);
881
882 if (req->isMmappedIpr()) {
883 assert(!inst->isStoreConditional());
884 TheISA::handleIprWrite(thread, data_pkt);
885 delete data_pkt;
886 if (split) {
887 assert(snd_data_pkt->req->isMmappedIpr());
888 TheISA::handleIprWrite(thread, snd_data_pkt);
889 delete snd_data_pkt;
890 delete sreqLow;
891 delete sreqHigh;
892 }
893 delete state;
894 delete req;
895 completeStore(storeWBIdx);
896 incrStIdx(storeWBIdx);
897 } else if (!sendStore(data_pkt)) {
898 DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], will "
899 "retry later\n",
900 inst->seqNum);
901
902 // Need to store the second packet, if split.
903 if (split) {
904 state->pktToSend = true;
905 state->pendingPacket = snd_data_pkt;
906 }
907 } else {
908
909 // If split, try to send the second packet too
910 if (split) {
911 assert(snd_data_pkt);
912
913 // Ensure there are enough ports to use.
914 if (usedPorts < cachePorts) {
915 ++usedPorts;
916 if (sendStore(snd_data_pkt)) {
917 storePostSend(snd_data_pkt);
918 } else {
919 DPRINTF(IEW, "D-Cache became blocked when writing"
920 " [sn:%lli] second packet, will retry later\n",
921 inst->seqNum);
922 }
923 } else {
924
925 // Store the packet for when there's free ports.
926 assert(pendingPkt == NULL);
927 pendingPkt = snd_data_pkt;
928 hasPendingPkt = true;
929 }
930 } else {
931
932 // Not a split store.
933 storePostSend(data_pkt);
934 }
935 }
936 }
937
938 // Not sure this should set it to 0.
939 usedPorts = 0;
940
941 assert(stores >= 0 && storesToWB >= 0);
942}
943
944/*template <class Impl>
945void
946LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
947{
948 list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
949 mshrSeqNums.end(),
950 seqNum);
951
952 if (mshr_it != mshrSeqNums.end()) {
953 mshrSeqNums.erase(mshr_it);
954 DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
955 }
956}*/
957
958template <class Impl>
959void
960LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
961{
962 DPRINTF(LSQUnit, "Squashing until [sn:%lli]!"
963 "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);
964
965 int load_idx = loadTail;
966 decrLdIdx(load_idx);
967
968 while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
969 DPRINTF(LSQUnit,"Load Instruction PC %s squashed, "
970 "[sn:%lli]\n",
971 loadQueue[load_idx]->pcState(),
972 loadQueue[load_idx]->seqNum);
973
974 if (isStalled() && load_idx == stallingLoadIdx) {
975 stalled = false;
976 stallingStoreIsn = 0;
977 stallingLoadIdx = 0;
978 }
979
980 // Clear the smart pointer to make sure it is decremented.
981 loadQueue[load_idx]->setSquashed();
982 loadQueue[load_idx] = NULL;
983 --loads;
984
985 // Inefficient!
986 loadTail = load_idx;
987
988 decrLdIdx(load_idx);
989 ++lsqSquashedLoads;
990 }
991
992 if (isLoadBlocked) {
993 if (squashed_num < blockedLoadSeqNum) {
994 isLoadBlocked = false;
995 loadBlockedHandled = false;
996 blockedLoadSeqNum = 0;
997 }
998 }
999
1000 if (memDepViolator && squashed_num < memDepViolator->seqNum) {
1001 memDepViolator = NULL;
1002 }
1003
1004 int store_idx = storeTail;
1005 decrStIdx(store_idx);
1006
1007 while (stores != 0 &&
1008 storeQueue[store_idx].inst->seqNum > squashed_num) {
1009 // Instructions marked as can WB are already committed.
1010 if (storeQueue[store_idx].canWB) {
1011 break;
1012 }
1013
1014 DPRINTF(LSQUnit,"Store Instruction PC %s squashed, "
1015 "idx:%i [sn:%lli]\n",
1016 storeQueue[store_idx].inst->pcState(),
1017 store_idx, storeQueue[store_idx].inst->seqNum);
1018
1019 // I don't think this can happen. It should have been cleared
1020 // by the stalling load.
1021 if (isStalled() &&
873 inst->recordResult(true);
874
875 if (!success) {
876 // Instantly complete this store.
877 DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. "
878 "Instantly completing it.\n",
879 inst->seqNum);
880 WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
881 cpu->schedule(wb, curTick() + 1);
882 if (cpu->checker) {
883 // Make sure to set the LLSC data for verification
884 // if checker is loaded
885 inst->reqToVerify->setExtraData(0);
886 inst->completeAcc(data_pkt);
887 }
888 completeStore(storeWBIdx);
889 incrStIdx(storeWBIdx);
890 continue;
891 }
892 } else {
893 // Stores other than store conditionals do not need a writeback.
894 state->noWB = true;
895 }
896
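// Issue the store: memory-mapped IPR writes are handled functionally and
// completed on the spot, while ordinary stores go to the d-cache as timing
// requests, with a second packet for split (unaligned) accesses.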
897 bool split =
898 TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;
899
900 ThreadContext *thread = cpu->tcBase(lsqID);
901
902 if (req->isMmappedIpr()) {
903 assert(!inst->isStoreConditional());
904 TheISA::handleIprWrite(thread, data_pkt);
905 delete data_pkt;
906 if (split) {
907 assert(snd_data_pkt->req->isMmappedIpr());
908 TheISA::handleIprWrite(thread, snd_data_pkt);
909 delete snd_data_pkt;
910 delete sreqLow;
911 delete sreqHigh;
912 }
913 delete state;
914 delete req;
915 completeStore(storeWBIdx);
916 incrStIdx(storeWBIdx);
917 } else if (!sendStore(data_pkt)) {
918 DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], will "
919 "retry later\n",
920 inst->seqNum);
921
922 // Need to store the second packet, if split.
923 if (split) {
924 state->pktToSend = true;
925 state->pendingPacket = snd_data_pkt;
926 }
927 } else {
928
929 // If split, try to send the second packet too
930 if (split) {
931 assert(snd_data_pkt);
932
933 // Ensure there are enough ports to use.
934 if (usedPorts < cachePorts) {
935 ++usedPorts;
936 if (sendStore(snd_data_pkt)) {
937 storePostSend(snd_data_pkt);
938 } else {
939 DPRINTF(IEW, "D-Cache became blocked when writing"
940 " [sn:%lli] second packet, will retry later\n",
941 inst->seqNum);
942 }
943 } else {
944
945 // Store the packet for when there are free ports.
946 assert(pendingPkt == NULL);
947 pendingPkt = snd_data_pkt;
948 hasPendingPkt = true;
949 }
950 } else {
951
952 // Not a split store.
953 storePostSend(data_pkt);
954 }
955 }
956 }
957
958 // @todo: Verify that resetting usedPorts to 0 here is correct.
959 usedPorts = 0;
960
961 assert(stores >= 0 && storesToWB >= 0);
962}
963
964/*template <class Impl>
965void
966LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
967{
968 list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
969 mshrSeqNums.end(),
970 seqNum);
971
972 if (mshr_it != mshrSeqNums.end()) {
973 mshrSeqNums.erase(mshr_it);
974 DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
975 }
976}*/
977
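// Squash everything younger than squashed_num.  Loads are removed
// unconditionally; the store walk stops at the first store already marked
// ready to write back (canWB), since such stores count as committed.
// Requests that were never handed off to memory are deleted here.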
978template <class Impl>
979void
980LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
981{
982 DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
983 "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);
984
985 int load_idx = loadTail;
986 decrLdIdx(load_idx);
987
988 while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
989 DPRINTF(LSQUnit,"Load Instruction PC %s squashed, "
990 "[sn:%lli]\n",
991 loadQueue[load_idx]->pcState(),
992 loadQueue[load_idx]->seqNum);
993
994 if (isStalled() && load_idx == stallingLoadIdx) {
995 stalled = false;
996 stallingStoreIsn = 0;
997 stallingLoadIdx = 0;
998 }
999
1000 // Clear the smart pointer to make sure it is decremented.
1001 loadQueue[load_idx]->setSquashed();
1002 loadQueue[load_idx] = NULL;
1003 --loads;
1004
1005 // Inefficient: loadTail is rewound one entry per squashed load.
1006 loadTail = load_idx;
1007
1008 decrLdIdx(load_idx);
1009 ++lsqSquashedLoads;
1010 }
1011
1012 if (isLoadBlocked) {
1013 if (squashed_num < blockedLoadSeqNum) {
1014 isLoadBlocked = false;
1015 loadBlockedHandled = false;
1016 blockedLoadSeqNum = 0;
1017 }
1018 }
1019
1020 if (memDepViolator && squashed_num < memDepViolator->seqNum) {
1021 memDepViolator = NULL;
1022 }
1023
1024 int store_idx = storeTail;
1025 decrStIdx(store_idx);
1026
1027 while (stores != 0 &&
1028 storeQueue[store_idx].inst->seqNum > squashed_num) {
1029 // Instructions marked as can WB are already committed.
1030 if (storeQueue[store_idx].canWB) {
1031 break;
1032 }
1033
1034 DPRINTF(LSQUnit,"Store Instruction PC %s squashed, "
1035 "idx:%i [sn:%lli]\n",
1036 storeQueue[store_idx].inst->pcState(),
1037 store_idx, storeQueue[store_idx].inst->seqNum);
1038
1039 // This should not happen: the stall should already have been cleared
1040 // by the stalling load, so treat it as a fatal error.
1041 if (isStalled() &&
1042 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
1043 panic("Is stalled should have been cleared by stalling load!\n");
1044 stalled = false;
1045 stallingStoreIsn = 0;
1046 }
1047
1048 // Clear the smart pointer to make sure it is decremented.
1049 storeQueue[store_idx].inst->setSquashed();
1050 storeQueue[store_idx].inst = NULL;
1051 storeQueue[store_idx].canWB = 0;
1052
1053 // Must delete request now that it wasn't handed off to
1054 // memory. This is quite ugly. @todo: Figure out the proper
1055 // place to really handle request deletes.
1056 delete storeQueue[store_idx].req;
1057 if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
1058 delete storeQueue[store_idx].sreqLow;
1059 delete storeQueue[store_idx].sreqHigh;
1060
1061 storeQueue[store_idx].sreqLow = NULL;
1062 storeQueue[store_idx].sreqHigh = NULL;
1063 }
1064
1065 storeQueue[store_idx].req = NULL;
1066 --stores;
1067
1068 // Inefficient: storeTail is rewound one entry per squashed store.
1069 storeTail = store_idx;
1070
1071 decrStIdx(store_idx);
1072 ++lsqSquashedStores;
1073 }
1074}
1075
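// Bookkeeping after a store packet has been handed to the d-cache: clear
// any load stall caused by this store, mark stores other than store
// conditionals as completed, record an in-flight store when TSO ordering
// is enforced, and advance the writeback index.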
1076template <class Impl>
1077void
1078LSQUnit<Impl>::storePostSend(PacketPtr pkt)
1079{
1080 if (isStalled() &&
1081 storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
1082 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
1083 "load idx:%i\n",
1084 stallingStoreIsn, stallingLoadIdx);
1085 stalled = false;
1086 stallingStoreIsn = 0;
1087 iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
1088 }
1089
1090 if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
1091 // The store is basically completed at this time. This
1092 // only works so long as the checker doesn't try to
1093 // verify the value in memory for stores.
1094 storeQueue[storeWBIdx].inst->setCompleted();
1095
1096 if (cpu->checker) {
1097 cpu->checker->verify(storeQueue[storeWBIdx].inst);
1098 }
1099 }
1100
1101 if (needsTSO) {
1102 storeInFlight = true;
1103 }
1104
1105 incrStIdx(storeWBIdx);
1106}
1107
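// Complete a memory access when its data returns (loads, and stores that
// need a writeback such as store conditionals): unless the instruction was
// squashed, finish the access and hand the instruction to IEW for commit.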
1108template <class Impl>
1109void
1110LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
1111{
1112 iewStage->wakeCPU();
1113
1114 // Squashed instructions do not need to complete their access.
1115 if (inst->isSquashed()) {
1116 iewStage->decrWb(inst->seqNum);
1117 assert(!inst->isStore());
1118 ++lsqIgnoredResponses;
1119 return;
1120 }
1121
1122 if (!inst->isExecuted()) {
1123 inst->setExecuted();
1124
1125 // Complete access to copy data to proper place.
1126 inst->completeAcc(pkt);
1127 }
1128
1129 // Need to insert instruction into queue to commit
1130 iewStage->instToCommit(inst);
1131
1132 iewStage->activityThisCycle();
1133
1134 // see if this load changed the PC
1135 iewStage->checkMisprediction(inst);
1136}
1137
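// Mark the entry at store_idx as completed and retire any run of completed
// stores from the head of the store queue, clearing a stall that was
// waiting on this store and notifying the checker.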
1138template <class Impl>
1139void
1140LSQUnit<Impl>::completeStore(int store_idx)
1141{
1142 assert(storeQueue[store_idx].inst);
1143 storeQueue[store_idx].completed = true;
1144 --storesToWB;
1145 // A bit conservative because a store completion may not free up entries,
1146 // but hopefully avoids two store completions in one cycle from making
1147 // the CPU tick twice.
1148 cpu->wakeCPU();
1149 cpu->activityThisCycle();
1150
1151 if (store_idx == storeHead) {
1152 do {
1153 incrStIdx(storeHead);
1154
1155 --stores;
1156 } while (storeQueue[storeHead].completed &&
1157 storeHead != storeTail);
1158
1159 iewStage->updateLSQNextCycle = true;
1160 }
1161
1162 DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
1163 "idx:%i\n",
1164 storeQueue[store_idx].inst->seqNum, store_idx, storeHead);
1165
1166#if TRACING_ON
1167 if (DTRACE(O3PipeView)) {
1168 storeQueue[store_idx].inst->storeTick =
1169 curTick() - storeQueue[store_idx].inst->fetchTick;
1170 }
1171#endif
1172
1173 if (isStalled() &&
1174 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
1175 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
1176 "load idx:%i\n",
1177 stallingStoreIsn, stallingLoadIdx);
1178 stalled = false;
1179 stallingStoreIsn = 0;
1180 iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
1181 }
1182
1183 storeQueue[store_idx].inst->setCompleted();
1184
1185 if (needsTSO) {
1186 storeInFlight = false;
1187 }
1188
1189 // Tell the checker we've completed this instruction. Some stores
1190 // may get reported twice to the checker, but the checker can
1191 // handle that case.
1192 if (cpu->checker) {
1193 cpu->checker->verify(storeQueue[store_idx].inst);
1194 }
1195}
1196
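// Attempt to send a store packet to the d-cache.  If the port refuses it,
// remember the packet for retry, mark the unit blocked, and tell the LSQ
// which thread needs the retry.  Returns true on success.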
1197template <class Impl>
1198bool
1199LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
1200{
1201 if (!dcachePort->sendTimingReq(data_pkt)) {
1202 // Need to handle becoming blocked on a store.
1203 isStoreBlocked = true;
1204 ++lsqCacheBlocked;
1205 assert(retryPkt == NULL);
1206 retryPkt = data_pkt;
1207 lsq->setRetryTid(lsqID);
1208 return false;
1209 }
1210 return true;
1211}
1212
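// The d-cache port can accept requests again: resend the blocked store
// packet and, for split accesses, any pending second packet.  Blocked
// loads need no resend because they squash and replay themselves.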
1213template <class Impl>
1214void
1215LSQUnit<Impl>::recvRetry()
1216{
1217 if (isStoreBlocked) {
1218 DPRINTF(LSQUnit, "Receiving retry: store blocked\n");
1219 assert(retryPkt != NULL);
1220
1221 LSQSenderState *state =
1222 dynamic_cast<LSQSenderState *>(retryPkt->senderState);
1223
1224 if (dcachePort->sendTimingReq(retryPkt)) {
1225 // Don't finish the store unless this is the last packet.
1226 if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
1227 state->pendingPacket == retryPkt) {
1228 state->pktToSend = false;
1229 storePostSend(retryPkt);
1230 }
1231 retryPkt = NULL;
1232 isStoreBlocked = false;
1233 lsq->setRetryTid(InvalidThreadID);
1234
1235 // Send any outstanding packet.
1236 if (TheISA::HasUnalignedMemAcc && state->pktToSend) {
1237 assert(state->pendingPacket);
1238 if (sendStore(state->pendingPacket)) {
1239 storePostSend(state->pendingPacket);
1240 }
1241 }
1242 } else {
1243 // Still blocked!
1244 ++lsqCacheBlocked;
1245 lsq->setRetryTid(lsqID);
1246 }
1247 } else if (isLoadBlocked) {
1248 DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
1249 "no need to resend packet.\n");
1250 } else {
1251 DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
1252 }
1253}
1254
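// The four helpers below step store/load queue indices with circular
// wrap-around (e.g., with SQEntries == 8, incrementing index 7 yields 0
// and decrementing index 0 yields 7).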
1255template <class Impl>
1256inline void
1257LSQUnit<Impl>::incrStIdx(int &store_idx) const
1258{
1259 if (++store_idx >= SQEntries)
1260 store_idx = 0;
1261}
1262
1263template <class Impl>
1264inline void
1265LSQUnit<Impl>::decrStIdx(int &store_idx) const
1266{
1267 if (--store_idx < 0)
1268 store_idx += SQEntries;
1269}
1270
1271template <class Impl>
1272inline void
1273LSQUnit<Impl>::incrLdIdx(int &load_idx) const
1274{
1275 if (++load_idx >= LQEntries)
1276 load_idx = 0;
1277}
1278
1279template <class Impl>
1280inline void
1281LSQUnit<Impl>::decrLdIdx(int &load_idx) const
1282{
1283 if (--load_idx < 0)
1284 load_idx += LQEntries;
1285}
1286
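// Debug helper: print the PC and sequence number of every instruction
// currently in the load and store queues.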
1287template <class Impl>
1288void
1289LSQUnit<Impl>::dumpInsts() const
1290{
1291 cprintf("Load store queue: Dumping instructions.\n");
1292 cprintf("Load queue size: %i\n", loads);
1293 cprintf("Load queue: ");
1294
1295 int load_idx = loadHead;
1296
1297 while (load_idx != loadTail && loadQueue[load_idx]) {
1298 const DynInstPtr &inst(loadQueue[load_idx]);
1299 cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);
1300
1301 incrLdIdx(load_idx);
1302 }
1303 cprintf("\n");
1304
1305 cprintf("Store queue size: %i\n", stores);
1306 cprintf("Store queue: ");
1307
1308 int store_idx = storeHead;
1309
1310 while (store_idx != storeTail && storeQueue[store_idx].inst) {
1311 const DynInstPtr &inst(storeQueue[store_idx].inst);
1312 cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);
1313
1314 incrStIdx(store_idx);
1315 }
1316
1317 cprintf("\n");
1318}
1319
1320#endif // __CPU_O3_LSQ_UNIT_IMPL_HH__