lsq_unit_impl.hh (10573:3b405d11d6dc)
/*
 * Copyright (c) 2010-2014 ARM Limited
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_LSQ_UNIT_IMPL_HH__
#define __CPU_O3_LSQ_UNIT_IMPL_HH__

#include "arch/generic/debugfaults.hh"
#include "arch/locked_mem.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/o3/lsq.hh"
#include "cpu/o3/lsq_unit.hh"
#include "debug/Activity.hh"
#include "debug/IEW.hh"
#include "debug/LSQUnit.hh"
#include "debug/O3PipeView.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(Default_Pri, AutoDelete),
      inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    assert(!lsqPtr->cpu->switchedOut());

    lsqPtr->writeback(inst, pkt);

    if (pkt->senderState)
        delete pkt->senderState;

    delete pkt->req;
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description() const
{
    return "Store writeback";
}

template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum);

    if (state->cacheBlocked) {
        // This is the first half of a previous split load,
        // where the second half blocked; ignore this response.
        DPRINTF(IEW, "[sn:%lli]: Response from first half of earlier "
                "blocked split load received. Ignoring.\n", inst->seqNum);
        delete state;
        delete pkt->req;
        delete pkt;
        return;
    }

    // If this is a split access, wait until all packets are received.
    if (TheISA::HasUnalignedMemAcc && !state->complete()) {
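        // An intermediate response of a split access: free this packet
        // and its request, but keep the sender state until the final
        // packet arrives.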
        delete pkt->req;
        delete pkt;
        return;
    }

    assert(!cpu->switchedOut());
    if (!inst->isSquashed()) {
        if (!state->noWB) {
            if (!TheISA::HasUnalignedMemAcc || !state->isSplit ||
                !state->isLoad) {
                writeback(inst, pkt);
            } else {
                writeback(inst, state->mainPkt);
            }
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
        delete state->mainPkt->req;
        delete state->mainPkt;
    }

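    // Record the measured access latency on the request before notifying
    // the data-access-complete probe point.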
    pkt->req->setAccessLatency();
    cpu->ppDataAccessComplete->notify(std::make_pair(inst, pkt));

    delete state;
    delete pkt->req;
    delete pkt;
}

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
      isStoreBlocked(false), storeInFlight(false), hasPendingPkt(false)
{
}

template<class Impl>
void
LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
        LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
        unsigned id)
{
    cpu = cpu_ptr;
    iewStage = iew_ptr;

    lsq = lsq_ptr;

    lsqID = id;

    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n", id);

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    // Due to the uint8_t index in LSQSenderState
    assert(LQEntries <= 256);
    assert(SQEntries <= 256);

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    depCheckShift = params->LSQDepCheckShift;
    checkLoads = params->LSQCheckLoads;
    cachePorts = params->cachePorts;
    needsTSO = params->needsTSO;

    resetState();
}

template<class Impl>
void
LSQUnit<Impl>::resetState()
{
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    retryPkt = NULL;
    memDepViolator = NULL;

    stalled = false;

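    // All ones above the block-offset bits: masking an address with this
    // yields the base address of the cache line containing it.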
    cacheBlockMask = ~(cpu->cacheLineSize() - 1);
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread" + std::to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the instruction is squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port)
{
    dcachePort = dcache_port;
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::drainSanityCheck() const
{
    for (int i = 0; i < loadQueue.size(); ++i)
        assert(!loadQueue[i]);

    assert(storesToWB == 0);
    assert(!retryPkt);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    resetState();
}

template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }

    assert(LQEntries <= 256);
}

template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }

    assert(SQEntries <= 256);
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
            load_inst->pcState(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

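    // Record which stores were already in the SQ when this load was
    // inserted, so the load knows which stores are older than it.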
    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
            store_inst->pcState(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeLoadEntries()
{
    // LQ has an extra dummy entry to differentiate
    // empty/full conditions. Subtract 1 from the free entries.
    DPRINTF(LSQUnit, "LQ size: %d, #loads occupied: %d\n", LQEntries, loads);
    return LQEntries - loads - 1;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeStoreEntries()
{
    // SQ has an extra dummy entry to differentiate
    // empty/full conditions. Subtract 1 from the free entries.
    DPRINTF(LSQUnit, "SQ size: %d, #stores occupied: %d\n", SQEntries, stores);
    return SQEntries - stores - 1;
}

template <class Impl>
void
LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
{
    int load_idx = loadHead;
    DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());

    // Unlock the cpu-local monitor when the CPU sees a snoop to a locked
    // address. The CPU can speculatively execute an LL operation after a
    // pending SC operation in the pipeline, and that can make the cache
    // monitor that the CPU is connected to appear valid when it really
    // shouldn't be.
    for (int x = 0; x < cpu->numContexts(); x++) {
        ThreadContext *tc = cpu->getContext(x);
        bool no_squash = cpu->thread[x]->noSquashFromTC;
        cpu->thread[x]->noSquashFromTC = true;
        TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask);
        cpu->thread[x]->noSquashFromTC = no_squash;
    }

    Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;

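    // The head load is the oldest load in flight, so it only needs its
    // LL/SC lock flag checked here; the younger loads below may also
    // have to be marked for re-execution.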
    DynInstPtr ld_inst = loadQueue[load_idx];
    if (ld_inst) {
        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        // Check that this snoop didn't just invalidate our lock flag
        if (ld_inst->effAddrValid() && load_addr == invalidate_addr &&
            ld_inst->memReqFlags & Request::LLSC)
            TheISA::handleLockedSnoopHit(ld_inst.get());
    }

    // If this is the only load in the LSQ we don't care
    if (load_idx == loadTail)
        return;

    incrLdIdx(load_idx);

    bool force_squash = false;

    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];

        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
                ld_inst->seqNum, load_addr, invalidate_addr);

        if (load_addr == invalidate_addr || force_squash) {
            if (needsTSO) {
                // If we have a TSO system, as all loads must be ordered with
                // all other loads, this load as well as *all* subsequent loads
                // need to be squashed to prevent possible load reordering.
                force_squash = true;
            }
            if (ld_inst->possibleLoadViolation() || force_squash) {
                DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Mark the load for re-execution
                ld_inst->fault = std::make_shared<ReExec>();
            } else {
                DPRINTF(LSQUnit, "HitExternal Snoop for addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Make sure that we don't lose a snoop hitting a LOCKED
                // address since the LOCK* flags don't get updated until
                // commit.
                if (ld_inst->memReqFlags & Request::LLSC)
                    TheISA::handleLockedSnoopHit(ld_inst.get());

                // If an older load checks this and it's true, then we
                // might have missed the snoop, in which case we need to
                // invalidate to be sure.
                ld_inst->hitExternalSnoop(true);
            }
        }
        incrLdIdx(load_idx);
    }
    return;
}

template <class Impl>
Fault
LSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst)
{
    Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
    Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;
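    // Addresses are compared at a granularity of (1 << depCheckShift)
    // bytes; a coarser granularity keeps the comparison cheap at the
    // cost of the occasional false-positive overlap.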

    /** @todo in theory you only need to check an instruction that has executed
     * however, there isn't a good way in the pipeline at the moment to check
     * all instructions that will execute before the store writes back. Thus,
     * like the implementation that came before it, we're overly conservative.
     */
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];
        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
        Addr ld_eff_addr2 =
            (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;

        if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
            if (inst->isLoad()) {
                // If this load is to the same block as an external snoop
                // invalidate that we've observed then the load needs to be
                // squashed as it could have newer data
                if (ld_inst->hitExternalSnoop()) {
                    if (!memDepViolator ||
                        ld_inst->seqNum < memDepViolator->seqNum) {
                        DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
                                "and [sn:%lli] at address %#x\n",
                                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                        memDepViolator = ld_inst;

                        ++lsqMemOrderViolation;

                        return std::make_shared<GenericISA::M5PanicFault>(
                            "Detected fault with inst [sn:%lli] and "
                            "[sn:%lli] at address %#x\n",
                            inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                    }
                }

                // Otherwise, mark the load as a possible load violation,
                // and if we see a snoop before it's committed, we need to
                // squash it.
                ld_inst->possibleLoadViolation(true);
                DPRINTF(LSQUnit, "Found possible load violation at addr: %#x"
                        " between instructions [sn:%lli] and [sn:%lli]\n",
                        inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
            } else {
                // A load/store incorrectly passed this store.
                // Check if we already have a violator, or if it's newer
                // squash and refetch.
                if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
                    break;

                DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and "
                        "[sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                memDepViolator = ld_inst;

                ++lsqMemOrderViolation;

                return std::make_shared<GenericISA::M5PanicFault>(
                    "Detected fault with "
                    "inst [sn:%lli] and [sn:%lli] at address %#x\n",
                    inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
            }
        }

        incrLdIdx(load_idx);
    }
    return NoFault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    using namespace TheISA;
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
            inst->pcState(), inst->seqNum);

    assert(!inst->isSquashed());

    load_fault = inst->initiateAcc();

    if (inst->isTranslationDelayed() &&
        load_fault == NoFault)
        return load_fault;

    // If the instruction faulted or predicated false, then we need to send it
    // along to commit without the instruction completing.
    if (load_fault != NoFault || !inst->readPredicate()) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        // Mark it as executed unless it is an uncached load that
        // needs to hit the head of commit.
        if (!inst->readPredicate())
            inst->forwardOldRegs();
        DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
                inst->seqNum,
                (load_fault != NoFault ? "fault" : "predication"));
        if (!(inst->hasRequest() && inst->uncacheable()) ||
            inst->isAtCommit()) {
            inst->setExecuted();
        }
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    } else {
        assert(inst->effAddrValid());
        int load_idx = inst->lqIdx;
        incrLdIdx(load_idx);

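        // Check the loads younger than this one for possible load-load
        // ordering violations (e.g. due to snooped invalidations).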
        if (checkLoads)
            return checkViolations(load_idx, inst);
    }

    return load_fault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n",
            store_inst->pcState(), store_inst->seqNum);

    assert(!store_inst->isSquashed());

    // Check the recently completed loads to see if any match this store's
    // address. If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    if (store_inst->isTranslationDelayed() &&
        store_fault == NoFault)
        return store_fault;

    if (!store_inst->readPredicate())
        store_inst->forwardOldRegs();

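    // A zero-size SQ entry means initiateAcc never wrote the store's
    // data (it faulted or was predicated false), so there is nothing
    // to write back.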
    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit, "Fault on Store PC %s, [sn:%lli], Size = 0\n",
                store_inst->pcState(), store_inst->seqNum);

        return store_fault;
    } else if (!store_inst->readPredicate()) {
        DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
                store_inst->seqNum);
        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    return checkViolations(load_idx, store_inst);
}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n",
            loadQueue[loadHead]->pcState());

    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%s [sn:%lli]\n",
                    storeQueue[store_idx].inst->pcState(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackPendingStore()
{
    if (hasPendingPkt) {
        assert(pendingPkt != NULL);

        // If the cache is blocked, this will store the packet for retry.
        if (sendStore(pendingPkt)) {
            storePostSend(pendingPkt);
        }
        pendingPkt = NULL;
        hasPendingPkt = false;
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    // First writeback the second packet from any split store that didn't
    // complete last cycle because there weren't enough cache ports available.
    if (TheISA::HasUnalignedMemAcc) {
        writebackPendingStore();
    }

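    // Write back stores in order while there is work to do, the head
    // store is ready, a cache port is free, and (under TSO) no other
    // store is already in flight.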
    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           ((!needsTSO) || (!storeInFlight)) &&
           usedPorts < cachePorts) {

        if (isStoreBlocked) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
            assert(storeQueue[storeWBIdx].sreqLow);
            assert(storeQueue[storeWBIdx].sreqHigh);
        }

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
        RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;

        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[req->getSize()];

        if (storeQueue[storeWBIdx].isAllZeros)
            memset(inst->memData, 0, req->getSize());
        else
            memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());

        PacketPtr data_pkt;
        PacketPtr snd_data_pkt = NULL;

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;

        if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {

            // Build a single data packet if the store isn't split.
            data_pkt = Packet::createWrite(req);
            data_pkt->dataStatic(inst->memData);
            data_pkt->senderState = state;
        } else {
            // Create two packets if the store is split in two.
            data_pkt = Packet::createWrite(sreqLow);
            snd_data_pkt = Packet::createWrite(sreqHigh);

            data_pkt->dataStatic(inst->memData);
            snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());

            data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;

            // Can delete the main request now.
            delete req;
            req = sreqLow;
        }

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->pcState(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            assert(!storeQueue[storeWBIdx].isSplit);
            // Disable recording the result temporarily. Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult(false);
            bool success = TheISA::handleLockedWrite(inst.get(), req, cacheBlockMask);
            inst->recordResult(true);

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. "
                        "Instantly completing it.\n",
                        inst->seqNum);
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                cpu->schedule(wb, curTick() + 1);
                if (cpu->checker) {
                    // Make sure to set the LLSC data for verification
                    // if checker is loaded
                    inst->reqToVerify->setExtraData(0);
                    inst->completeAcc(data_pkt);
                }
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        bool split =
            TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;

        ThreadContext *thread = cpu->tcBase(lsqID);

        if (req->isMmappedIpr()) {
            assert(!inst->isStoreConditional());
            TheISA::handleIprWrite(thread, data_pkt);
            delete data_pkt;
            if (split) {
                assert(snd_data_pkt->req->isMmappedIpr());
                TheISA::handleIprWrite(thread, snd_data_pkt);
                delete snd_data_pkt;
                delete sreqLow;
                delete sreqHigh;
            }
            delete state;
            delete req;
            completeStore(storeWBIdx);
            incrStIdx(storeWBIdx);
        } else if (!sendStore(data_pkt)) {
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], "
                    "will retry later\n",
                    inst->seqNum);

            // Need to store the second packet, if split.
            if (split) {
                state->pktToSend = true;
                state->pendingPacket = snd_data_pkt;
            }
        } else {

            // If split, try to send the second packet too
            if (split) {
                assert(snd_data_pkt);

                // Ensure there are enough ports to use.
                if (usedPorts < cachePorts) {
                    ++usedPorts;
                    if (sendStore(snd_data_pkt)) {
                        storePostSend(snd_data_pkt);
                    } else {
                        DPRINTF(IEW, "D-Cache became blocked when writing"
                                " [sn:%lli] second packet, will retry later\n",
                                inst->seqNum);
                    }
                } else {

                    // Store the packet for when there are free ports.
                    assert(pendingPkt == NULL);
                    pendingPkt = snd_data_pkt;
                    hasPendingPkt = true;
                }
            } else {

                // Not a split store.
                storePostSend(data_pkt);
            }
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n", mshrSeqNums.size());
    }
}*/

template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit, "Load Instruction PC %s squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->pcState(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit, "Store Instruction PC %s squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->pcState(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen. It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        // Must delete request now that it wasn't handed off to
        // memory. This is quite ugly. @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;
        if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
            delete storeQueue[store_idx].sreqLow;
            delete storeQueue[store_idx].sreqHigh;

            storeQueue[store_idx].sreqLow = NULL;
            storeQueue[store_idx].sreqHigh = NULL;
        }

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();

        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
    }

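    // Under TSO only one store may be outstanding at a time; block
    // further store writebacks until this one completes.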
    if (needsTSO) {
        storeInFlight = true;
    }

    incrStIdx(storeWBIdx);
}

template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();

    // see if this load changed the PC
    iewStage->checkMisprediction(inst);
}

template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

#if TRACING_ON
    if (DTRACE(O3PipeView)) {
        storeQueue[store_idx].inst->storeTick =
            curTick() - storeQueue[store_idx].inst->fetchTick;
    }
#endif

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    if (needsTSO) {
        storeInFlight = false;
    }

    // Tell the checker we've completed this instruction. Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
}

template <class Impl>
bool
LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
{
    if (!dcachePort->sendTimingReq(data_pkt)) {
        // Need to handle becoming blocked on a store.
        isStoreBlocked = true;
        ++lsqCacheBlocked;
        assert(retryPkt == NULL);
        retryPkt = data_pkt;
        return false;
    }
    return true;
}

template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Receiving retry: store blocked\n");
        assert(retryPkt != NULL);

        LSQSenderState *state =
            dynamic_cast<LSQSenderState *>(retryPkt->senderState);

        if (dcachePort->sendTimingReq(retryPkt)) {
            // Don't finish the store unless this is the last packet.
            if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
                state->pendingPacket == retryPkt) {
                state->pktToSend = false;
                storePostSend(retryPkt);
            }
            retryPkt = NULL;
            isStoreBlocked = false;

            // Send any outstanding packet.
            if (TheISA::HasUnalignedMemAcc && state->pktToSend) {
                assert(state->pendingPacket);
                if (sendStore(state->pendingPacket)) {
                    storePostSend(state->pendingPacket);
                }
            }
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
        }
    }
}

template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx) const
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx) const
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx) const
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx) const
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

template <class Impl>
void
LSQUnit<Impl>::dumpInsts() const
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        const DynInstPtr &inst(loadQueue[load_idx]);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrLdIdx(load_idx);
    }
    cprintf("\n");

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        const DynInstPtr &inst(storeQueue[store_idx].inst);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrStIdx(store_idx);
    }

    cprintf("\n");
}

#endif // __CPU_O3_LSQ_UNIT_IMPL_HH__
141}
142
143template <class Impl>
144LSQUnit<Impl>::LSQUnit()
145 : loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
146 isStoreBlocked(false), storeInFlight(false), hasPendingPkt(false)
147{
148}
149
150template<class Impl>
151void
152LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
153 LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
154 unsigned id)
155{
156 cpu = cpu_ptr;
157 iewStage = iew_ptr;
158
159 lsq = lsq_ptr;
160
161 lsqID = id;
162
163 DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id);
164
165 // Add 1 for the sentinel entry (they are circular queues).
166 LQEntries = maxLQEntries + 1;
167 SQEntries = maxSQEntries + 1;
168
169 //Due to uint8_t index in LSQSenderState
170 assert(LQEntries <= 256);
171 assert(SQEntries <= 256);
172
173 loadQueue.resize(LQEntries);
174 storeQueue.resize(SQEntries);
175
176 depCheckShift = params->LSQDepCheckShift;
177 checkLoads = params->LSQCheckLoads;
178 cachePorts = params->cachePorts;
179 needsTSO = params->needsTSO;
180
181 resetState();
182}
183
184
185template<class Impl>
186void
187LSQUnit<Impl>::resetState()
188{
189 loads = stores = storesToWB = 0;
190
191 loadHead = loadTail = 0;
192
193 storeHead = storeWBIdx = storeTail = 0;
194
195 usedPorts = 0;
196
197 retryPkt = NULL;
198 memDepViolator = NULL;
199
200 stalled = false;
201
202 cacheBlockMask = ~(cpu->cacheLineSize() - 1);
203}
204
205template<class Impl>
206std::string
207LSQUnit<Impl>::name() const
208{
209 if (Impl::MaxThreads == 1) {
210 return iewStage->name() + ".lsq";
211 } else {
212 return iewStage->name() + ".lsq.thread" + std::to_string(lsqID);
213 }
214}
215
216template<class Impl>
217void
218LSQUnit<Impl>::regStats()
219{
220 lsqForwLoads
221 .name(name() + ".forwLoads")
222 .desc("Number of loads that had data forwarded from stores");
223
224 invAddrLoads
225 .name(name() + ".invAddrLoads")
226 .desc("Number of loads ignored due to an invalid address");
227
228 lsqSquashedLoads
229 .name(name() + ".squashedLoads")
230 .desc("Number of loads squashed");
231
232 lsqIgnoredResponses
233 .name(name() + ".ignoredResponses")
234 .desc("Number of memory responses ignored because the instruction is squashed");
235
236 lsqMemOrderViolation
237 .name(name() + ".memOrderViolation")
238 .desc("Number of memory ordering violations");
239
240 lsqSquashedStores
241 .name(name() + ".squashedStores")
242 .desc("Number of stores squashed");
243
244 invAddrSwpfs
245 .name(name() + ".invAddrSwpfs")
246 .desc("Number of software prefetches ignored due to an invalid address");
247
248 lsqBlockedLoads
249 .name(name() + ".blockedLoads")
250 .desc("Number of blocked loads due to partial load-store forwarding");
251
252 lsqRescheduledLoads
253 .name(name() + ".rescheduledLoads")
254 .desc("Number of loads that were rescheduled");
255
256 lsqCacheBlocked
257 .name(name() + ".cacheBlocked")
258 .desc("Number of times an access to memory failed due to the cache being blocked");
259}
260
261template<class Impl>
262void
263LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port)
264{
265 dcachePort = dcache_port;
266}
267
268template<class Impl>
269void
270LSQUnit<Impl>::clearLQ()
271{
272 loadQueue.clear();
273}
274
275template<class Impl>
276void
277LSQUnit<Impl>::clearSQ()
278{
279 storeQueue.clear();
280}
281
282template<class Impl>
283void
284LSQUnit<Impl>::drainSanityCheck() const
285{
286 for (int i = 0; i < loadQueue.size(); ++i)
287 assert(!loadQueue[i]);
288
289 assert(storesToWB == 0);
290 assert(!retryPkt);
291}
292
293template<class Impl>
294void
295LSQUnit<Impl>::takeOverFrom()
296{
297 resetState();
298}
299
300template<class Impl>
301void
302LSQUnit<Impl>::resizeLQ(unsigned size)
303{
304 unsigned size_plus_sentinel = size + 1;
305 assert(size_plus_sentinel >= LQEntries);
306
307 if (size_plus_sentinel > LQEntries) {
308 while (size_plus_sentinel > loadQueue.size()) {
309 DynInstPtr dummy;
310 loadQueue.push_back(dummy);
311 LQEntries++;
312 }
313 } else {
314 LQEntries = size_plus_sentinel;
315 }
316
317 assert(LQEntries <= 256);
318}
319
320template<class Impl>
321void
322LSQUnit<Impl>::resizeSQ(unsigned size)
323{
324 unsigned size_plus_sentinel = size + 1;
325 if (size_plus_sentinel > SQEntries) {
326 while (size_plus_sentinel > storeQueue.size()) {
327 SQEntry dummy;
328 storeQueue.push_back(dummy);
329 SQEntries++;
330 }
331 } else {
332 SQEntries = size_plus_sentinel;
333 }
334
335 assert(SQEntries <= 256);
336}
337
338template <class Impl>
339void
340LSQUnit<Impl>::insert(DynInstPtr &inst)
341{
342 assert(inst->isMemRef());
343
344 assert(inst->isLoad() || inst->isStore());
345
346 if (inst->isLoad()) {
347 insertLoad(inst);
348 } else {
349 insertStore(inst);
350 }
351
352 inst->setInLSQ();
353}
354
355template <class Impl>
356void
357LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
358{
359 assert((loadTail + 1) % LQEntries != loadHead);
360 assert(loads < LQEntries);
361
362 DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
363 load_inst->pcState(), loadTail, load_inst->seqNum);
364
365 load_inst->lqIdx = loadTail;
366
367 if (stores == 0) {
368 load_inst->sqIdx = -1;
369 } else {
370 load_inst->sqIdx = storeTail;
371 }
372
373 loadQueue[loadTail] = load_inst;
374
375 incrLdIdx(loadTail);
376
377 ++loads;
378}
379
380template <class Impl>
381void
382LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
383{
384 // Make sure it is not full before inserting an instruction.
385 assert((storeTail + 1) % SQEntries != storeHead);
386 assert(stores < SQEntries);
387
388 DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
389 store_inst->pcState(), storeTail, store_inst->seqNum);
390
391 store_inst->sqIdx = storeTail;
392 store_inst->lqIdx = loadTail;
393
394 storeQueue[storeTail] = SQEntry(store_inst);
395
396 incrStIdx(storeTail);
397
398 ++stores;
399}
400
401template <class Impl>
402typename Impl::DynInstPtr
403LSQUnit<Impl>::getMemDepViolator()
404{
405 DynInstPtr temp = memDepViolator;
406
407 memDepViolator = NULL;
408
409 return temp;
410}
411
412template <class Impl>
413unsigned
414LSQUnit<Impl>::numFreeLoadEntries()
415{
416 //LQ has an extra dummy entry to differentiate
417 //empty/full conditions. Subtract 1 from the free entries.
418 DPRINTF(LSQUnit, "LQ size: %d, #loads occupied: %d\n", LQEntries, loads);
419 return LQEntries - loads - 1;
420}
421
422template <class Impl>
423unsigned
424LSQUnit<Impl>::numFreeStoreEntries()
425{
426 //SQ has an extra dummy entry to differentiate
427 //empty/full conditions. Subtract 1 from the free entries.
428 DPRINTF(LSQUnit, "SQ size: %d, #stores occupied: %d\n", SQEntries, stores);
429 return SQEntries - stores - 1;
430
431 }
432
433template <class Impl>
434void
435LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
436{
437 int load_idx = loadHead;
438 DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());
439
440 // Unlock the cpu-local monitor when the CPU sees a snoop to a locked
441 // address. The CPU can speculatively execute a LL operation after a pending
442 // SC operation in the pipeline and that can make the cache monitor the CPU
443 // is connected to valid while it really shouldn't be.
444 for (int x = 0; x < cpu->numContexts(); x++) {
445 ThreadContext *tc = cpu->getContext(x);
446 bool no_squash = cpu->thread[x]->noSquashFromTC;
447 cpu->thread[x]->noSquashFromTC = true;
448 TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask);
449 cpu->thread[x]->noSquashFromTC = no_squash;
450 }
451
452 Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;
453
454 DynInstPtr ld_inst = loadQueue[load_idx];
455 if (ld_inst) {
456 Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
457 // Check that this snoop didn't just invalidate our lock flag
458 if (ld_inst->effAddrValid() && load_addr == invalidate_addr &&
459 ld_inst->memReqFlags & Request::LLSC)
460 TheISA::handleLockedSnoopHit(ld_inst.get());
461 }
462
463 // If this is the only load in the LSQ we don't care
464 if (load_idx == loadTail)
465 return;
466
467 incrLdIdx(load_idx);
468
469 bool force_squash = false;
470
471 while (load_idx != loadTail) {
472 DynInstPtr ld_inst = loadQueue[load_idx];
473
474 if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
475 incrLdIdx(load_idx);
476 continue;
477 }
478
479 Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
480 DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
481 ld_inst->seqNum, load_addr, invalidate_addr);
482
483 if (load_addr == invalidate_addr || force_squash) {
484 if (needsTSO) {
485 // If we have a TSO system, as all loads must be ordered with
486 // all other loads, this load as well as *all* subsequent loads
487 // need to be squashed to prevent possible load reordering.
488 force_squash = true;
489 }
490 if (ld_inst->possibleLoadViolation() || force_squash) {
491 DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n",
492 pkt->getAddr(), ld_inst->seqNum);
493
494 // Mark the load for re-execution
495 ld_inst->fault = std::make_shared<ReExec>();
496 } else {
497 DPRINTF(LSQUnit, "HitExternal Snoop for addr %#x [sn:%lli]\n",
498 pkt->getAddr(), ld_inst->seqNum);
499
500 // Make sure that we don't lose a snoop hitting a LOCKED
501 // address since the LOCK* flags don't get updated until
502 // commit.
503 if (ld_inst->memReqFlags & Request::LLSC)
504 TheISA::handleLockedSnoopHit(ld_inst.get());
505
506 // If a older load checks this and it's true
507 // then we might have missed the snoop
508 // in which case we need to invalidate to be sure
509 ld_inst->hitExternalSnoop(true);
510 }
511 }
512 incrLdIdx(load_idx);
513 }
514 return;
515}
516
517template <class Impl>
518Fault
519LSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst)
520{
521 Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
522 Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;
523
524 /** @todo in theory you only need to check an instruction that has executed
525 * however, there isn't a good way in the pipeline at the moment to check
526 * all instructions that will execute before the store writes back. Thus,
527 * like the implementation that came before it, we're overly conservative.
528 */
529 while (load_idx != loadTail) {
530 DynInstPtr ld_inst = loadQueue[load_idx];
531 if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
532 incrLdIdx(load_idx);
533 continue;
534 }
535
536 Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
537 Addr ld_eff_addr2 =
538 (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;
539
540 if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
541 if (inst->isLoad()) {
542 // If this load is to the same block as an external snoop
543 // invalidate that we've observed then the load needs to be
544 // squashed as it could have newer data
545 if (ld_inst->hitExternalSnoop()) {
546 if (!memDepViolator ||
547 ld_inst->seqNum < memDepViolator->seqNum) {
548 DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
549 "and [sn:%lli] at address %#x\n",
550 inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
551 memDepViolator = ld_inst;
552
553 ++lsqMemOrderViolation;
554
555 return std::make_shared<GenericISA::M5PanicFault>(
556 "Detected fault with inst [sn:%lli] and "
557 "[sn:%lli] at address %#x\n",
558 inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
559 }
560 }
561
562 // Otherwise, mark the load has a possible load violation
563 // and if we see a snoop before it's commited, we need to squash
564 ld_inst->possibleLoadViolation(true);
565 DPRINTF(LSQUnit, "Found possible load violaiton at addr: %#x"
566 " between instructions [sn:%lli] and [sn:%lli]\n",
567 inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
568 } else {
569 // A load/store incorrectly passed this store.
570 // Check if we already have a violator, or if it's newer
571 // squash and refetch.
572 if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
573 break;
574
575 DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and "
576 "[sn:%lli] at address %#x\n",
577 inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
578 memDepViolator = ld_inst;
579
580 ++lsqMemOrderViolation;
581
582 return std::make_shared<GenericISA::M5PanicFault>(
583 "Detected fault with "
584 "inst [sn:%lli] and [sn:%lli] at address %#x\n",
585 inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
586 }
587 }
588
589 incrLdIdx(load_idx);
590 }
591 return NoFault;
592}
593
594
595
596
597template <class Impl>
598Fault
599LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
600{
601 using namespace TheISA;
602 // Execute a specific load.
603 Fault load_fault = NoFault;
604
605 DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
606 inst->pcState(), inst->seqNum);
607
608 assert(!inst->isSquashed());
609
610 load_fault = inst->initiateAcc();
611
612 if (inst->isTranslationDelayed() &&
613 load_fault == NoFault)
614 return load_fault;
615
616 // If the instruction faulted or predicated false, then we need to send it
617 // along to commit without the instruction completing.
618 if (load_fault != NoFault || !inst->readPredicate()) {
619 // Send this instruction to commit, also make sure iew stage
620 // realizes there is activity.
621 // Mark it as executed unless it is an uncached load that
622 // needs to hit the head of commit.
623 if (!inst->readPredicate())
624 inst->forwardOldRegs();
625 DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
626 inst->seqNum,
627 (load_fault != NoFault ? "fault" : "predication"));
628 if (!(inst->hasRequest() && inst->uncacheable()) ||
629 inst->isAtCommit()) {
630 inst->setExecuted();
631 }
632 iewStage->instToCommit(inst);
633 iewStage->activityThisCycle();
634 } else {
635 assert(inst->effAddrValid());
636 int load_idx = inst->lqIdx;
637 incrLdIdx(load_idx);
638
639 if (checkLoads)
640 return checkViolations(load_idx, inst);
641 }
642
643 return load_fault;
644}
645
646template <class Impl>
647Fault
648LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
649{
650 using namespace TheISA;
651 // Make sure that a store exists.
652 assert(stores != 0);
653
654 int store_idx = store_inst->sqIdx;
655
656 DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n",
657 store_inst->pcState(), store_inst->seqNum);
658
659 assert(!store_inst->isSquashed());
660
661 // Check the recently completed loads to see if any match this store's
662 // address. If so, then we have a memory ordering violation.
663 int load_idx = store_inst->lqIdx;
664
665 Fault store_fault = store_inst->initiateAcc();
666
667 if (store_inst->isTranslationDelayed() &&
668 store_fault == NoFault)
669 return store_fault;
670
671 if (!store_inst->readPredicate())
672 store_inst->forwardOldRegs();
673
674 if (storeQueue[store_idx].size == 0) {
675 DPRINTF(LSQUnit,"Fault on Store PC %s, [sn:%lli], Size = 0\n",
676 store_inst->pcState(), store_inst->seqNum);
677
678 return store_fault;
679 } else if (!store_inst->readPredicate()) {
680 DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
681 store_inst->seqNum);
682 return store_fault;
683 }
684
685 assert(store_fault == NoFault);
686
687 if (store_inst->isStoreConditional()) {
688 // Store conditionals need to set themselves as able to
689 // writeback if we haven't had a fault by here.
690 storeQueue[store_idx].canWB = true;
691
692 ++storesToWB;
693 }
694
695 return checkViolations(load_idx, store_inst);
696
697}
698
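// Loads need no post-commit writeback, so committing one just releases its
// load-queue entry.  commitLoads() below frees every load up to and
// including youngest_inst; since the queue is age-ordered, it can walk
// from the head and stop at the first younger load.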
699template <class Impl>
700void
701LSQUnit<Impl>::commitLoad()
702{
703 assert(loadQueue[loadHead]);
704
705 DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n",
706 loadQueue[loadHead]->pcState());
707
708 loadQueue[loadHead] = NULL;
709
710 incrLdIdx(loadHead);
711
712 --loads;
713}
714
715template <class Impl>
716void
717LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
718{
719 assert(loads == 0 || loadQueue[loadHead]);
720
721 while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
722 commitLoad();
723 }
724}
725
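// commitStores() walks from the SQ head toward the tail, marking each
// committed store as able to write back (canWB).  Entries already marked
// are skipped so storesToWB is not bumped twice, and the walk stops at the
// first store younger than youngest_inst.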
726template <class Impl>
727void
728LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
729{
730 assert(stores == 0 || storeQueue[storeHead].inst);
731
732 int store_idx = storeHead;
733
734 while (store_idx != storeTail) {
735 assert(storeQueue[store_idx].inst);
736 // Mark any stores that are now committed and have not yet
737 // been marked as able to write back.
738 if (!storeQueue[store_idx].canWB) {
739 if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
740 break;
741 }
742 DPRINTF(LSQUnit, "Marking store as able to write back, PC "
743 "%s [sn:%lli]\n",
744 storeQueue[store_idx].inst->pcState(),
745 storeQueue[store_idx].inst->seqNum);
746
747 storeQueue[store_idx].canWB = true;
748
749 ++storesToWB;
750 }
751
752 incrStIdx(store_idx);
753 }
754}
755
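// A split store may fail to send its second packet in a cycle with no
// free cache port; that packet is parked in pendingPkt and retried here
// at the start of the next writeback pass.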
756template <class Impl>
757void
758LSQUnit<Impl>::writebackPendingStore()
759{
760 if (hasPendingPkt) {
761 assert(pendingPkt != NULL);
762
763 // If the cache is blocked, this will store the packet for retry.
764 if (sendStore(pendingPkt)) {
765 storePostSend(pendingPkt);
766 }
767 pendingPkt = NULL;
768 hasPendingPkt = false;
769 }
770}
771
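// Write back as many committed stores as possible this cycle.  The loop
// stops when the cache blocks, when the per-cycle port budget (cachePorts)
// runs out, or, when TSO is required, while another store is still in
// flight, since TSO forces stores to complete in order, one at a time.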
772template <class Impl>
773void
774LSQUnit<Impl>::writebackStores()
775{
776    // First write back the second packet from any split store that didn't
777    // complete last cycle because there weren't enough cache ports available.
778 if (TheISA::HasUnalignedMemAcc) {
779 writebackPendingStore();
780 }
781
782 while (storesToWB > 0 &&
783 storeWBIdx != storeTail &&
784 storeQueue[storeWBIdx].inst &&
785 storeQueue[storeWBIdx].canWB &&
786 ((!needsTSO) || (!storeInFlight)) &&
787 usedPorts < cachePorts) {
788
789 if (isStoreBlocked) {
790 DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
791 " is blocked!\n");
792 break;
793 }
794
795 // Store didn't write any data so no need to write it back to
796 // memory.
797 if (storeQueue[storeWBIdx].size == 0) {
798 completeStore(storeWBIdx);
799
800 incrStIdx(storeWBIdx);
801
802 continue;
803 }
804
805 ++usedPorts;
806
807 if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
808 incrStIdx(storeWBIdx);
809
810 continue;
811 }
812
813 assert(storeQueue[storeWBIdx].req);
814 assert(!storeQueue[storeWBIdx].committed);
815
816 if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
817 assert(storeQueue[storeWBIdx].sreqLow);
818 assert(storeQueue[storeWBIdx].sreqHigh);
819 }
820
821 DynInstPtr inst = storeQueue[storeWBIdx].inst;
822
823 Request *req = storeQueue[storeWBIdx].req;
824 RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
825 RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;
826
827 storeQueue[storeWBIdx].committed = true;
828
829 assert(!inst->memData);
830 inst->memData = new uint8_t[req->getSize()];
831
832 if (storeQueue[storeWBIdx].isAllZeros)
833 memset(inst->memData, 0, req->getSize());
834 else
835 memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());
836
837 PacketPtr data_pkt;
838 PacketPtr snd_data_pkt = NULL;
839
840 LSQSenderState *state = new LSQSenderState;
841 state->isLoad = false;
842 state->idx = storeWBIdx;
843 state->inst = inst;
844
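        // Build the write packet(s): an aligned store needs one packet,
        // while a split store needs one for each half of the access.  Both
        // halves share this sender state, and the store only completes once
        // both responses have returned (outstanding == 2).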
845 if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {
846
847 // Build a single data packet if the store isn't split.
848 data_pkt = Packet::createWrite(req);
849 data_pkt->dataStatic(inst->memData);
850 data_pkt->senderState = state;
851 } else {
852 // Create two packets if the store is split in two.
853 data_pkt = Packet::createWrite(sreqLow);
854 snd_data_pkt = Packet::createWrite(sreqHigh);
855
856 data_pkt->dataStatic(inst->memData);
857 snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());
858
859 data_pkt->senderState = state;
860 snd_data_pkt->senderState = state;
861
862 state->isSplit = true;
863 state->outstanding = 2;
864
865 // Can delete the main request now.
866 delete req;
867 req = sreqLow;
868 }
869
870 DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
871 "to Addr:%#x, data:%#x [sn:%lli]\n",
872 storeWBIdx, inst->pcState(),
873 req->getPaddr(), (int)*(inst->memData),
874 inst->seqNum);
875
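        // Store conditionals are resolved locally: handleLockedWrite()
        // consults the ISA's lock flag, and a failing SC is completed
        // immediately through a scheduled WritebackEvent instead of being
        // sent to memory.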
876 // @todo: Remove this SC hack once the memory system handles it.
877 if (inst->isStoreConditional()) {
878 assert(!storeQueue[storeWBIdx].isSplit);
879 // Disable recording the result temporarily. Writing to
880 // misc regs normally updates the result, but this is not
881 // the desired behavior when handling store conditionals.
882 inst->recordResult(false);
883 bool success = TheISA::handleLockedWrite(inst.get(), req, cacheBlockMask);
884 inst->recordResult(true);
885
886 if (!success) {
887 // Instantly complete this store.
888 DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. "
889 "Instantly completing it.\n",
890 inst->seqNum);
891 WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
892 cpu->schedule(wb, curTick() + 1);
893 if (cpu->checker) {
894 // Make sure to set the LLSC data for verification
895 // if checker is loaded
896 inst->reqToVerify->setExtraData(0);
897 inst->completeAcc(data_pkt);
898 }
899 completeStore(storeWBIdx);
900 incrStIdx(storeWBIdx);
901 continue;
902 }
903 } else {
904 // Non-store conditionals do not need a writeback.
905 state->noWB = true;
906 }
907
908 bool split =
909 TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;
910
911 ThreadContext *thread = cpu->tcBase(lsqID);
912
913 if (req->isMmappedIpr()) {
914 assert(!inst->isStoreConditional());
915 TheISA::handleIprWrite(thread, data_pkt);
916 delete data_pkt;
917 if (split) {
918 assert(snd_data_pkt->req->isMmappedIpr());
919 TheISA::handleIprWrite(thread, snd_data_pkt);
920 delete snd_data_pkt;
921 delete sreqLow;
922 delete sreqHigh;
923 }
924 delete state;
925 delete req;
926 completeStore(storeWBIdx);
927 incrStIdx(storeWBIdx);
928 } else if (!sendStore(data_pkt)) {
929            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], "
930                    "will retry later\n",
931 inst->seqNum);
932
933 // Need to store the second packet, if split.
934 if (split) {
935 state->pktToSend = true;
936 state->pendingPacket = snd_data_pkt;
937 }
938 } else {
939
940 // If split, try to send the second packet too
941 if (split) {
942 assert(snd_data_pkt);
943
944 // Ensure there are enough ports to use.
945 if (usedPorts < cachePorts) {
946 ++usedPorts;
947 if (sendStore(snd_data_pkt)) {
948 storePostSend(snd_data_pkt);
949 } else {
950 DPRINTF(IEW, "D-Cache became blocked when writing"
951 " [sn:%lli] second packet, will retry later\n",
952 inst->seqNum);
953 }
954 } else {
955
956                    // Hold the packet until a cache port is free.
957 assert(pendingPkt == NULL);
958 pendingPkt = snd_data_pkt;
959 hasPendingPkt = true;
960 }
961 } else {
962
963 // Not a split store.
964 storePostSend(data_pkt);
965 }
966 }
967 }
968
969    // @todo: Verify that resetting usedPorts to 0 here is correct.
970 usedPorts = 0;
971
972 assert(stores >= 0 && storesToWB >= 0);
973}
974
988
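// Squash every instruction younger than squashed_num, walking both queues
// from the tail back toward the head.  Loads can always be removed; stores
// already marked canWB have committed and must stay, so the store walk
// stops when it reaches one.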
989template <class Impl>
990void
991LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
992{
993    DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
994            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);
995
996 int load_idx = loadTail;
997 decrLdIdx(load_idx);
998
999 while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
1000        DPRINTF(LSQUnit, "Load Instruction PC %s squashed, "
1001 "[sn:%lli]\n",
1002 loadQueue[load_idx]->pcState(),
1003 loadQueue[load_idx]->seqNum);
1004
1005 if (isStalled() && load_idx == stallingLoadIdx) {
1006 stalled = false;
1007 stallingStoreIsn = 0;
1008 stallingLoadIdx = 0;
1009 }
1010
1011        // Clear the smart pointer so the reference count is decremented.
1012 loadQueue[load_idx]->setSquashed();
1013 loadQueue[load_idx] = NULL;
1014 --loads;
1015
1016        // Inefficient: the tail is rewritten on every iteration.
1017 loadTail = load_idx;
1018
1019 decrLdIdx(load_idx);
1020 ++lsqSquashedLoads;
1021 }
1022
1023 if (memDepViolator && squashed_num < memDepViolator->seqNum) {
1024 memDepViolator = NULL;
1025 }
1026
1027 int store_idx = storeTail;
1028 decrStIdx(store_idx);
1029
1030 while (stores != 0 &&
1031 storeQueue[store_idx].inst->seqNum > squashed_num) {
1032 // Instructions marked as can WB are already committed.
1033 if (storeQueue[store_idx].canWB) {
1034 break;
1035 }
1036
1037        DPRINTF(LSQUnit, "Store Instruction PC %s squashed, "
1038 "idx:%i [sn:%lli]\n",
1039 storeQueue[store_idx].inst->pcState(),
1040 store_idx, storeQueue[store_idx].inst->seqNum);
1041
1042        // This should not be reachable: the stall should already have
1043        // been cleared when the stalling load was squashed.
1044 if (isStalled() &&
1045 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
1046 panic("Is stalled should have been cleared by stalling load!\n");
1047 stalled = false;
1048 stallingStoreIsn = 0;
1049 }
1050
1051        // Clear the smart pointer so the reference count is decremented.
1052 storeQueue[store_idx].inst->setSquashed();
1053 storeQueue[store_idx].inst = NULL;
1054        storeQueue[store_idx].canWB = false;
1055
1056 // Must delete request now that it wasn't handed off to
1057 // memory. This is quite ugly. @todo: Figure out the proper
1058 // place to really handle request deletes.
1059 delete storeQueue[store_idx].req;
1060 if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
1061 delete storeQueue[store_idx].sreqLow;
1062 delete storeQueue[store_idx].sreqHigh;
1063
1064 storeQueue[store_idx].sreqLow = NULL;
1065 storeQueue[store_idx].sreqHigh = NULL;
1066 }
1067
1068 storeQueue[store_idx].req = NULL;
1069 --stores;
1070
1071        // Inefficient: the tail is rewritten on every iteration.
1072 storeTail = store_idx;
1073
1074 decrStIdx(store_idx);
1075 ++lsqSquashedStores;
1076 }
1077}
1078
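// Bookkeeping after a store packet has been sent to the cache: clear any
// load stall that was waiting on this store, mark ordinary (non-SC) stores
// completed since their response carries no data to write back, and record
// the in-flight store when TSO ordering is enforced.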
1079template <class Impl>
1080void
1081LSQUnit<Impl>::storePostSend(PacketPtr pkt)
1082{
1083 if (isStalled() &&
1084 storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
1085 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
1086 "load idx:%i\n",
1087 stallingStoreIsn, stallingLoadIdx);
1088 stalled = false;
1089 stallingStoreIsn = 0;
1090 iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
1091 }
1092
1093 if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
1094 // The store is basically completed at this time. This
1095 // only works so long as the checker doesn't try to
1096 // verify the value in memory for stores.
1097 storeQueue[storeWBIdx].inst->setCompleted();
1098
1099 if (cpu->checker) {
1100 cpu->checker->verify(storeQueue[storeWBIdx].inst);
1101 }
1102 }
1103
1104 if (needsTSO) {
1105 storeInFlight = true;
1106 }
1107
1108 incrStIdx(storeWBIdx);
1109}
1110
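// Complete an instruction when its memory response arrives: copy the
// returned data into its destination via completeAcc(), hand the
// instruction to commit, and check whether the access changed the
// predicted PC.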
1111template <class Impl>
1112void
1113LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
1114{
1115 iewStage->wakeCPU();
1116
1117 // Squashed instructions do not need to complete their access.
1118 if (inst->isSquashed()) {
1119 assert(!inst->isStore());
1120 ++lsqIgnoredResponses;
1121 return;
1122 }
1123
1124 if (!inst->isExecuted()) {
1125 inst->setExecuted();
1126
1127 // Complete access to copy data to proper place.
1128 inst->completeAcc(pkt);
1129 }
1130
1131 // Need to insert instruction into queue to commit
1132 iewStage->instToCommit(inst);
1133
1134 iewStage->activityThisCycle();
1135
1136    // See if this load changed the PC.
1137 iewStage->checkMisprediction(inst);
1138}
1139
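// Retire a store whose access has fully finished.  Stores may complete out
// of order within the queue, so entries are only reclaimed from the head,
// advancing across any contiguous run of completed stores.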
1140template <class Impl>
1141void
1142LSQUnit<Impl>::completeStore(int store_idx)
1143{
1144 assert(storeQueue[store_idx].inst);
1145 storeQueue[store_idx].completed = true;
1146 --storesToWB;
1147 // A bit conservative because a store completion may not free up entries,
1148    // but it hopefully prevents two store completions in one cycle from
1149    // making the CPU tick twice.
1150 cpu->wakeCPU();
1151 cpu->activityThisCycle();
1152
1153 if (store_idx == storeHead) {
1154 do {
1155 incrStIdx(storeHead);
1156
1157 --stores;
1158 } while (storeQueue[storeHead].completed &&
1159 storeHead != storeTail);
1160
1161 iewStage->updateLSQNextCycle = true;
1162 }
1163
1164 DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
1165 "idx:%i\n",
1166 storeQueue[store_idx].inst->seqNum, store_idx, storeHead);
1167
1168#if TRACING_ON
1169 if (DTRACE(O3PipeView)) {
1170 storeQueue[store_idx].inst->storeTick =
1171 curTick() - storeQueue[store_idx].inst->fetchTick;
1172 }
1173#endif
1174
1175 if (isStalled() &&
1176 storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
1177 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
1178 "load idx:%i\n",
1179 stallingStoreIsn, stallingLoadIdx);
1180 stalled = false;
1181 stallingStoreIsn = 0;
1182 iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
1183 }
1184
1185 storeQueue[store_idx].inst->setCompleted();
1186
1187 if (needsTSO) {
1188 storeInFlight = false;
1189 }
1190
1191 // Tell the checker we've completed this instruction. Some stores
1192 // may get reported twice to the checker, but the checker can
1193 // handle that case.
1194 if (cpu->checker) {
1195 cpu->checker->verify(storeQueue[store_idx].inst);
1196 }
1197}
1198
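// Try to issue a store packet to the D-cache.  If the port refuses it, the
// packet is remembered in retryPkt and the unit stays blocked until
// recvRetry() is called.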
1199template <class Impl>
1200bool
1201LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
1202{
1203 if (!dcachePort->sendTimingReq(data_pkt)) {
1204 // Need to handle becoming blocked on a store.
1205 isStoreBlocked = true;
1206 ++lsqCacheBlocked;
1207 assert(retryPkt == NULL);
1208 retryPkt = data_pkt;
1209 return false;
1210 }
1211 return true;
1212}
1213
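// The cache calls back with a retry once it can accept requests again:
// re-send retryPkt and, if it was the first half of a split store, send or
// queue the second half as well.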
1214template <class Impl>
1215void
1216LSQUnit<Impl>::recvRetry()
1217{
1218 if (isStoreBlocked) {
1219 DPRINTF(LSQUnit, "Receiving retry: store blocked\n");
1220 assert(retryPkt != NULL);
1221
1222 LSQSenderState *state =
1223 dynamic_cast<LSQSenderState *>(retryPkt->senderState);
1224
1225 if (dcachePort->sendTimingReq(retryPkt)) {
1226 // Don't finish the store unless this is the last packet.
1227 if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
1228 state->pendingPacket == retryPkt) {
1229 state->pktToSend = false;
1230 storePostSend(retryPkt);
1231 }
1232 retryPkt = NULL;
1233 isStoreBlocked = false;
1234
1235 // Send any outstanding packet.
1236 if (TheISA::HasUnalignedMemAcc && state->pktToSend) {
1237 assert(state->pendingPacket);
1238 if (sendStore(state->pendingPacket)) {
1239 storePostSend(state->pendingPacket);
1240 }
1241 }
1242 } else {
1243 // Still blocked!
1244 ++lsqCacheBlocked;
1245 }
1246 }
1247}
1248
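// The load and store queues are circular buffers, so these helpers wrap
// the indices modulo the queue size.  For example, if SQEntries were 32,
// incrStIdx would take 31 to 0 and decrStIdx would take 0 to 31.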
1249template <class Impl>
1250inline void
1251LSQUnit<Impl>::incrStIdx(int &store_idx) const
1252{
1253 if (++store_idx >= SQEntries)
1254 store_idx = 0;
1255}
1256
1257template <class Impl>
1258inline void
1259LSQUnit<Impl>::decrStIdx(int &store_idx) const
1260{
1261 if (--store_idx < 0)
1262 store_idx += SQEntries;
1263}
1264
1265template <class Impl>
1266inline void
1267LSQUnit<Impl>::incrLdIdx(int &load_idx) const
1268{
1269 if (++load_idx >= LQEntries)
1270 load_idx = 0;
1271}
1272
1273template <class Impl>
1274inline void
1275LSQUnit<Impl>::decrLdIdx(int &load_idx) const
1276{
1277 if (--load_idx < 0)
1278 load_idx += LQEntries;
1279}
1280
1281template <class Impl>
1282void
1283LSQUnit<Impl>::dumpInsts() const
1284{
1285 cprintf("Load store queue: Dumping instructions.\n");
1286 cprintf("Load queue size: %i\n", loads);
1287 cprintf("Load queue: ");
1288
1289 int load_idx = loadHead;
1290
1291 while (load_idx != loadTail && loadQueue[load_idx]) {
1292 const DynInstPtr &inst(loadQueue[load_idx]);
1293 cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);
1294
1295 incrLdIdx(load_idx);
1296 }
1297 cprintf("\n");
1298
1299 cprintf("Store queue size: %i\n", stores);
1300 cprintf("Store queue: ");
1301
1302 int store_idx = storeHead;
1303
1304 while (store_idx != storeTail && storeQueue[store_idx].inst) {
1305 const DynInstPtr &inst(storeQueue[store_idx].inst);
1306 cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);
1307
1308 incrStIdx(store_idx);
1309 }
1310
1311 cprintf("\n");
1312}
1313
1314#endif // __CPU_O3_LSQ_UNIT_IMPL_HH__