/*
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#include "arch/locked_mem.hh"
#include "config/use_checker.hh"

#include "cpu/o3/lsq.hh"
#include "cpu/o3/lsq_unit.hh"
#include "base/str.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

#if USE_CHECKER
#include "cpu/checker/cpu.hh"
#endif

template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(&mainEventQueue), inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
    this->setFlags(Event::AutoDelete);
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    if (!lsqPtr->isSwitchedOut()) {
        lsqPtr->writeback(inst, pkt);
    }
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description()
{
    return "Store writeback event";
}

template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli]\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli]\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    if (isSwitchedOut() || inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        delete state;
        delete pkt->req;
        delete pkt;
        return;
    } else {
        if (!state->noWB) {
            writeback(inst, pkt);
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    delete state;
    delete pkt->req;
    delete pkt;
}

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false)
{
}

template<class Impl>
void
LSQUnit<Impl>::init(Params *params, LSQ *lsq_ptr, unsigned maxLQEntries,
                    unsigned maxSQEntries, unsigned id)
{
    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id);

    switchedOut = false;

    lsq = lsq_ptr;

    lsqID = id;

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;
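    // Illustrative example (assumed size): with maxLQEntries = 32 the load
    // queue gets 33 slots, and it is treated as full when
    // (loadTail + 1) % LQEntries == loadHead, so an empty queue
    // (loadHead == loadTail) is always distinguishable from a full one.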

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;
    cachePorts = params->cachePorts;

    retryPkt = NULL;
    memDepViolator = NULL;

    blockedLoadSeqNum = 0;
}

template<class Impl>
void
LSQUnit<Impl>::setCPU(O3CPU *cpu_ptr)
{
    cpu = cpu_ptr;

#if USE_CHECKER
    if (cpu->checker) {
        cpu->checker->setDcachePort(dcachePort);
    }
#endif
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread." + to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the instruction is squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::switchOut()
{
    switchedOut = true;
    for (int i = 0; i < loadQueue.size(); ++i) {
        assert(!loadQueue[i]);
        loadQueue[i] = NULL;
    }

    assert(storesToWB == 0);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    switchedOut = false;
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;
}

template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }
}

template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %#x, idx:%i [sn:%lli]\n",
            load_inst->readPC(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %#x, idx:%i [sn:%lli]\n",
            store_inst->readPC(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeEntries()
{
    unsigned free_lq_entries = LQEntries - loads;
    unsigned free_sq_entries = SQEntries - stores;

    // Both the LQ and SQ entries have an extra dummy entry to differentiate
    // empty/full conditions.  Subtract 1 from the free entries.
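    // Worked example (illustrative numbers): with LQEntries = 33 (32 usable
    // entries plus the sentinel) and 10 loads in flight, free_lq_entries is
    // 23 and the value returned below is 22 usable slots.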
    if (free_lq_entries < free_sq_entries) {
        return free_lq_entries - 1;
    } else {
        return free_sq_entries - 1;
    }
}

template <class Impl>
int
LSQUnit<Impl>::numLoadsReady()
{
    int load_idx = loadHead;
    int retval = 0;

    while (load_idx != loadTail) {
        assert(loadQueue[load_idx]);

        if (loadQueue[load_idx]->readyToIssue()) {
            ++retval;
        }

        // Advance to the next LQ entry; without this the loop would never
        // terminate once the queue is non-empty.
        incrLdIdx(load_idx);
    }

    return retval;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    using namespace TheISA;
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %#x, [sn:%lli]\n",
            inst->readPC(),inst->seqNum);

    assert(!inst->isSquashed());

    load_fault = inst->initiateAcc();

    // If the instruction faulted, then we need to send it along to commit
    // without the instruction completing.
    if (load_fault != NoFault) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        // Mark it as executed unless it is an uncached load that
        // needs to hit the head of commit.
        if (!(inst->hasRequest() && inst->uncacheable()) ||
            inst->isAtCommit()) {
            inst->setExecuted();
        }
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    } else if (!loadBlocked()) {
        assert(inst->effAddrValid);
        int load_idx = inst->lqIdx;
        incrLdIdx(load_idx);
        while (load_idx != loadTail) {
            // Really only need to check loads that have actually executed

            // @todo: For now this is extra conservative, detecting a
            // violation if the addresses match assuming all accesses
            // are quad word accesses.

            // @todo: Fix this, magic number being used here
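            // Note: the shift by 8 compares effective addresses at a
            // 256-byte granularity, so any younger load that falls in the
            // same 256-byte block as this load is conservatively flagged as
            // a possible ordering violation.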
            if (loadQueue[load_idx]->effAddrValid &&
                (loadQueue[load_idx]->effAddr >> 8) ==
                (inst->effAddr >> 8)) {
                // A load incorrectly passed this load.  Squash and refetch.
                // For now return a fault to show that it was unsuccessful.
                DynInstPtr violator = loadQueue[load_idx];
                if (!memDepViolator ||
                    (violator->seqNum < memDepViolator->seqNum)) {
                    memDepViolator = violator;
                } else {
                    break;
                }

                ++lsqMemOrderViolation;

                return genMachineCheckFault();
            }

            incrLdIdx(load_idx);
        }
    }

    return load_fault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %#x [sn:%lli]\n",
            store_inst->readPC(), store_inst->seqNum);

    assert(!store_inst->isSquashed());

    // Check the recently completed loads to see if any match this store's
    // address.  If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit,"Fault on Store PC %#x, [sn:%lli],Size = 0\n",
                store_inst->readPC(),store_inst->seqNum);

        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    assert(store_inst->effAddrValid);
    while (load_idx != loadTail) {
        // Really only need to check loads that have actually executed
        // It's safe to check all loads because effAddr is set to
        // InvalAddr when the dyn inst is created.

        // @todo: For now this is extra conservative, detecting a
        // violation if the addresses match assuming all accesses
        // are quad word accesses.

        // @todo: Fix this, magic number being used here
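        // As in executeLoad(), the shift by 8 compares addresses at a
        // 256-byte granularity, so any executed load in the same 256-byte
        // block as this store is conservatively treated as a violation.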
        if (loadQueue[load_idx]->effAddrValid &&
            (loadQueue[load_idx]->effAddr >> 8) ==
            (store_inst->effAddr >> 8)) {
            // A load incorrectly passed this store.  Squash and refetch.
            // For now return a fault to show that it was unsuccessful.
            DynInstPtr violator = loadQueue[load_idx];
            if (!memDepViolator ||
                (violator->seqNum < memDepViolator->seqNum)) {
                memDepViolator = violator;
            } else {
                break;
            }

            ++lsqMemOrderViolation;

            return genMachineCheckFault();
        }

        incrLdIdx(load_idx);
    }

    return store_fault;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %#x\n",
            loadQueue[loadHead]->readPC());

    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%#x [sn:%lli]\n",
                    storeQueue[store_idx].inst->readPC(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           usedPorts < cachePorts) {

        if (isStoreBlocked || lsq->cacheBlocked()) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[64];

        TheISA::IntReg convertedData =
            TheISA::htog(storeQueue[storeWBIdx].data);

        // FIXME: This is a hack to get SPARC working. It, along with
        // endianness in the memory system in general, needs to be
        // straightened out more formally. The problem is that the data's
        // endianness is swapped when it's in the 64-bit data field in the
        // store queue. The data that you want won't start at the beginning
        // of the field anymore unless it was a 64-bit access.
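        // Illustrative case (assuming an 8-byte IntReg): if the guest and
        // host byte orders differ and this is a 4-byte store, the bytes to
        // write start at offset sizeof(IntReg) - req->getSize() = 4 within
        // convertedData rather than at offset 0.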
        memcpy(inst->memData,
                (uint8_t *)&convertedData +
                (TheISA::ByteOrderDiffers ?
                 (sizeof(TheISA::IntReg) - req->getSize()) : 0),
                req->getSize());

        PacketPtr data_pkt = new Packet(req, Packet::WriteReq, Packet::Broadcast);
        data_pkt->dataStatic(inst->memData);

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;
        data_pkt->senderState = state;

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%#x "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->readPC(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (req->isLocked()) {
            // Disable recording the result temporarily.  Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult = false;
            bool success = TheISA::handleLockedWrite(inst.get(), req);
            inst->recordResult = true;

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed.  "
                        "Instantly completing it.\n",
                        inst->seqNum);
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                wb->schedule(curTick + 1);
                delete state;
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        if (!dcachePort->sendTiming(data_pkt)) {
            if (data_pkt->result == Packet::BadAddress) {
                panic("LSQ sent out a bad address for a completed store!");
            }
            // Need to handle becoming blocked on a store.
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], "
                    "will retry later\n",
                    inst->seqNum);
            isStoreBlocked = true;
            ++lsqCacheBlocked;
            assert(retryPkt == NULL);
            retryPkt = data_pkt;
            lsq->setRetryTid(lsqID);
        } else {
            storePostSend(data_pkt);
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
    }
}*/

template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit,"Load Instruction PC %#x squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->readPC(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit,"Store Instruction PC %#x squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->readPC(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen.  It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();
#if USE_CHECKER
        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
#endif
    }

    if (pkt->result != Packet::Success) {
        DPRINTF(LSQUnit,"D-Cache Write Miss on idx:%i!\n",
                storeWBIdx);

        DPRINTF(Activity, "Active st accessing mem miss [sn:%lli]\n",
                storeQueue[storeWBIdx].inst->seqNum);

        //mshrSeqNums.push_back(storeQueue[storeWBIdx].inst->seqNum);

        //DPRINTF(LSQUnit, "Added MSHR. count = %i\n",mshrSeqNums.size());

        // @todo: Increment stat here.
    } else {
        DPRINTF(LSQUnit,"D-Cache: Write Hit on idx:%i !\n",
                storeWBIdx);

        DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n",
                storeQueue[storeWBIdx].inst->seqNum);
    }

    incrStIdx(storeWBIdx);
}

template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();
}

template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

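    // If the completed store is at the head of the SQ, advance the head past
    // it and past any contiguously completed entries behind it so that all
    // of those slots are freed at once.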
    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    // Tell the checker we've completed this instruction.  Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
#if USE_CHECKER
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
#endif
}

template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        assert(retryPkt != NULL);

        if (dcachePort->sendTiming(retryPkt)) {
            if (retryPkt->result == Packet::BadAddress) {
                panic("LSQ sent out a bad address for a completed store!");
            }
            storePostSend(retryPkt);
            retryPkt = NULL;
            isStoreBlocked = false;
            lsq->setRetryTid(-1);
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
            lsq->setRetryTid(lsqID);
        }
    } else if (isLoadBlocked) {
        DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
                "no need to resend packet.\n");
    } else {
        DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
    }
}

template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx)
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx)
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx)
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx)
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

template <class Impl>
void
LSQUnit<Impl>::dumpInsts()
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        cprintf("%#x ", loadQueue[load_idx]->readPC());

        incrLdIdx(load_idx);
    }

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        cprintf("%#x ", storeQueue[store_idx].inst->readPC());

        incrStIdx(store_idx);
    }

    cprintf("\n");
}