lsq_unit_impl.hh revision 8581
/*
 * Copyright (c) 2010 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#include "arch/locked_mem.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#include "config/use_checker.hh"
#include "cpu/o3/lsq.hh"
#include "cpu/o3/lsq_unit.hh"
#include "debug/Activity.hh"
#include "debug/IEW.hh"
#include "debug/LSQUnit.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

#if USE_CHECKER
#include "cpu/checker/cpu.hh"
#endif

template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(Default_Pri, AutoDelete),
      inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    if (!lsqPtr->isSwitchedOut()) {
        lsqPtr->writeback(inst, pkt);
    }

    if (pkt->senderState)
        delete pkt->senderState;

    delete pkt->req;
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description() const
{
    return "Store writeback";
}

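// Handle a response from the D-cache for a request issued by this LSQ unit.
// Split accesses wait until every packet has arrived; for squashed or
// switched-out instructions only the writeback count is adjusted, otherwise
// the data is written back and stores are marked complete.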
template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    assert(!pkt->wasNacked());

    // If this is a split access, wait until all packets are received.
    if (TheISA::HasUnalignedMemAcc && !state->complete()) {
        delete pkt->req;
        delete pkt;
        return;
    }

    if (isSwitchedOut() || inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
    } else {
        if (!state->noWB) {
            if (!TheISA::HasUnalignedMemAcc || !state->isSplit ||
                !state->isLoad) {
                writeback(inst, pkt);
            } else {
                writeback(inst, state->mainPkt);
            }
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
        delete state->mainPkt->req;
        delete state->mainPkt;
    }
    delete state;
    delete pkt->req;
    delete pkt;
}

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false), hasPendingPkt(false)
{
}

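// One-time setup: wire the unit to its CPU, IEW stage, and parent LSQ, size
// the circular load/store queues (plus one sentinel entry each), and reset
// the head/tail indices and other bookkeeping state.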
template<class Impl>
void
LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
        LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
        unsigned id)
{
    cpu = cpu_ptr;
    iewStage = iew_ptr;

    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id);

    switchedOut = false;

    cacheBlockMask = 0;

    lsq = lsq_ptr;

    lsqID = id;

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    depCheckShift = params->LSQDepCheckShift;
    checkLoads = params->LSQCheckLoads;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;
    cachePorts = params->cachePorts;

    retryPkt = NULL;
    memDepViolator = NULL;

    blockedLoadSeqNum = 0;
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread" + to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the instruction is squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::setDcachePort(Port *dcache_port)
{
    dcachePort = dcache_port;

#if USE_CHECKER
    if (cpu->checker) {
        cpu->checker->setDcachePort(dcachePort);
    }
#endif
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::switchOut()
{
    switchedOut = true;
    for (int i = 0; i < loadQueue.size(); ++i) {
        assert(!loadQueue[i]);
        loadQueue[i] = NULL;
    }

    assert(storesToWB == 0);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    switchedOut = false;
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;

    // Just in case the memory system changed out from under us.
    cacheBlockMask = 0;
}

template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }

}

template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

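// The two helpers below place an instruction at the tail of the load or store
// queue, record its LQ/SQ indices in the instruction itself, and bump the
// corresponding occupancy counter.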
template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
            load_inst->pcState(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
            store_inst->pcState(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeEntries()
{
    unsigned free_lq_entries = LQEntries - loads;
    unsigned free_sq_entries = SQEntries - stores;

    // Both the LQ and SQ entries have an extra dummy entry to differentiate
    // empty/full conditions.  Subtract 1 from the free entries.
    if (free_lq_entries < free_sq_entries) {
        return free_lq_entries - 1;
    } else {
        return free_sq_entries - 1;
    }
}

template <class Impl>
int
LSQUnit<Impl>::numLoadsReady()
{
    int load_idx = loadHead;
    int retval = 0;

    while (load_idx != loadTail) {
        assert(loadQueue[load_idx]);

        if (loadQueue[load_idx]->readyToIssue()) {
            ++retval;
        }

        // Advance to the next LQ entry so the scan terminates.
        incrLdIdx(load_idx);
    }

    return retval;
}

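// Called when an external snoop (invalidation) is observed on the D-cache
// port: walk the load queue and, for any load to the invalidated cache block,
// either schedule it for re-execution or flag it as having hit an external
// snoop so later ordering checks can squash it.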
template <class Impl>
void
LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
{
    int load_idx = loadHead;

    if (!cacheBlockMask) {
        assert(dcachePort);
        Addr bs = dcachePort->peerBlockSize();

        // Make sure we actually got a size
        assert(bs != 0);

        cacheBlockMask = ~(bs - 1);
    }

    // If this is the only load in the LSQ we don't care
    if (load_idx == loadTail)
        return;
    incrLdIdx(load_idx);

    DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());
    Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];

        if (!ld_inst->effAddrValid || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
                    ld_inst->seqNum, load_addr, invalidate_addr);

        if (load_addr == invalidate_addr) {
            if (ld_inst->possibleLoadViolation) {
                DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n",
                        ld_inst->physEffAddr, pkt->getAddr(), ld_inst->seqNum);

                // Mark the load for re-execution
                ld_inst->fault = new ReExec;
            } else {
                // If an older load checks this and it's true,
                // then we might have missed the snoop, in which
                // case we need to invalidate to be sure.
                ld_inst->hitExternalSnoop = true;
            }
        }
        incrLdIdx(load_idx);
    }
    return;
}

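// Scan the load queue from load_idx to the tail for entries whose effective
// address range overlaps that of the given instruction.  When an ordering
// violation is detected, the offending load is recorded in memDepViolator and
// a machine check fault is returned so the pipeline can squash and refetch.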
template <class Impl>
Fault
LSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst)
{
    Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
    Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;

    /** @todo in theory you only need to check an instruction that has executed
     * however, there isn't a good way in the pipeline at the moment to check
     * all instructions that will execute before the store writes back. Thus,
     * like the implementation that came before it, we're overly conservative.
     */
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];
        if (!ld_inst->effAddrValid || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
        Addr ld_eff_addr2 =
            (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;

        if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
            if (inst->isLoad()) {
                // If this load is to the same block as an external snoop
                // invalidate that we've observed then the load needs to be
                // squashed as it could have newer data
                if (ld_inst->hitExternalSnoop) {
                    if (!memDepViolator ||
                            ld_inst->seqNum < memDepViolator->seqNum) {
                        DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
                                " and [sn:%lli] at address %#x\n", inst->seqNum,
                                ld_inst->seqNum, ld_eff_addr1);
                        memDepViolator = ld_inst;

                        ++lsqMemOrderViolation;

                        return TheISA::genMachineCheckFault();
                    }
                }

                // Otherwise, mark the load as having a possible load violation;
                // if we see a snoop before it's committed, we need to squash.
                ld_inst->possibleLoadViolation = true;
                DPRINTF(LSQUnit, "Found possible load violation at addr: %#x"
                        " between instructions [sn:%lli] and [sn:%lli]\n",
                        inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
            } else {
                // A load/store incorrectly passed this store.
                // Check if we already have a violator, or if it's newer
                // squash and refetch.
                if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
                    break;

                DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and [sn:%lli]"
                        " at address %#x\n", inst->seqNum, ld_inst->seqNum,
                        ld_eff_addr1);
                memDepViolator = ld_inst;

                ++lsqMemOrderViolation;

                return TheISA::genMachineCheckFault();
            }
        }

        incrLdIdx(load_idx);
    }
    return NoFault;
}

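// Execute a load: initiate its memory access, send faulting or
// false-predicated loads straight to commit, and otherwise check the loads
// younger than this one for possible ordering violations.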
template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    using namespace TheISA;
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
            inst->pcState(), inst->seqNum);

    assert(!inst->isSquashed());

    load_fault = inst->initiateAcc();

    if (inst->isTranslationDelayed() &&
        load_fault == NoFault)
        return load_fault;

    // If the instruction faulted or predicated false, then we need to send it
    // along to commit without the instruction completing.
    if (load_fault != NoFault || inst->readPredicate() == false) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        // Mark it as executed unless it is an uncached load that
        // needs to hit the head of commit.
        if (inst->readPredicate() == false)
            inst->forwardOldRegs();
        DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
                inst->seqNum,
                (load_fault != NoFault ? "fault" : "predication"));
        if (!(inst->hasRequest() && inst->uncacheable()) ||
            inst->isAtCommit()) {
            inst->setExecuted();
        }
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    } else if (!loadBlocked()) {
        assert(inst->effAddrValid);
        int load_idx = inst->lqIdx;
        incrLdIdx(load_idx);

        if (checkLoads)
            return checkViolations(load_idx, inst);
    }

    return load_fault;
}

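// Execute a store: initiate its access to compute the address and fill in the
// SQ entry, then check loads younger than the store for ordering violations.
// Store conditionals are marked able to write back as soon as they execute.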
template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n",
            store_inst->pcState(), store_inst->seqNum);

    assert(!store_inst->isSquashed());

    // Check the recently completed loads to see if any match this store's
    // address.  If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    if (store_inst->isTranslationDelayed() &&
        store_fault == NoFault)
        return store_fault;

    if (store_inst->readPredicate() == false)
        store_inst->forwardOldRegs();

    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit,"Fault on Store PC %s, [sn:%lli], Size = 0\n",
                store_inst->pcState(), store_inst->seqNum);

        return store_fault;
    } else if (store_inst->readPredicate() == false) {
        DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
                store_inst->seqNum);
        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    return checkViolations(load_idx, store_inst);

}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n",
            loadQueue[loadHead]->pcState());

    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

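// Mark every store up to and including the given sequence number as able to
// write back; the actual cache access is issued later by writebackStores().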
template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%s [sn:%lli]\n",
                    storeQueue[store_idx].inst->pcState(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackPendingStore()
{
    if (hasPendingPkt) {
        assert(pendingPkt != NULL);

        // If the cache is blocked, this will store the packet for retry.
        if (sendStore(pendingPkt)) {
            storePostSend(pendingPkt);
        }
        pendingPkt = NULL;
        hasPendingPkt = false;
    }
}

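// Write committed stores back to the D-cache, oldest first, limited by the
// cache ports available this cycle.  Handles split (unaligned) stores,
// failed store conditionals, and memory-mapped IPR writes, and leaves a
// pending packet for retry when the cache is blocked.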
template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    // First writeback the second packet from any split store that didn't
    // complete last cycle because there weren't enough cache ports available.
    if (TheISA::HasUnalignedMemAcc) {
        writebackPendingStore();
    }

    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           usedPorts < cachePorts) {

        if (isStoreBlocked || lsq->cacheBlocked()) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
            assert(storeQueue[storeWBIdx].sreqLow);
            assert(storeQueue[storeWBIdx].sreqHigh);
        }

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
        RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;

        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[64];

        memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());

        MemCmd command =
            req->isSwap() ? MemCmd::SwapReq :
            (req->isLLSC() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
        PacketPtr data_pkt;
        PacketPtr snd_data_pkt = NULL;

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;

        if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {

            // Build a single data packet if the store isn't split.
            data_pkt = new Packet(req, command, Packet::Broadcast);
            data_pkt->dataStatic(inst->memData);
            data_pkt->senderState = state;
        } else {
            // Create two packets if the store is split in two.
            data_pkt = new Packet(sreqLow, command, Packet::Broadcast);
            snd_data_pkt = new Packet(sreqHigh, command, Packet::Broadcast);

            data_pkt->dataStatic(inst->memData);
            snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());

            data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;

            // Can delete the main request now.
            delete req;
            req = sreqLow;
        }

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->pcState(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            assert(!storeQueue[storeWBIdx].isSplit);
            // Disable recording the result temporarily.  Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult = false;
            bool success = TheISA::handleLockedWrite(inst.get(), req);
            inst->recordResult = true;

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed.  "
                        "Instantly completing it.\n",
                        inst->seqNum);
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                cpu->schedule(wb, curTick() + 1);
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        bool split =
            TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;

        ThreadContext *thread = cpu->tcBase(lsqID);

        if (req->isMmappedIpr()) {
            assert(!inst->isStoreConditional());
            TheISA::handleIprWrite(thread, data_pkt);
            delete data_pkt;
            if (split) {
                assert(snd_data_pkt->req->isMmappedIpr());
                TheISA::handleIprWrite(thread, snd_data_pkt);
                delete snd_data_pkt;
                delete sreqLow;
                delete sreqHigh;
            }
            delete state;
            delete req;
            completeStore(storeWBIdx);
            incrStIdx(storeWBIdx);
        } else if (!sendStore(data_pkt)) {
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], "
                    "will retry later\n",
                    inst->seqNum);

            // Need to store the second packet, if split.
            if (split) {
                state->pktToSend = true;
                state->pendingPacket = snd_data_pkt;
            }
        } else {

            // If split, try to send the second packet too
            if (split) {
                assert(snd_data_pkt);

                // Ensure there are enough ports to use.
                if (usedPorts < cachePorts) {
                    ++usedPorts;
                    if (sendStore(snd_data_pkt)) {
                        storePostSend(snd_data_pkt);
                    } else {
                        DPRINTF(IEW, "D-Cache became blocked when writing"
                                " [sn:%lli] second packet, will retry later\n",
                                inst->seqNum);
                    }
                } else {

                    // Store the packet for when there's free ports.
                    assert(pendingPkt == NULL);
                    pendingPkt = snd_data_pkt;
                    hasPendingPkt = true;
                }
            } else {

                // Not a split store.
                storePostSend(data_pkt);
            }
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
    }
}*/

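// Squash all loads and stores younger than squashed_num: entries are popped
// from the tails of both queues, their requests are freed, and any blocked-
// load or memory-dependence-violator state belonging to squashed instructions
// is cleared.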
96310311Snilay@cs.wisc.edutemplate <class Impl>
96410311Snilay@cs.wisc.eduvoid
96510311Snilay@cs.wisc.eduLSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
96610311Snilay@cs.wisc.edu{
96710311Snilay@cs.wisc.edu    DPRINTF(LSQUnit, "Squashing until [sn:%lli]!"
96810311Snilay@cs.wisc.edu            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);
96910311Snilay@cs.wisc.edu
9709302Snilay@cs.wisc.edu    int load_idx = loadTail;
9719302Snilay@cs.wisc.edu    decrLdIdx(load_idx);
9729302Snilay@cs.wisc.edu
9739302Snilay@cs.wisc.edu    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
9749302Snilay@cs.wisc.edu        DPRINTF(LSQUnit,"Load Instruction PC %s squashed, "
9756657Snate@binkert.org                "[sn:%lli]\n",
9766657Snate@binkert.org                loadQueue[load_idx]->pcState(),
9779219Spower.jg@gmail.com                loadQueue[load_idx]->seqNum);
9786657Snate@binkert.org
9796657Snate@binkert.org        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit,"Store Instruction PC %s squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->pcState(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen.  It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;
        if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
            delete storeQueue[store_idx].sreqLow;
            delete storeQueue[store_idx].sreqHigh;

            storeQueue[store_idx].sreqLow = NULL;
            storeQueue[store_idx].sreqHigh = NULL;
        }

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

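// Bookkeeping after a store packet has been handed to the data cache:
// wake any load that was stalled behind this store, mark non-conditional
// stores completed (store conditionals must wait for their result to
// return), and advance the store writeback index.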
template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();
#if USE_CHECKER
        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
#endif
    }

    incrStIdx(storeWBIdx);
}

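// Writes the data from a returned memory response back into the
// instruction (typically a load) and forwards it to IEW for commit.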
template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert the instruction into the queue to commit.
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();

    // See if this load changed the PC.
    iewStage->checkMisprediction(inst);
}

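// Marks the store at store_idx as completed; if it sits at the head of
// the store queue, this also retires it and any consecutively completed
// stores behind it, and clears any load stall tied to this store.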
template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    // Tell the checker we've completed this instruction.  Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
#if USE_CHECKER
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
#endif
}

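// Attempts to send a store packet on the data cache port.  On failure the
// packet is saved as retryPkt and the unit is flagged as store-blocked
// until a retry arrives.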
template <class Impl>
bool
LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
{
    if (!dcachePort->sendTiming(data_pkt)) {
        // Need to handle becoming blocked on a store.
        isStoreBlocked = true;
        ++lsqCacheBlocked;
        assert(retryPkt == NULL);
        retryPkt = data_pkt;
        lsq->setRetryTid(lsqID);
        return false;
    }
    return true;
}

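// Handles a retry from the data cache port after an earlier sendTiming()
// was rejected: resends the blocked store packet and, for split accesses,
// any pending second packet.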
template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Receiving retry: store blocked\n");
        assert(retryPkt != NULL);

        if (dcachePort->sendTiming(retryPkt)) {
            LSQSenderState *state =
                dynamic_cast<LSQSenderState *>(retryPkt->senderState);

            // Don't finish the store unless this is the last packet.
            if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
                    state->pendingPacket == retryPkt) {
                state->pktToSend = false;
                storePostSend(retryPkt);
            }
            retryPkt = NULL;
            isStoreBlocked = false;
            lsq->setRetryTid(InvalidThreadID);

            // Send any outstanding packet.
            if (TheISA::HasUnalignedMemAcc && state->pktToSend) {
                assert(state->pendingPacket);
                if (sendStore(state->pendingPacket)) {
                    storePostSend(state->pendingPacket);
                }
            }
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
            lsq->setRetryTid(lsqID);
        }
    } else if (isLoadBlocked) {
        DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
                "no need to resend packet.\n");
    } else {
        DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
    }
}

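// Advances a store queue index, wrapping around at SQEntries.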
template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx)
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

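// Steps a store queue index back, wrapping below zero to SQEntries - 1.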
template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx)
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

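// Advances a load queue index, wrapping around at LQEntries.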
template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx)
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

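// Steps a load queue index back, wrapping below zero to LQEntries - 1.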
template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx)
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

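// Debugging helper: prints the PC of every instruction currently held in
// the load and store queues.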
template <class Impl>
void
LSQUnit<Impl>::dumpInsts()
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        cprintf("%s ", loadQueue[load_idx]->pcState());

        incrLdIdx(load_idx);
    }

    // End the load queue line so the store queue dump starts on its own line.
    cprintf("\n");

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        cprintf("%s ", storeQueue[store_idx].inst->pcState());

        incrStIdx(store_idx);
    }

    cprintf("\n");
}
12899105SBrad.Beckmann@amd.com