/*
 * Copyright (c) 2010-2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#include "arch/generic/debugfaults.hh"
#include "arch/locked_mem.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/o3/lsq.hh"
#include "cpu/o3/lsq_unit.hh"
#include "debug/Activity.hh"
#include "debug/IEW.hh"
#include "debug/LSQUnit.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

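// The WritebackEvent is used to schedule a writeback for a later tick, e.g.
// for a store conditional that fails and is completed locally without ever
// being sent to the data cache.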
template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(Default_Pri, AutoDelete),
      inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    assert(!lsqPtr->cpu->switchedOut());

    lsqPtr->writeback(inst, pkt);

    if (pkt->senderState)
        delete pkt->senderState;

    delete pkt->req;
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description() const
{
    return "Store writeback";
}

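// Handle a response from the data cache: recover the originating instruction
// from the packet's LSQSenderState, wait for both halves of a split access,
// and then write the result back and/or complete the store queue entry.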
template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    // If this is a split access, wait until all packets are received.
    if (TheISA::HasUnalignedMemAcc && !state->complete()) {
        delete pkt->req;
        delete pkt;
        return;
    }

    assert(!cpu->switchedOut());
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
    } else {
        if (!state->noWB) {
            if (!TheISA::HasUnalignedMemAcc || !state->isSplit ||
                !state->isLoad) {
                writeback(inst, pkt);
            } else {
                writeback(inst, state->mainPkt);
            }
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
        delete state->mainPkt->req;
        delete state->mainPkt;
    }
    delete state;
    delete pkt->req;
    delete pkt;
}

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false), storeInFlight(false), hasPendingPkt(false)
{
}

template<class Impl>
void
LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
        LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
        unsigned id)
{
    cpu = cpu_ptr;
    iewStage = iew_ptr;

    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id);

    lsq = lsq_ptr;

    lsqID = id;

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    depCheckShift = params->LSQDepCheckShift;
    checkLoads = params->LSQCheckLoads;
    cachePorts = params->cachePorts;
    needsTSO = params->needsTSO;

    resetState();
}


template<class Impl>
void
LSQUnit<Impl>::resetState()
{
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    retryPkt = NULL;
    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;

    cacheBlockMask = 0;
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread" + to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the instruction is squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port)
{
    dcachePort = dcache_port;
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::drainSanityCheck() const
{
    for (int i = 0; i < loadQueue.size(); ++i)
        assert(!loadQueue[i]);

    assert(storesToWB == 0);
    assert(!retryPkt);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    resetState();
}

template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }

}

template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
            load_inst->pcState(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
            store_inst->pcState(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeEntries()
{
    unsigned free_lq_entries = LQEntries - loads;
    unsigned free_sq_entries = SQEntries - stores;

    // Both the LQ and SQ entries have an extra dummy entry to differentiate
    // empty/full conditions.  Subtract 1 from the free entries.
    if (free_lq_entries < free_sq_entries) {
        return free_lq_entries - 1;
    } else {
        return free_sq_entries - 1;
    }
}

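// Check an invalidation snoop against the load queue.  The locked (LL/SC)
// monitor of every active thread is notified first; then any load to the
// snooped cache block is either marked for re-execution (if it was already a
// possible violator) or flagged as having seen an external snoop.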
template <class Impl>
void
LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
{
    int load_idx = loadHead;

    if (!cacheBlockMask) {
        assert(dcachePort);
        Addr bs = dcachePort->peerBlockSize();

        // Make sure we actually got a size
        assert(bs != 0);

        cacheBlockMask = ~(bs - 1);
    }

    // Unlock the cpu-local monitor when the CPU sees a snoop to a locked
    // address. The CPU can speculatively execute a LL operation after a
    // pending SC operation in the pipeline, and that can leave the cache
    // monitor that the CPU is connected to marked valid when it really
    // shouldn't be.
    for (int x = 0; x < cpu->numActiveThreads(); x++) {
        ThreadContext *tc = cpu->getContext(x);
        bool no_squash = cpu->thread[x]->noSquashFromTC;
        cpu->thread[x]->noSquashFromTC = true;
        TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask);
        cpu->thread[x]->noSquashFromTC = no_squash;
    }

    // If this is the only load in the LSQ we don't care
    if (load_idx == loadTail)
        return;
    incrLdIdx(load_idx);

    DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());
    Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];

        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
                    ld_inst->seqNum, load_addr, invalidate_addr);

        if (load_addr == invalidate_addr) {
            if (ld_inst->possibleLoadViolation()) {
                DPRINTF(LSQUnit, "Conflicting load at addr %#x to pktAddr:%#x "
                        "[sn:%lli]\n", ld_inst->physEffAddr, pkt->getAddr(),
                        ld_inst->seqNum);

                // Mark the load for re-execution
                ld_inst->fault = new ReExec;
            } else {
                // If an older load checks this and it's true, then we might
                // have missed the snoop, in which case we need to invalidate
                // to be sure.
                ld_inst->hitExternalSnoop(true);
            }
        }
        incrLdIdx(load_idx);
    }
    return;
}

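// Walk the load queue from load_idx towards the tail and compare effective
// addresses (at depCheckShift granularity) against the given memory
// instruction, recording the offending load as the memory-order violator and
// returning a fault when an overlap requires a squash.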
template <class Impl>
Fault
LSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst)
{
    Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
    Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;

    /** @todo In theory you only need to check an instruction that has
     * executed; however, there isn't a good way in the pipeline at the moment
     * to check all instructions that will execute before the store writes
     * back. Thus, like the implementation that came before it, we're overly
     * conservative.
     */
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];
        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
        Addr ld_eff_addr2 =
            (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;

        if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
            if (inst->isLoad()) {
                // If this load is to the same block as an external snoop
                // invalidate that we've observed, then the load needs to be
                // squashed as it could have newer data.
                if (ld_inst->hitExternalSnoop()) {
                    if (!memDepViolator ||
                            ld_inst->seqNum < memDepViolator->seqNum) {
                        DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
                                "and [sn:%lli] at address %#x\n",
                                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                        memDepViolator = ld_inst;

                        ++lsqMemOrderViolation;

                        return new GenericISA::M5PanicFault(
                                "Detected fault with inst [sn:%lli] and "
                                "[sn:%lli] at address %#x\n",
                                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                    }
                }

                // Otherwise, mark the load as a possible load violation, and
                // if we see a snoop before it's committed we need to squash.
                ld_inst->possibleLoadViolation(true);
                DPRINTF(LSQUnit, "Found possible load violation at addr: %#x"
                        " between instructions [sn:%lli] and [sn:%lli]\n",
                        inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
            } else {
                // A load/store incorrectly passed this store.  If we already
                // have an older violator, keep it; otherwise record this one,
                // then squash and refetch.
                if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
                    break;

                DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and "
                        "[sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                memDepViolator = ld_inst;

                ++lsqMemOrderViolation;

                return new GenericISA::M5PanicFault("Detected fault with "
                        "inst [sn:%lli] and [sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
            }
        }

        incrLdIdx(load_idx);
    }
    return NoFault;
}

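// Execute a load: initiate translation and the memory access.  Faulting or
// false-predicated loads are sent straight to commit; otherwise the younger
// loads are checked for possible ordering violations.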
template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    using namespace TheISA;
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
            inst->pcState(), inst->seqNum);

    assert(!inst->isSquashed());

    load_fault = inst->initiateAcc();

    if (inst->isTranslationDelayed() &&
        load_fault == NoFault)
        return load_fault;

    // If the instruction faulted or predicated false, then we need to send it
    // along to commit without the instruction completing.
    if (load_fault != NoFault || inst->readPredicate() == false) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        // Mark it as executed unless it is an uncached load that
        // needs to hit the head of commit.
        if (inst->readPredicate() == false)
            inst->forwardOldRegs();
        DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
                inst->seqNum,
                (load_fault != NoFault ? "fault" : "predication"));
        if (!(inst->hasRequest() && inst->uncacheable()) ||
            inst->isAtCommit()) {
            inst->setExecuted();
        }
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    } else if (!loadBlocked()) {
        assert(inst->effAddrValid());
        int load_idx = inst->lqIdx;
        incrLdIdx(load_idx);

        if (checkLoads)
            return checkViolations(load_idx, inst);
    }

    return load_fault;
}

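// Execute a store: compute its address and data via initiateAcc() and check
// younger loads for memory-order violations.  Only store conditionals are
// marked able to write back here; other stores are marked at commit time in
// commitStores().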
template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n",
            store_inst->pcState(), store_inst->seqNum);

    assert(!store_inst->isSquashed());

    // Check the recently completed loads to see if any match this store's
    // address.  If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    if (store_inst->isTranslationDelayed() &&
        store_fault == NoFault)
        return store_fault;

    if (store_inst->readPredicate() == false)
        store_inst->forwardOldRegs();

    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit,"Fault on Store PC %s, [sn:%lli], Size = 0\n",
                store_inst->pcState(), store_inst->seqNum);

        return store_fault;
    } else if (store_inst->readPredicate() == false) {
        DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
                store_inst->seqNum);
        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    return checkViolations(load_idx, store_inst);

}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n",
            loadQueue[loadHead]->pcState());

    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

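// Mark every store up to and including youngest_inst as committed and able
// to write back; the actual cache access is issued later by
// writebackStores().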
template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%s [sn:%lli]\n",
                    storeQueue[store_idx].inst->pcState(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackPendingStore()
{
    if (hasPendingPkt) {
        assert(pendingPkt != NULL);

        // If the cache is blocked, this will store the packet for retry.
        if (sendStore(pendingPkt)) {
            storePostSend(pendingPkt);
        }
        pendingPkt = NULL;
        hasPendingPkt = false;
    }
}

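// Write committed stores back to the data cache, oldest first, limited by
// the available cache ports (and by TSO ordering, if enabled).  Split stores
// issue two packets, failed store conditionals complete immediately, and
// memory-mapped IPR writes bypass the cache.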
template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    // First write back the second packet from any split store that didn't
    // complete last cycle because there weren't enough cache ports available.
    if (TheISA::HasUnalignedMemAcc) {
        writebackPendingStore();
    }

    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           ((!needsTSO) || (!storeInFlight)) &&
           usedPorts < cachePorts) {

        if (isStoreBlocked || lsq->cacheBlocked()) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
            assert(storeQueue[storeWBIdx].sreqLow);
            assert(storeQueue[storeWBIdx].sreqHigh);
        }

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
        RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;

        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[64];

        memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());

        MemCmd command =
            req->isSwap() ? MemCmd::SwapReq :
            (req->isLLSC() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
        PacketPtr data_pkt;
        PacketPtr snd_data_pkt = NULL;

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;

        if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {

            // Build a single data packet if the store isn't split.
            data_pkt = new Packet(req, command);
            data_pkt->dataStatic(inst->memData);
            data_pkt->senderState = state;
        } else {
            // Create two packets if the store is split in two.
            data_pkt = new Packet(sreqLow, command);
            snd_data_pkt = new Packet(sreqHigh, command);

            data_pkt->dataStatic(inst->memData);
            snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());

            data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;

            // Can delete the main request now.
            delete req;
            req = sreqLow;
        }

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->pcState(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            assert(!storeQueue[storeWBIdx].isSplit);
            // Disable recording the result temporarily.  Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult(false);
            bool success = TheISA::handleLockedWrite(inst.get(), req);
            inst->recordResult(true);

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed.  "
                        "Instantly completing it.\n",
                        inst->seqNum);
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                cpu->schedule(wb, curTick() + 1);
                if (cpu->checker) {
                    // Make sure to set the LLSC data for verification
                    // if checker is loaded
                    inst->reqToVerify->setExtraData(0);
                    inst->completeAcc(data_pkt);
                }
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        bool split =
            TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;

        ThreadContext *thread = cpu->tcBase(lsqID);

        if (req->isMmappedIpr()) {
            assert(!inst->isStoreConditional());
            TheISA::handleIprWrite(thread, data_pkt);
            delete data_pkt;
            if (split) {
                assert(snd_data_pkt->req->isMmappedIpr());
                TheISA::handleIprWrite(thread, snd_data_pkt);
                delete snd_data_pkt;
                delete sreqLow;
                delete sreqHigh;
            }
            delete state;
            delete req;
            completeStore(storeWBIdx);
            incrStIdx(storeWBIdx);
        } else if (!sendStore(data_pkt)) {
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], "
                    "will retry later\n",
                    inst->seqNum);

            // Need to store the second packet, if split.
            if (split) {
                state->pktToSend = true;
                state->pendingPacket = snd_data_pkt;
            }
        } else {

            // If split, try to send the second packet too
            if (split) {
                assert(snd_data_pkt);

                // Ensure there are enough ports to use.
                if (usedPorts < cachePorts) {
                    ++usedPorts;
                    if (sendStore(snd_data_pkt)) {
                        storePostSend(snd_data_pkt);
                    } else {
                        DPRINTF(IEW, "D-Cache became blocked when writing"
                                " [sn:%lli] second packet, will retry later\n",
                                inst->seqNum);
                    }
                } else {

                    // Store the packet for when there are free ports.
                    assert(pendingPkt == NULL);
                    pendingPkt = snd_data_pkt;
                    hasPendingPkt = true;
                }
            } else {

                // Not a split store.
                storePostSend(data_pkt);
            }
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
    }
}*/

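// Squash all loads and stores younger than squashed_num, walking both queues
// from the tail.  Stores already marked as able to write back have committed
// and are left alone.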
template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit,"Load Instruction PC %s squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->pcState(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit,"Store Instruction PC %s squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->pcState(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen.  It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;
        if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
            delete storeQueue[store_idx].sreqLow;
            delete storeQueue[store_idx].sreqHigh;

            storeQueue[store_idx].sreqLow = NULL;
            storeQueue[store_idx].sreqHigh = NULL;
        }

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

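// Bookkeeping after a store packet has been sent to the cache: clear any
// load stall caused by this store, mark non-conditional stores completed,
// and advance the writeback index.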
template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();

        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
    }

    if (needsTSO) {
        storeInFlight = true;
    }

    incrStIdx(storeWBIdx);
}

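// Write the result of a completed access back for a non-squashed
// instruction and hand the instruction to commit; also check whether the
// load changed the PC.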
template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();

    // see if this load changed the PC
    iewStage->checkMisprediction(inst);
}

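// Mark the given store queue entry as completed, retire completed entries
// from the head of the queue, and clear any load stall that was waiting on
// this store.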
template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    if (needsTSO) {
        storeInFlight = false;
    }

    // Tell the checker we've completed this instruction.  Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
}

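// Try to send a store packet to the data cache.  If the cache is blocked,
// remember the packet in retryPkt and tell the LSQ which thread needs a
// retry.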
template <class Impl>
bool
LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
{
    if (!dcachePort->sendTimingReq(data_pkt)) {
        // Need to handle becoming blocked on a store.
        isStoreBlocked = true;
        ++lsqCacheBlocked;
        assert(retryPkt == NULL);
        retryPkt = data_pkt;
        lsq->setRetryTid(lsqID);
        return false;
    }
    return true;
}

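// Called when the data cache signals a retry: resend the blocked store
// packet and, for split stores, any pending second packet.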
template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Receiving retry: store blocked\n");
        assert(retryPkt != NULL);

        LSQSenderState *state =
            dynamic_cast<LSQSenderState *>(retryPkt->senderState);

        if (dcachePort->sendTimingReq(retryPkt)) {
            // Don't finish the store unless this is the last packet.
            if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
                    state->pendingPacket == retryPkt) {
                state->pktToSend = false;
                storePostSend(retryPkt);
            }
            retryPkt = NULL;
            isStoreBlocked = false;
            lsq->setRetryTid(InvalidThreadID);

            // Send any outstanding packet.
            if (TheISA::HasUnalignedMemAcc && state->pktToSend) {
                assert(state->pendingPacket);
                if (sendStore(state->pendingPacket)) {
                    storePostSend(state->pendingPacket);
                }
            }
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
            lsq->setRetryTid(lsqID);
        }
    } else if (isLoadBlocked) {
        DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
                "no need to resend packet.\n");
    } else {
        DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
    }
}

template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx) const
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx) const
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx) const
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx) const
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

template <class Impl>
void
LSQUnit<Impl>::dumpInsts() const
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        const DynInstPtr &inst(loadQueue[load_idx]);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrLdIdx(load_idx);
    }
    cprintf("\n");

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        const DynInstPtr &inst(storeQueue[store_idx].inst);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrStIdx(store_idx);
    }

    cprintf("\n");
12852329SN/A}
1286