lsq_unit_impl.hh revision 10239
/*
 * Copyright (c) 2010-2013 ARM Limited
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_LSQ_UNIT_IMPL_HH__
#define __CPU_O3_LSQ_UNIT_IMPL_HH__

#include "arch/generic/debugfaults.hh"
#include "arch/locked_mem.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/o3/lsq.hh"
#include "cpu/o3/lsq_unit.hh"
#include "debug/Activity.hh"
#include "debug/IEW.hh"
#include "debug/LSQUnit.hh"
#include "debug/O3PipeView.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(Default_Pri, AutoDelete),
      inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    assert(!lsqPtr->cpu->switchedOut());

    lsqPtr->writeback(inst, pkt);

    if (pkt->senderState)
        delete pkt->senderState;

    delete pkt->req;
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description() const
{
    return "Store writeback";
}

template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    // If this is a split access, wait until all packets are received.
    if (TheISA::HasUnalignedMemAcc && !state->complete()) {
        delete pkt->req;
        delete pkt;
        return;
    }

    assert(!cpu->switchedOut());
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
    } else {
        if (!state->noWB) {
            if (!TheISA::HasUnalignedMemAcc || !state->isSplit ||
                !state->isLoad) {
                writeback(inst, pkt);
            } else {
                writeback(inst, state->mainPkt);
            }
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
        delete state->mainPkt->req;
        delete state->mainPkt;
    }

    pkt->req->setAccessLatency();
    cpu->ppDataAccessComplete->notify(std::make_pair(inst, pkt));

    delete state;
    delete pkt->req;
    delete pkt;
}

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false), storeInFlight(false), hasPendingPkt(false)
{
}

template<class Impl>
void
LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
        LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
        unsigned id)
{
    cpu = cpu_ptr;
    iewStage = iew_ptr;

    lsq = lsq_ptr;

    lsqID = id;

    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n", id);

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;
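    // For example, maxLQEntries == 32 gives a 33-slot circular buffer; the
    // extra slot always stays empty so that head == tail unambiguously
    // means "empty" rather than "full".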

    // Due to uint8_t index in LSQSenderState
    assert(LQEntries <= 256);
    assert(SQEntries <= 256);

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    depCheckShift = params->LSQDepCheckShift;
    checkLoads = params->LSQCheckLoads;
    cachePorts = params->cachePorts;
    needsTSO = params->needsTSO;

    resetState();
}


template<class Impl>
void
LSQUnit<Impl>::resetState()
{
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    retryPkt = NULL;
    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;

    cacheBlockMask = ~(cpu->cacheLineSize() - 1);
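    // E.g. with 64-byte cache lines this is ~0x3f, a mask that clears the
    // block-offset bits so addresses can be compared at block granularity.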
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread" + to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the instruction is squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port)
{
    dcachePort = dcache_port;
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::drainSanityCheck() const
{
    for (int i = 0; i < loadQueue.size(); ++i)
        assert(!loadQueue[i]);

    assert(storesToWB == 0);
    assert(!retryPkt);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    resetState();
}

template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }

    assert(LQEntries <= 256);
}

template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }

    assert(SQEntries <= 256);
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);
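    // The modulo test above is the circular-queue "full" check; thanks to the
    // sentinel entry, the queue is full when advancing the tail would land on
    // the head.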

    DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
            load_inst->pcState(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
            store_inst->pcState(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeLoadEntries()
{
    // LQ has an extra dummy entry to differentiate
    // empty/full conditions. Subtract 1 from the free entries.
    DPRINTF(LSQUnit, "LQ size: %d, #loads occupied: %d\n", LQEntries, loads);
    return LQEntries - loads - 1;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeStoreEntries()
{
    // SQ has an extra dummy entry to differentiate
    // empty/full conditions. Subtract 1 from the free entries.
    DPRINTF(LSQUnit, "SQ size: %d, #stores occupied: %d\n", SQEntries, stores);
    return SQEntries - stores - 1;
}

template <class Impl>
void
LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
{
    int load_idx = loadHead;
    DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());

    // Unlock the cpu-local monitor when the CPU sees a snoop to a locked
    // address. The CPU can speculatively execute an LL operation after a
    // pending SC operation in the pipeline, and that can leave the monitor in
    // the cache the CPU is connected to valid when it really shouldn't be.
    for (int x = 0; x < cpu->numContexts(); x++) {
        ThreadContext *tc = cpu->getContext(x);
        bool no_squash = cpu->thread[x]->noSquashFromTC;
        cpu->thread[x]->noSquashFromTC = true;
        TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask);
        cpu->thread[x]->noSquashFromTC = no_squash;
    }

    Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;

    DynInstPtr ld_inst = loadQueue[load_idx];
    if (ld_inst) {
        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        // Check that this snoop didn't just invalidate our lock flag
        if (ld_inst->effAddrValid() && load_addr == invalidate_addr &&
            ld_inst->memReqFlags & Request::LLSC)
            TheISA::handleLockedSnoopHit(ld_inst.get());
    }

    // If this is the only load in the LSQ we don't care
    if (load_idx == loadTail)
        return;

    incrLdIdx(load_idx);

    bool force_squash = false;

    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];

        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
                    ld_inst->seqNum, load_addr, invalidate_addr);

        if (load_addr == invalidate_addr || force_squash) {
            if (needsTSO) {
                // If we have a TSO system, as all loads must be ordered with
                // all other loads, this load as well as *all* subsequent loads
                // need to be squashed to prevent possible load reordering.
                force_squash = true;
            }
            if (ld_inst->possibleLoadViolation() || force_squash) {
                DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Mark the load for re-execution
                ld_inst->fault = new ReExec;
            } else {
                DPRINTF(LSQUnit, "HitExternal Snoop for addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Make sure that we don't lose a snoop hitting a LOCKED
                // address since the LOCK* flags don't get updated until
                // commit.
                if (ld_inst->memReqFlags & Request::LLSC)
                    TheISA::handleLockedSnoopHit(ld_inst.get());

                // If an older load checks this and it's true
                // then we might have missed the snoop
                // in which case we need to invalidate to be sure
                ld_inst->hitExternalSnoop(true);
            }
        }
        incrLdIdx(load_idx);
    }
    return;
}

template <class Impl>
Fault
LSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst)
{
    Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
    Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;
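    // Addresses are compared at a granularity of 2^depCheckShift bytes, so a
    // coarser shift trades extra false positives for a cheaper overlap check.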

    /** @todo in theory you only need to check an instruction that has executed
     * however, there isn't a good way in the pipeline at the moment to check
     * all instructions that will execute before the store writes back. Thus,
     * like the implementation that came before it, we're overly conservative.
     */
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];
        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
        Addr ld_eff_addr2 =
            (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;
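        // Two ranges [a1, a2] and [b1, b2] overlap iff a2 >= b1 && a1 <= b2;
        // the test below applies that to the shifted address ranges.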

        if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
            if (inst->isLoad()) {
                // If this load is to the same block as an external snoop
                // invalidate that we've observed then the load needs to be
                // squashed as it could have newer data
                if (ld_inst->hitExternalSnoop()) {
                    if (!memDepViolator ||
                            ld_inst->seqNum < memDepViolator->seqNum) {
                        DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
                                "and [sn:%lli] at address %#x\n",
                                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                        memDepViolator = ld_inst;

                        ++lsqMemOrderViolation;

                        return new GenericISA::M5PanicFault(
                                "Detected fault with inst [sn:%lli] and "
                                "[sn:%lli] at address %#x\n",
                                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                    }
                }

                // Otherwise, mark the load as having a possible load violation,
                // and if we see a snoop before it's committed, we need to squash.
                ld_inst->possibleLoadViolation(true);
                DPRINTF(LSQUnit, "Found possible load violation at addr: %#x"
                        " between instructions [sn:%lli] and [sn:%lli]\n",
                        inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
            } else {
                // A load/store incorrectly passed this store.
                // Check if we already have a violator, or if it's newer
                // squash and refetch.
                if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
                    break;

                DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and "
                        "[sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                memDepViolator = ld_inst;

                ++lsqMemOrderViolation;

                return new GenericISA::M5PanicFault("Detected fault with "
                        "inst [sn:%lli] and [sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
            }
        }

        incrLdIdx(load_idx);
    }
    return NoFault;
}



template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    using namespace TheISA;
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
            inst->pcState(), inst->seqNum);

    assert(!inst->isSquashed());

    load_fault = inst->initiateAcc();

    if (inst->isTranslationDelayed() &&
        load_fault == NoFault)
        return load_fault;

    // If the instruction faulted or predicated false, then we need to send it
    // along to commit without the instruction completing.
    if (load_fault != NoFault || !inst->readPredicate()) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        // Mark it as executed unless it is an uncached load that
        // needs to hit the head of commit.
        if (!inst->readPredicate())
            inst->forwardOldRegs();
        DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
                inst->seqNum,
                (load_fault != NoFault ? "fault" : "predication"));
        if (!(inst->hasRequest() && inst->uncacheable()) ||
            inst->isAtCommit()) {
            inst->setExecuted();
        }
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    } else if (!loadBlocked()) {
        assert(inst->effAddrValid());
        int load_idx = inst->lqIdx;
        incrLdIdx(load_idx);

        if (checkLoads)
            return checkViolations(load_idx, inst);
    }

    return load_fault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n",
            store_inst->pcState(), store_inst->seqNum);

    assert(!store_inst->isSquashed());

    // Check the recently completed loads to see if any match this store's
    // address.  If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;
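    // lqIdx was recorded at insert time as the LQ position the next (younger)
    // load would occupy, so the checkViolations() call at the end of this
    // function only scans loads younger than this store.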

    Fault store_fault = store_inst->initiateAcc();

    if (store_inst->isTranslationDelayed() &&
        store_fault == NoFault)
        return store_fault;

    if (!store_inst->readPredicate())
        store_inst->forwardOldRegs();

    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit, "Fault on Store PC %s, [sn:%lli], Size = 0\n",
                store_inst->pcState(), store_inst->seqNum);

        return store_fault;
    } else if (!store_inst->readPredicate()) {
        DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
                store_inst->seqNum);
        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    return checkViolations(load_idx, store_inst);

}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n",
            loadQueue[loadHead]->pcState());

    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%s [sn:%lli]\n",
                    storeQueue[store_idx].inst->pcState(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackPendingStore()
{
    if (hasPendingPkt) {
        assert(pendingPkt != NULL);

        // If the cache is blocked, this will store the packet for retry.
        if (sendStore(pendingPkt)) {
            storePostSend(pendingPkt);
        }
        pendingPkt = NULL;
        hasPendingPkt = false;
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    // First writeback the second packet from any split store that didn't
    // complete last cycle because there weren't enough cache ports available.
    if (TheISA::HasUnalignedMemAcc) {
        writebackPendingStore();
    }

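    // Write back as many ready stores as the cache ports allow.  When TSO
    // ordering is required (needsTSO), storeInFlight keeps at most one store
    // outstanding to the memory system at a time.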
    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           ((!needsTSO) || (!storeInFlight)) &&
           usedPorts < cachePorts) {

        if (isStoreBlocked || lsq->cacheBlocked()) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
            assert(storeQueue[storeWBIdx].sreqLow);
            assert(storeQueue[storeWBIdx].sreqHigh);
        }

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
        RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;

        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[req->getSize()];

        if (storeQueue[storeWBIdx].isAllZeros)
            memset(inst->memData, 0, req->getSize());
        else
            memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());

        MemCmd command =
            req->isSwap() ? MemCmd::SwapReq :
            (req->isLLSC() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
        PacketPtr data_pkt;
        PacketPtr snd_data_pkt = NULL;

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;

        if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {

            // Build a single data packet if the store isn't split.
            data_pkt = new Packet(req, command);
            data_pkt->dataStatic(inst->memData);
            data_pkt->senderState = state;
        } else {
            // Create two packets if the store is split in two.
            data_pkt = new Packet(sreqLow, command);
            snd_data_pkt = new Packet(sreqHigh, command);

            data_pkt->dataStatic(inst->memData);
            snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());
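            // The second packet's data pointer is offset by the size of the
            // low half so the two packets cover contiguous bytes of memData.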

            data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;

            // Can delete the main request now.
            delete req;
            req = sreqLow;
        }

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->pcState(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            assert(!storeQueue[storeWBIdx].isSplit);
            // Disable recording the result temporarily.  Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult(false);
            bool success = TheISA::handleLockedWrite(inst.get(), req, cacheBlockMask);
            inst->recordResult(true);

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed.  "
                        "Instantly completing it.\n",
                        inst->seqNum);
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                cpu->schedule(wb, curTick() + 1);
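                // Completing the failed SC through a WritebackEvent one tick
                // later lets it take the same completion path as a normal
                // dcache response.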
                if (cpu->checker) {
                    // Make sure to set the LLSC data for verification
                    // if checker is loaded
                    inst->reqToVerify->setExtraData(0);
                    inst->completeAcc(data_pkt);
                }
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        bool split =
            TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;

        ThreadContext *thread = cpu->tcBase(lsqID);

        if (req->isMmappedIpr()) {
            assert(!inst->isStoreConditional());
            TheISA::handleIprWrite(thread, data_pkt);
            delete data_pkt;
            if (split) {
                assert(snd_data_pkt->req->isMmappedIpr());
                TheISA::handleIprWrite(thread, snd_data_pkt);
                delete snd_data_pkt;
                delete sreqLow;
                delete sreqHigh;
            }
            delete state;
            delete req;
            completeStore(storeWBIdx);
            incrStIdx(storeWBIdx);
        } else if (!sendStore(data_pkt)) {
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], will "
                    "retry later\n",
                    inst->seqNum);

            // Need to store the second packet, if split.
            if (split) {
                state->pktToSend = true;
                state->pendingPacket = snd_data_pkt;
            }
        } else {

            // If split, try to send the second packet too
            if (split) {
                assert(snd_data_pkt);

                // Ensure there are enough ports to use.
                if (usedPorts < cachePorts) {
                    ++usedPorts;
                    if (sendStore(snd_data_pkt)) {
                        storePostSend(snd_data_pkt);
                    } else {
                        DPRINTF(IEW, "D-Cache became blocked when writing"
                                " [sn:%lli] second packet, will retry later\n",
                                inst->seqNum);
                    }
                } else {

                    // Store the packet for when there are free ports.
                    assert(pendingPkt == NULL);
                    pendingPkt = snd_data_pkt;
                    hasPendingPkt = true;
                }
            } else {

                // Not a split store.
                storePostSend(data_pkt);
            }
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
    }
}*/

template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit, "Load Instruction PC %s squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->pcState(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit, "Store Instruction PC %s squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->pcState(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen.  It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;
        if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
            delete storeQueue[store_idx].sreqLow;
            delete storeQueue[store_idx].sreqHigh;

            storeQueue[store_idx].sreqLow = NULL;
            storeQueue[store_idx].sreqHigh = NULL;
        }

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();

        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
    }

    if (needsTSO) {
        storeInFlight = true;
    }
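    // writebackStores() checks storeInFlight in its loop condition, so under
    // TSO no further stores are sent until this one finishes.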
11218727Snilay@cs.wisc.edu
11222693Sktlim@umich.edu    incrStIdx(storeWBIdx);
11232693Sktlim@umich.edu}
11242693Sktlim@umich.edu
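// Write a memory response back to the CPU (typically a load's data): wake
// the CPU, drop responses for instructions that were squashed in the
// meantime, otherwise complete the access so the result reaches its
// destination, hand the instruction to commit, and let IEW check for a
// resulting misprediction.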
template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        iewStage->decrWb(inst->seqNum);
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();

    // see if this load changed the PC
    iewStage->checkMisprediction(inst);
}

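// Final completion of the store at store_idx once its access has finished:
// mark the entry completed and, if it sits at the store queue head, retire
// it together with any already-completed stores that follow it to free
// entries.  Also clear a load stall keyed to this store, drop the TSO
// in-flight flag, and report the instruction to the checker.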
template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

#if TRACING_ON
    if (DTRACE(O3PipeView)) {
        storeQueue[store_idx].inst->storeTick =
            curTick() - storeQueue[store_idx].inst->fetchTick;
    }
#endif

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    if (needsTSO) {
        storeInFlight = false;
    }

    // Tell the checker we've completed this instruction.  Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
}

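// Try to send a store packet through the D-cache port.  If the port refuses
// the request, remember the packet, mark this unit blocked, and ask the LSQ
// to route the eventual retry back to this thread.  Returns true only if
// the packet was accepted.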
template <class Impl>
bool
LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
{
    if (!dcachePort->sendTimingReq(data_pkt)) {
        // Need to handle becoming blocked on a store.
        isStoreBlocked = true;
        ++lsqCacheBlocked;
        assert(retryPkt == NULL);
        retryPkt = data_pkt;
        lsq->setRetryTid(lsqID);
        return false;
    }
    return true;
}

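// Called when the D-cache port can accept requests again.  Resend the saved
// packet; on success finish the store's post-send bookkeeping, clear the
// blocked state, and, for a split access, send the remaining packet.  If
// the resend is rejected, the unit simply stays blocked until the next
// retry.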
template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Receiving retry: store blocked\n");
        assert(retryPkt != NULL);

        LSQSenderState *state =
            dynamic_cast<LSQSenderState *>(retryPkt->senderState);

        if (dcachePort->sendTimingReq(retryPkt)) {
            // Don't finish the store unless this is the last packet.
            if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
                    state->pendingPacket == retryPkt) {
                state->pktToSend = false;
                storePostSend(retryPkt);
            }
            retryPkt = NULL;
            isStoreBlocked = false;
            lsq->setRetryTid(InvalidThreadID);

            // Send any outstanding packet.
            if (TheISA::HasUnalignedMemAcc && state->pktToSend) {
                assert(state->pendingPacket);
                if (sendStore(state->pendingPacket)) {
                    storePostSend(state->pendingPacket);
                }
            }
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
            lsq->setRetryTid(lsqID);
        }
    } else if (isLoadBlocked) {
        DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
                "no need to resend packet.\n");
    } else {
        DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
    }
}

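// The load and store queues are circular buffers, so the index helpers
// below wrap modulo LQEntries/SQEntries.  For example, with SQEntries == 4,
// repeatedly calling incrStIdx on an index that starts at 2 steps it
// through 3, 0, 1, 2, ...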
template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx) const
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx) const
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx) const
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx) const
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

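// Debug helper: print the PC and sequence number of every instruction
// currently held in the load queue and the store queue.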
template <class Impl>
void
LSQUnit<Impl>::dumpInsts() const
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        const DynInstPtr &inst(loadQueue[load_idx]);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrLdIdx(load_idx);
    }
    cprintf("\n");

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        const DynInstPtr &inst(storeQueue[store_idx].inst);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrStIdx(store_idx);
    }

    cprintf("\n");
}

#endif // __CPU_O3_LSQ_UNIT_IMPL_HH__