lsq_unit_impl.hh revision 11097
16657Snate@binkert.org
26657Snate@binkert.org/*
36657Snate@binkert.org * Copyright (c) 2010-2014 ARM Limited
46657Snate@binkert.org * Copyright (c) 2013 Advanced Micro Devices, Inc.
56657Snate@binkert.org * All rights reserved
66657Snate@binkert.org *
76657Snate@binkert.org * The license below extends only to copyright in the software and shall
86657Snate@binkert.org * not be construed as granting a license to any other intellectual
96657Snate@binkert.org * property including but not limited to intellectual property relating
106657Snate@binkert.org * to a hardware implementation of the functionality of the software
116657Snate@binkert.org * licensed hereunder.  You may use the software subject to the license
126657Snate@binkert.org * terms below provided that you ensure that this notice is replicated
136657Snate@binkert.org * unmodified and in its entirety in all distributions of the software,
146657Snate@binkert.org * modified or unmodified, in source code or in binary form.
156657Snate@binkert.org *
166657Snate@binkert.org * Copyright (c) 2004-2005 The Regents of The University of Michigan
176657Snate@binkert.org * All rights reserved.
186657Snate@binkert.org *
196657Snate@binkert.org * Redistribution and use in source and binary forms, with or without
206657Snate@binkert.org * modification, are permitted provided that the following conditions are
216657Snate@binkert.org * met: redistributions of source code must retain the above copyright
226657Snate@binkert.org * notice, this list of conditions and the following disclaimer;
236657Snate@binkert.org * redistributions in binary form must reproduce the above copyright
246657Snate@binkert.org * notice, this list of conditions and the following disclaimer in the
256657Snate@binkert.org * documentation and/or other materials provided with the distribution;
266657Snate@binkert.org * neither the name of the copyright holders nor the names of its
276657Snate@binkert.org * contributors may be used to endorse or promote products derived from
286999Snate@binkert.org * this software without specific prior written permission.
296657Snate@binkert.org *
306657Snate@binkert.org * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
316657Snate@binkert.org * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
326657Snate@binkert.org * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
338189SLisa.Hsu@amd.com * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
346657Snate@binkert.org * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
359499Snilay@cs.wisc.edu * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
369499Snilay@cs.wisc.edu * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
379364Snilay@cs.wisc.edu * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
387055Snate@binkert.org * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
396882SBrad.Beckmann@amd.com * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
406882SBrad.Beckmann@amd.com * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
418191SLisa.Hsu@amd.com *
426882SBrad.Beckmann@amd.com * Authors: Kevin Lim
436882SBrad.Beckmann@amd.com *          Korey Sewell
449102SNuwan.Jayasena@amd.com */
459366Snilay@cs.wisc.edu
469499Snilay@cs.wisc.edu#ifndef __CPU_O3_LSQ_UNIT_IMPL_HH__
479499Snilay@cs.wisc.edu#define __CPU_O3_LSQ_UNIT_IMPL_HH__
489499Snilay@cs.wisc.edu
496882SBrad.Beckmann@amd.com#include "arch/generic/debugfaults.hh"
506657Snate@binkert.org#include "arch/locked_mem.hh"
516657Snate@binkert.org#include "base/str.hh"
526657Snate@binkert.org#include "config/the_isa.hh"
536657Snate@binkert.org#include "cpu/checker/cpu.hh"
546657Snate@binkert.org#include "cpu/o3/lsq.hh"
559366Snilay@cs.wisc.edu#include "cpu/o3/lsq_unit.hh"
567839Snilay@cs.wisc.edu#include "debug/Activity.hh"
576657Snate@binkert.org#include "debug/IEW.hh"
586882SBrad.Beckmann@amd.com#include "debug/LSQUnit.hh"
596882SBrad.Beckmann@amd.com#include "debug/O3PipeView.hh"
606882SBrad.Beckmann@amd.com#include "mem/packet.hh"
616882SBrad.Beckmann@amd.com#include "mem/request.hh"
626882SBrad.Beckmann@amd.com
// Schedulable event that performs a deferred store writeback.  The event
// self-deletes after processing (AutoDelete) and keeps the instruction,
// the response packet, and the owning LSQ unit it needs at process() time.
template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(Default_Pri, AutoDelete),
      inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
}
706657Snate@binkert.org
// Perform the deferred writeback, then release the packet and its
// request, which are owned by this event once it fires.
template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    // A writeback should never fire on a drained/switched-out CPU.
    assert(!lsqPtr->cpu->switchedOut());

    lsqPtr->writeback(inst, pkt);

    // The sender state (if any) and the packet/request pair are no
    // longer needed after writeback completes.
    if (pkt->senderState)
        delete pkt->senderState;

    delete pkt->req;
    delete pkt;
}
856657Snate@binkert.org
866657Snate@binkert.orgtemplate<class Impl>
876657Snate@binkert.orgconst char *
886657Snate@binkert.orgLSQUnit<Impl>::WritebackEvent::description() const
896657Snate@binkert.org{
906657Snate@binkert.org    return "Store writeback";
916657Snate@binkert.org}
926657Snate@binkert.org
936657Snate@binkert.orgtemplate<class Impl>
946657Snate@binkert.orgvoid
956657Snate@binkert.orgLSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
966779SBrad.Beckmann@amd.com{
976657Snate@binkert.org    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
986657Snate@binkert.org    DynInstPtr inst = state->inst;
996657Snate@binkert.org    DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum);
1006657Snate@binkert.org    DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum);
1016657Snate@binkert.org
1026657Snate@binkert.org    if (state->cacheBlocked) {
1036657Snate@binkert.org        // This is the first half of a previous split load,
1046657Snate@binkert.org        // where the 2nd half blocked, ignore this response
1056657Snate@binkert.org        DPRINTF(IEW, "[sn:%lli]: Response from first half of earlier "
1069104Shestness@cs.utexas.edu                "blocked split load recieved. Ignoring.\n", inst->seqNum);
1079104Shestness@cs.utexas.edu        delete state;
1089104Shestness@cs.utexas.edu        return;
1099104Shestness@cs.utexas.edu    }
1106657Snate@binkert.org
1116657Snate@binkert.org    // If this is a split access, wait until all packets are received.
1126657Snate@binkert.org    if (TheISA::HasUnalignedMemAcc && !state->complete()) {
1136657Snate@binkert.org        return;
1146657Snate@binkert.org    }
1156657Snate@binkert.org
1166657Snate@binkert.org    assert(!cpu->switchedOut());
1176657Snate@binkert.org    if (!inst->isSquashed()) {
1186657Snate@binkert.org        if (!state->noWB) {
1196657Snate@binkert.org            if (!TheISA::HasUnalignedMemAcc || !state->isSplit ||
1206657Snate@binkert.org                !state->isLoad) {
1216657Snate@binkert.org                writeback(inst, pkt);
1226657Snate@binkert.org            } else {
1236657Snate@binkert.org                writeback(inst, state->mainPkt);
1246657Snate@binkert.org            }
1257839Snilay@cs.wisc.edu        }
1267839Snilay@cs.wisc.edu
1277839Snilay@cs.wisc.edu        if (inst->isStore()) {
1287839Snilay@cs.wisc.edu            completeStore(state->idx);
1297839Snilay@cs.wisc.edu        }
1307839Snilay@cs.wisc.edu    }
1317839Snilay@cs.wisc.edu
1327839Snilay@cs.wisc.edu    if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
1337839Snilay@cs.wisc.edu        delete state->mainPkt->req;
1347839Snilay@cs.wisc.edu        delete state->mainPkt;
1357839Snilay@cs.wisc.edu    }
1367839Snilay@cs.wisc.edu
1377839Snilay@cs.wisc.edu    pkt->req->setAccessLatency();
1387839Snilay@cs.wisc.edu    cpu->ppDataAccessComplete->notify(std::make_pair(inst, pkt));
1397839Snilay@cs.wisc.edu
1406657Snate@binkert.org    delete state;
1416657Snate@binkert.org}
1426657Snate@binkert.org
// Default-construct with empty queues; real configuration happens in
// init(), which is called once the CPU/IEW pointers are available.
template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
      isStoreBlocked(false), storeInFlight(false), hasPendingPkt(false)
{
}
1496657Snate@binkert.org
// Wire this LSQ unit to its CPU, IEW stage and parent LSQ, size the
// load/store queues (plus one sentinel slot each), and pull tunables
// from the parameter object before resetting all dynamic state.
template<class Impl>
void
LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
        LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
        unsigned id)
{
    cpu = cpu_ptr;
    iewStage = iew_ptr;

    lsq = lsq_ptr;

    lsqID = id;

    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id);

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    //Due to uint8_t index in LSQSenderState
    assert(LQEntries <= 256);
    assert(SQEntries <= 256);

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    depCheckShift = params->LSQDepCheckShift;
    checkLoads = params->LSQCheckLoads;
    cachePorts = params->cachePorts;
    needsTSO = params->needsTSO;

    resetState();
}
1836877Ssteve.reinhardt@amd.com
1846877Ssteve.reinhardt@amd.com
1856877Ssteve.reinhardt@amd.comtemplate<class Impl>
1866877Ssteve.reinhardt@amd.comvoid
1876877Ssteve.reinhardt@amd.comLSQUnit<Impl>::resetState()
1886877Ssteve.reinhardt@amd.com{
1899338SAndreas.Sandberg@arm.com    loads = stores = storesToWB = 0;
1906877Ssteve.reinhardt@amd.com
1916877Ssteve.reinhardt@amd.com    loadHead = loadTail = 0;
1926877Ssteve.reinhardt@amd.com
1936877Ssteve.reinhardt@amd.com    storeHead = storeWBIdx = storeTail = 0;
1946877Ssteve.reinhardt@amd.com
1956877Ssteve.reinhardt@amd.com    usedPorts = 0;
1966882SBrad.Beckmann@amd.com
1976882SBrad.Beckmann@amd.com    retryPkt = NULL;
1986882SBrad.Beckmann@amd.com    memDepViolator = NULL;
1996882SBrad.Beckmann@amd.com
2006882SBrad.Beckmann@amd.com    stalled = false;
2016882SBrad.Beckmann@amd.com
2026882SBrad.Beckmann@amd.com    cacheBlockMask = ~(cpu->cacheLineSize() - 1);
2036877Ssteve.reinhardt@amd.com}
2046877Ssteve.reinhardt@amd.com
2056877Ssteve.reinhardt@amd.comtemplate<class Impl>
2066877Ssteve.reinhardt@amd.comstd::string
2076657Snate@binkert.orgLSQUnit<Impl>::name() const
2086657Snate@binkert.org{
2096999Snate@binkert.org    if (Impl::MaxThreads == 1) {
2106657Snate@binkert.org        return iewStage->name() + ".lsq";
2116657Snate@binkert.org    } else {
2126657Snate@binkert.org        return iewStage->name() + ".lsq.thread" + std::to_string(lsqID);
2136657Snate@binkert.org    }
2147007Snate@binkert.org}
2156657Snate@binkert.org
// Register this unit's statistics with the stats framework.  Each stat
// is named relative to name() so per-thread units stay distinguishable.
template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the instruction is squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache being blocked");
}
2606902SBrad.Beckmann@amd.com
// Record the (non-owning) data-cache port through which this unit
// sends memory requests.
template<class Impl>
void
LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port)
{
    dcachePort = dcache_port;
}
2679302Snilay@cs.wisc.edu
// Drop all load-queue entries (releases the queue's instruction refs).
template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}
2749745Snilay@cs.wisc.edu
// Drop all store-queue entries (releases the queue's instruction refs).
template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}
2816657Snate@binkert.org
2826882SBrad.Beckmann@amd.comtemplate<class Impl>
2836882SBrad.Beckmann@amd.comvoid
2846882SBrad.Beckmann@amd.comLSQUnit<Impl>::drainSanityCheck() const
2856882SBrad.Beckmann@amd.com{
2866657Snate@binkert.org    for (int i = 0; i < loadQueue.size(); ++i)
2876657Snate@binkert.org        assert(!loadQueue[i]);
2887007Snate@binkert.org
2897839Snilay@cs.wisc.edu    assert(storesToWB == 0);
2907839Snilay@cs.wisc.edu    assert(!retryPkt);
2917839Snilay@cs.wisc.edu}
2927839Snilay@cs.wisc.edu
// Called when this CPU takes over from another; the queues are assumed
// drained, so a full state reset is sufficient.
template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    resetState();
}
2997839Snilay@cs.wisc.edu
// Grow the load queue to hold `size` real entries (plus the sentinel).
// Shrinking below the current size is not allowed (asserted); the
// "else" branch only trims a previously over-grown logical size.
template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        // Physically extend the backing vector one slot at a time.
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }

    // uint8_t indices in LSQSenderState cap the queue at 256 slots.
    assert(LQEntries <= 256);
}
3197007Snate@binkert.org
// Grow the store queue to hold `size` real entries (plus the sentinel).
// NOTE(review): unlike resizeLQ there is no assert forbidding a shrink
// below the current occupancy — confirm whether shrinking is intended.
template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        // Physically extend the backing vector one slot at a time.
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }

    // uint8_t indices in LSQSenderState cap the queue at 256 slots.
    assert(SQEntries <= 256);
}
3379595Snilay@cs.wisc.edu
3387839Snilay@cs.wisc.edutemplate <class Impl>
3397839Snilay@cs.wisc.eduvoid
3407839Snilay@cs.wisc.eduLSQUnit<Impl>::insert(DynInstPtr &inst)
3417839Snilay@cs.wisc.edu{
3427839Snilay@cs.wisc.edu    assert(inst->isMemRef());
3437839Snilay@cs.wisc.edu
3447839Snilay@cs.wisc.edu    assert(inst->isLoad() || inst->isStore());
3457839Snilay@cs.wisc.edu
3467839Snilay@cs.wisc.edu    if (inst->isLoad()) {
3477839Snilay@cs.wisc.edu        insertLoad(inst);
3487839Snilay@cs.wisc.edu    } else {
3497839Snilay@cs.wisc.edu        insertStore(inst);
3507839Snilay@cs.wisc.edu    }
3517839Snilay@cs.wisc.edu
3527839Snilay@cs.wisc.edu    inst->setInLSQ();
3537839Snilay@cs.wisc.edu}
3546657Snate@binkert.org
// Place a load at the tail of the load queue.  Records the index of the
// youngest store (or -1 if none) so forwarding checks know which stores
// precede this load in program order.
template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    // Queue must not be full (circular queue with a sentinel slot).
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
            load_inst->pcState(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    // sqIdx = -1 means no older store exists to forward from.
    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}
3796657Snate@binkert.org
// Place a store at the tail of the store queue, recording both its own
// SQ slot and the current load-queue tail for ordering checks.
template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
            store_inst->pcState(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}
4006657Snate@binkert.org
4016657Snate@binkert.orgtemplate <class Impl>
4026657Snate@binkert.orgtypename Impl::DynInstPtr
4036657Snate@binkert.orgLSQUnit<Impl>::getMemDepViolator()
4048946Sandreas.hansson@arm.com{
4058946Sandreas.hansson@arm.com    DynInstPtr temp = memDepViolator;
4068946Sandreas.hansson@arm.com
4077832Snate@binkert.org    memDepViolator = NULL;
4087002Snate@binkert.org
4097002Snate@binkert.org    return temp;
4107002Snate@binkert.org}
4118641Snate@binkert.org
4127056Snate@binkert.orgtemplate <class Impl>
4138232Snate@binkert.orgunsigned
4148232Snate@binkert.orgLSQUnit<Impl>::numFreeLoadEntries()
4156657Snate@binkert.org{
4168229Snate@binkert.org        //LQ has an extra dummy entry to differentiate
4176657Snate@binkert.org        //empty/full conditions. Subtract 1 from the free entries.
4186657Snate@binkert.org        DPRINTF(LSQUnit, "LQ size: %d, #loads occupied: %d\n", LQEntries, loads);
4197056Snate@binkert.org        return LQEntries - loads - 1;
4206657Snate@binkert.org}
4219219Spower.jg@gmail.com
4229219Spower.jg@gmail.comtemplate <class Impl>
4239219Spower.jg@gmail.comunsigned
4249219Spower.jg@gmail.comLSQUnit<Impl>::numFreeStoreEntries()
4259219Spower.jg@gmail.com{
4267002Snate@binkert.org        //SQ has an extra dummy entry to differentiate
4277002Snate@binkert.org        //empty/full conditions. Subtract 1 from the free entries.
4286657Snate@binkert.org        DPRINTF(LSQUnit, "SQ size: %d, #stores occupied: %d\n", SQEntries, stores);
4296657Snate@binkert.org        return SQEntries - stores - 1;
4306657Snate@binkert.org
4316657Snate@binkert.org }
4326657Snate@binkert.org
// React to an external snoop: release any CPU-local LL/SC monitors for
// the snooped block, and walk the load queue marking loads that may
// have observed stale data so they are squashed or re-executed.  Under
// TSO, a hit forces *all* younger loads to re-execute as well.
template <class Impl>
void
LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
{
    int load_idx = loadHead;
    DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());

    // Unlock the cpu-local monitor when the CPU sees a snoop to a locked
    // address. The CPU can speculatively execute a LL operation after a pending
    // SC operation in the pipeline and that can make the cache monitor the CPU
    // is connected to valid while it really shouldn't be.
    for (int x = 0; x < cpu->numContexts(); x++) {
        ThreadContext *tc = cpu->getContext(x);
        // Suppress TC-induced squashes while the ISA snoop handler runs.
        bool no_squash = cpu->thread[x]->noSquashFromTC;
        cpu->thread[x]->noSquashFromTC = true;
        TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask);
        cpu->thread[x]->noSquashFromTC = no_squash;
    }

    Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;

    // The head load is handled separately: it only needs its LL/SC
    // lock flag cleared, never a re-execution.
    DynInstPtr ld_inst = loadQueue[load_idx];
    if (ld_inst) {
        Addr load_addr_low = ld_inst->physEffAddrLow & cacheBlockMask;
        Addr load_addr_high = ld_inst->physEffAddrHigh & cacheBlockMask;

        // Check that this snoop didn't just invalidate our lock flag
        if (ld_inst->effAddrValid() && (load_addr_low == invalidate_addr
                                        || load_addr_high == invalidate_addr)
            && ld_inst->memReqFlags & Request::LLSC)
            TheISA::handleLockedSnoopHit(ld_inst.get());
    }

    // If this is the only load in the LSQ we don't care
    if (load_idx == loadTail)
        return;

    incrLdIdx(load_idx);

    bool force_squash = false;

    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];

        // Skip loads with no effective address yet, or ones that are
        // strictly ordered and thus execute non-speculatively.
        if (!ld_inst->effAddrValid() || ld_inst->strictlyOrdered()) {
            incrLdIdx(load_idx);
            continue;
        }

        // Both halves of a split access are checked against the
        // invalidated block.
        Addr load_addr_low = ld_inst->physEffAddrLow & cacheBlockMask;
        Addr load_addr_high = ld_inst->physEffAddrHigh & cacheBlockMask;

        DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
                    ld_inst->seqNum, load_addr_low, invalidate_addr);

        if ((load_addr_low == invalidate_addr
             || load_addr_high == invalidate_addr) || force_squash) {
            if (needsTSO) {
                // If we have a TSO system, as all loads must be ordered with
                // all other loads, this load as well as *all* subsequent loads
                // need to be squashed to prevent possible load reordering.
                force_squash = true;
            }
            if (ld_inst->possibleLoadViolation() || force_squash) {
                DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Mark the load for re-execution
                ld_inst->fault = std::make_shared<ReExec>();
            } else {
                DPRINTF(LSQUnit, "HitExternal Snoop for addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Make sure that we don't lose a snoop hitting a LOCKED
                // address since the LOCK* flags don't get updated until
                // commit.
                if (ld_inst->memReqFlags & Request::LLSC)
                    TheISA::handleLockedSnoopHit(ld_inst.get());

                // If a older load checks this and it's true
                // then we might have missed the snoop
                // in which case we need to invalidate to be sure
                ld_inst->hitExternalSnoop(true);
            }
        }
        incrLdIdx(load_idx);
    }
    return;
}
5229595Snilay@cs.wisc.edu
// Scan the load queue from load_idx to the tail for loads whose address
// range overlaps `inst`'s (at depCheckShift granularity).  For a load,
// an overlap with a snoop-hit load forces a fault; otherwise the
// younger load is just marked as a possible violation.  For a store,
// any overlapping younger load is a memory-order violation.  Returns
// NoFault or the fault to raise, recording the violator as it goes.
template <class Impl>
Fault
LSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst)
{
    // Address range of `inst`, coarsened to dependence-check blocks.
    Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
    Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;

    /** @todo in theory you only need to check an instruction that has executed
     * however, there isn't a good way in the pipeline at the moment to check
     * all instructions that will execute before the store writes back. Thus,
     * like the implementation that came before it, we're overly conservative.
     */
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];
        // Skip loads with no address yet and strictly-ordered loads.
        if (!ld_inst->effAddrValid() || ld_inst->strictlyOrdered()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
        Addr ld_eff_addr2 =
            (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;

        // Overlap test on the coarsened ranges.
        if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
            if (inst->isLoad()) {
                // If this load is to the same block as an external snoop
                // invalidate that we've observed then the load needs to be
                // squashed as it could have newer data
                if (ld_inst->hitExternalSnoop()) {
                    // Keep only the oldest violator on record.
                    if (!memDepViolator ||
                            ld_inst->seqNum < memDepViolator->seqNum) {
                        DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
                                "and [sn:%lli] at address %#x\n",
                                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                        memDepViolator = ld_inst;

                        ++lsqMemOrderViolation;

                        return std::make_shared<GenericISA::M5PanicFault>(
                            "Detected fault with inst [sn:%lli] and "
                            "[sn:%lli] at address %#x\n",
                            inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                    }
                }

                // Otherwise, mark the load has a possible load violation
                // and if we see a snoop before it's commited, we need to squash
                ld_inst->possibleLoadViolation(true);
                DPRINTF(LSQUnit, "Found possible load violation at addr: %#x"
                        " between instructions [sn:%lli] and [sn:%lli]\n",
                        inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
            } else {
                // A load/store incorrectly passed this store.
                // Check if we already have a violator, or if it's newer
                // squash and refetch.
                if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
                    break;

                DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and "
                        "[sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                memDepViolator = ld_inst;

                ++lsqMemOrderViolation;

                return std::make_shared<GenericISA::M5PanicFault>(
                    "Detected fault with "
                    "inst [sn:%lli] and [sn:%lli] at address %#x\n",
                    inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
            }
        }

        incrLdIdx(load_idx);
    }
    return NoFault;
}
5999508Snilay@cs.wisc.edu
6009508Snilay@cs.wisc.edu
6019508Snilay@cs.wisc.edu
6029508Snilay@cs.wisc.edu
6039604Snilay@cs.wisc.edutemplate <class Impl>
6049604Snilay@cs.wisc.eduFault
6059604Snilay@cs.wisc.eduLSQUnit<Impl>::executeLoad(DynInstPtr &inst)
6069508Snilay@cs.wisc.edu{
6076657Snate@binkert.org    using namespace TheISA;
6086657Snate@binkert.org    // Execute a specific load.
6096657Snate@binkert.org    Fault load_fault = NoFault;
6106657Snate@binkert.org
6116657Snate@binkert.org    DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
6129595Snilay@cs.wisc.edu            inst->pcState(), inst->seqNum);
6139595Snilay@cs.wisc.edu
6149595Snilay@cs.wisc.edu    assert(!inst->isSquashed());
6159595Snilay@cs.wisc.edu
6169595Snilay@cs.wisc.edu    load_fault = inst->initiateAcc();
6179595Snilay@cs.wisc.edu
6188308Stushar@csail.mit.edu    if (inst->isTranslationDelayed() &&
6199595Snilay@cs.wisc.edu        load_fault == NoFault)
6206657Snate@binkert.org        return load_fault;
6216657Snate@binkert.org
6229595Snilay@cs.wisc.edu    // If the instruction faulted or predicated false, then we need to send it
6239595Snilay@cs.wisc.edu    // along to commit without the instruction completing.
6249595Snilay@cs.wisc.edu    if (load_fault != NoFault || !inst->readPredicate()) {
6259595Snilay@cs.wisc.edu        // Send this instruction to commit, also make sure iew stage
6269595Snilay@cs.wisc.edu        // realizes there is activity.  Mark it as executed unless it
6279508Snilay@cs.wisc.edu        // is a strictly ordered load that needs to hit the head of
6286657Snate@binkert.org        // commit.
6296657Snate@binkert.org        if (!inst->readPredicate())
6306657Snate@binkert.org            inst->forwardOldRegs();
6316657Snate@binkert.org        DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
6326657Snate@binkert.org                inst->seqNum,
6336657Snate@binkert.org                (load_fault != NoFault ? "fault" : "predication"));
6346657Snate@binkert.org        if (!(inst->hasRequest() && inst->strictlyOrdered()) ||
6356657Snate@binkert.org            inst->isAtCommit()) {
6368187SLisa.Hsu@amd.com            inst->setExecuted();
6376657Snate@binkert.org        }
6386657Snate@binkert.org        iewStage->instToCommit(inst);
6396657Snate@binkert.org        iewStage->activityThisCycle();
6406657Snate@binkert.org    } else {
6416657Snate@binkert.org        assert(inst->effAddrValid());
6426657Snate@binkert.org        int load_idx = inst->lqIdx;
6436657Snate@binkert.org        incrLdIdx(load_idx);
6446657Snate@binkert.org
6456657Snate@binkert.org        if (checkLoads)
6467454Snate@binkert.org            return checkViolations(load_idx, inst);
6476657Snate@binkert.org    }
6486657Snate@binkert.org
6496657Snate@binkert.org    return load_fault;
6506657Snate@binkert.org}
6517007Snate@binkert.org
6527056Snate@binkert.orgtemplate <class Impl>
6537007Snate@binkert.orgFault
6547007Snate@binkert.orgLSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
6556657Snate@binkert.org{
6567566SBrad.Beckmann@amd.com    using namespace TheISA;
6577566SBrad.Beckmann@amd.com    // Make sure that a store exists.
6589499Snilay@cs.wisc.edu    assert(stores != 0);
6599499Snilay@cs.wisc.edu
6607566SBrad.Beckmann@amd.com    int store_idx = store_inst->sqIdx;
6617566SBrad.Beckmann@amd.com
6627566SBrad.Beckmann@amd.com    DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n",
6639366Snilay@cs.wisc.edu            store_inst->pcState(), store_inst->seqNum);
6649366Snilay@cs.wisc.edu
6659366Snilay@cs.wisc.edu    assert(!store_inst->isSquashed());
6669366Snilay@cs.wisc.edu
6677566SBrad.Beckmann@amd.com    // Check the recently completed loads to see if any match this store's
6687672Snate@binkert.org    // address.  If so, then we have a memory ordering violation.
6696657Snate@binkert.org    int load_idx = store_inst->lqIdx;
6709465Snilay@cs.wisc.edu
6716657Snate@binkert.org    Fault store_fault = store_inst->initiateAcc();
6729465Snilay@cs.wisc.edu
6737056Snate@binkert.org    if (store_inst->isTranslationDelayed() &&
6746657Snate@binkert.org        store_fault == NoFault)
6756657Snate@binkert.org        return store_fault;
6767672Snate@binkert.org
6776657Snate@binkert.org    if (!store_inst->readPredicate())
6786657Snate@binkert.org        store_inst->forwardOldRegs();
6796657Snate@binkert.org
6806657Snate@binkert.org    if (storeQueue[store_idx].size == 0) {
6816657Snate@binkert.org        DPRINTF(LSQUnit,"Fault on Store PC %s, [sn:%lli], Size = 0\n",
6826657Snate@binkert.org                store_inst->pcState(), store_inst->seqNum);
6836657Snate@binkert.org
6846657Snate@binkert.org        return store_fault;
6856657Snate@binkert.org    } else if (!store_inst->readPredicate()) {
6866657Snate@binkert.org        DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
6876657Snate@binkert.org                store_inst->seqNum);
6889745Snilay@cs.wisc.edu        return store_fault;
6896657Snate@binkert.org    }
6906657Snate@binkert.org
6919496Snilay@cs.wisc.edu    assert(store_fault == NoFault);
6929496Snilay@cs.wisc.edu
6939496Snilay@cs.wisc.edu    if (store_inst->isStoreConditional()) {
6949496Snilay@cs.wisc.edu        // Store conditionals need to set themselves as able to
6959496Snilay@cs.wisc.edu        // writeback if we haven't had a fault by here.
6966657Snate@binkert.org        storeQueue[store_idx].canWB = true;
6976657Snate@binkert.org
6986657Snate@binkert.org        ++storesToWB;
6996657Snate@binkert.org    }
7006657Snate@binkert.org
7016657Snate@binkert.org    return checkViolations(load_idx, store_inst);
7026657Snate@binkert.org
7036657Snate@binkert.org}
7046657Snate@binkert.org
7056657Snate@binkert.orgtemplate <class Impl>
7066657Snate@binkert.orgvoid
7078683Snilay@cs.wisc.eduLSQUnit<Impl>::commitLoad()
7088683Snilay@cs.wisc.edu{
7098683Snilay@cs.wisc.edu    assert(loadQueue[loadHead]);
7108683Snilay@cs.wisc.edu
7118683Snilay@cs.wisc.edu    DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n",
7128683Snilay@cs.wisc.edu            loadQueue[loadHead]->pcState());
7136657Snate@binkert.org
7149745Snilay@cs.wisc.edu    loadQueue[loadHead] = NULL;
7159745Snilay@cs.wisc.edu
7169745Snilay@cs.wisc.edu    incrLdIdx(loadHead);
7179745Snilay@cs.wisc.edu
7189745Snilay@cs.wisc.edu    --loads;
7199745Snilay@cs.wisc.edu}
7209745Snilay@cs.wisc.edu
7219745Snilay@cs.wisc.edutemplate <class Impl>
7229745Snilay@cs.wisc.eduvoid
7239745Snilay@cs.wisc.eduLSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
7249745Snilay@cs.wisc.edu{
7259745Snilay@cs.wisc.edu    assert(loads == 0 || loadQueue[loadHead]);
7269745Snilay@cs.wisc.edu
7279745Snilay@cs.wisc.edu    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
7289745Snilay@cs.wisc.edu        commitLoad();
7299745Snilay@cs.wisc.edu    }
7309745Snilay@cs.wisc.edu}
7319745Snilay@cs.wisc.edu
7329745Snilay@cs.wisc.edutemplate <class Impl>
7339745Snilay@cs.wisc.eduvoid
7349745Snilay@cs.wisc.eduLSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
7359745Snilay@cs.wisc.edu{
7369745Snilay@cs.wisc.edu    assert(stores == 0 || storeQueue[storeHead].inst);
7379745Snilay@cs.wisc.edu
7389745Snilay@cs.wisc.edu    int store_idx = storeHead;
7399745Snilay@cs.wisc.edu
7409745Snilay@cs.wisc.edu    while (store_idx != storeTail) {
7419745Snilay@cs.wisc.edu        assert(storeQueue[store_idx].inst);
7429745Snilay@cs.wisc.edu        // Mark any stores that are now committed and have not yet
7439745Snilay@cs.wisc.edu        // been marked as able to write back.
7449745Snilay@cs.wisc.edu        if (!storeQueue[store_idx].canWB) {
7459745Snilay@cs.wisc.edu            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
7469745Snilay@cs.wisc.edu                break;
7479745Snilay@cs.wisc.edu            }
7489745Snilay@cs.wisc.edu            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
7499745Snilay@cs.wisc.edu                    "%s [sn:%lli]\n",
7509745Snilay@cs.wisc.edu                    storeQueue[store_idx].inst->pcState(),
7519745Snilay@cs.wisc.edu                    storeQueue[store_idx].inst->seqNum);
7529745Snilay@cs.wisc.edu
7539745Snilay@cs.wisc.edu            storeQueue[store_idx].canWB = true;
7549745Snilay@cs.wisc.edu
7559745Snilay@cs.wisc.edu            ++storesToWB;
7569745Snilay@cs.wisc.edu        }
7579745Snilay@cs.wisc.edu
7589745Snilay@cs.wisc.edu        incrStIdx(store_idx);
7599745Snilay@cs.wisc.edu    }
7609745Snilay@cs.wisc.edu}
7619745Snilay@cs.wisc.edu
7629745Snilay@cs.wisc.edutemplate <class Impl>
7639745Snilay@cs.wisc.eduvoid
7649745Snilay@cs.wisc.eduLSQUnit<Impl>::writebackPendingStore()
7659745Snilay@cs.wisc.edu{
7669745Snilay@cs.wisc.edu    if (hasPendingPkt) {
7679745Snilay@cs.wisc.edu        assert(pendingPkt != NULL);
7689745Snilay@cs.wisc.edu
7699745Snilay@cs.wisc.edu        // If the cache is blocked, this will store the packet for retry.
7709745Snilay@cs.wisc.edu        if (sendStore(pendingPkt)) {
7719745Snilay@cs.wisc.edu            storePostSend(pendingPkt);
7729745Snilay@cs.wisc.edu        }
7739745Snilay@cs.wisc.edu        pendingPkt = NULL;
7749745Snilay@cs.wisc.edu        hasPendingPkt = false;
7759745Snilay@cs.wisc.edu    }
7769745Snilay@cs.wisc.edu}
7779745Snilay@cs.wisc.edu
7789745Snilay@cs.wisc.edutemplate <class Impl>
7799745Snilay@cs.wisc.eduvoid
7809745Snilay@cs.wisc.eduLSQUnit<Impl>::writebackStores()
7819745Snilay@cs.wisc.edu{
7829745Snilay@cs.wisc.edu    // First writeback the second packet from any split store that didn't
7839745Snilay@cs.wisc.edu    // complete last cycle because there weren't enough cache ports available.
7849745Snilay@cs.wisc.edu    if (TheISA::HasUnalignedMemAcc) {
7859745Snilay@cs.wisc.edu        writebackPendingStore();
7869745Snilay@cs.wisc.edu    }
7879745Snilay@cs.wisc.edu
7889745Snilay@cs.wisc.edu    while (storesToWB > 0 &&
7899745Snilay@cs.wisc.edu           storeWBIdx != storeTail &&
7909745Snilay@cs.wisc.edu           storeQueue[storeWBIdx].inst &&
7919745Snilay@cs.wisc.edu           storeQueue[storeWBIdx].canWB &&
7929745Snilay@cs.wisc.edu           ((!needsTSO) || (!storeInFlight)) &&
7939745Snilay@cs.wisc.edu           usedPorts < cachePorts) {
7949745Snilay@cs.wisc.edu
7959745Snilay@cs.wisc.edu        if (isStoreBlocked) {
7969745Snilay@cs.wisc.edu            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
7979745Snilay@cs.wisc.edu                    " is blocked!\n");
7989745Snilay@cs.wisc.edu            break;
7999745Snilay@cs.wisc.edu        }
8009745Snilay@cs.wisc.edu
8019745Snilay@cs.wisc.edu        // Store didn't write any data so no need to write it back to
8029745Snilay@cs.wisc.edu        // memory.
8039745Snilay@cs.wisc.edu        if (storeQueue[storeWBIdx].size == 0) {
8049745Snilay@cs.wisc.edu            completeStore(storeWBIdx);
8059745Snilay@cs.wisc.edu
8069745Snilay@cs.wisc.edu            incrStIdx(storeWBIdx);
8079745Snilay@cs.wisc.edu
8089745Snilay@cs.wisc.edu            continue;
8099745Snilay@cs.wisc.edu        }
8109745Snilay@cs.wisc.edu
8119745Snilay@cs.wisc.edu        ++usedPorts;
8129745Snilay@cs.wisc.edu
8139745Snilay@cs.wisc.edu        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
8149745Snilay@cs.wisc.edu            incrStIdx(storeWBIdx);
8157007Snate@binkert.org
8167007Snate@binkert.org            continue;
8177007Snate@binkert.org        }
8186657Snate@binkert.org
8196657Snate@binkert.org        assert(storeQueue[storeWBIdx].req);
8206657Snate@binkert.org        assert(!storeQueue[storeWBIdx].committed);
8217007Snate@binkert.org
8227007Snate@binkert.org        if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
8237007Snate@binkert.org            assert(storeQueue[storeWBIdx].sreqLow);
8246657Snate@binkert.org            assert(storeQueue[storeWBIdx].sreqHigh);
8256657Snate@binkert.org        }
8266657Snate@binkert.org
8278683Snilay@cs.wisc.edu        DynInstPtr inst = storeQueue[storeWBIdx].inst;
8288683Snilay@cs.wisc.edu
8298683Snilay@cs.wisc.edu        Request *req = storeQueue[storeWBIdx].req;
8308683Snilay@cs.wisc.edu        RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
8318683Snilay@cs.wisc.edu        RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;
8328683Snilay@cs.wisc.edu
8337007Snate@binkert.org        storeQueue[storeWBIdx].committed = true;
8347007Snate@binkert.org
8357007Snate@binkert.org        assert(!inst->memData);
8366657Snate@binkert.org        inst->memData = new uint8_t[req->getSize()];
8376657Snate@binkert.org
8386657Snate@binkert.org        if (storeQueue[storeWBIdx].isAllZeros)
8397007Snate@binkert.org            memset(inst->memData, 0, req->getSize());
8407007Snate@binkert.org        else
8417007Snate@binkert.org            memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());
8427007Snate@binkert.org
8437007Snate@binkert.org        PacketPtr data_pkt;
8446657Snate@binkert.org        PacketPtr snd_data_pkt = NULL;
8459745Snilay@cs.wisc.edu
8469745Snilay@cs.wisc.edu        LSQSenderState *state = new LSQSenderState;
8479745Snilay@cs.wisc.edu        state->isLoad = false;
8489745Snilay@cs.wisc.edu        state->idx = storeWBIdx;
8499745Snilay@cs.wisc.edu        state->inst = inst;
8509745Snilay@cs.wisc.edu
8519745Snilay@cs.wisc.edu        if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {
8526902SBrad.Beckmann@amd.com
8539745Snilay@cs.wisc.edu            // Build a single data packet if the store isn't split.
8549745Snilay@cs.wisc.edu            data_pkt = Packet::createWrite(req);
8559745Snilay@cs.wisc.edu            data_pkt->dataStatic(inst->memData);
8569745Snilay@cs.wisc.edu            data_pkt->senderState = state;
8579496Snilay@cs.wisc.edu        } else {
8586902SBrad.Beckmann@amd.com            // Create two packets if the store is split in two.
8597839Snilay@cs.wisc.edu            data_pkt = Packet::createWrite(sreqLow);
8607839Snilay@cs.wisc.edu            snd_data_pkt = Packet::createWrite(sreqHigh);
8617839Snilay@cs.wisc.edu
8627839Snilay@cs.wisc.edu            data_pkt->dataStatic(inst->memData);
8637839Snilay@cs.wisc.edu            snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());
8647839Snilay@cs.wisc.edu
8657839Snilay@cs.wisc.edu            data_pkt->senderState = state;
8667839Snilay@cs.wisc.edu            snd_data_pkt->senderState = state;
8677839Snilay@cs.wisc.edu
8687839Snilay@cs.wisc.edu            state->isSplit = true;
8697839Snilay@cs.wisc.edu            state->outstanding = 2;
8707839Snilay@cs.wisc.edu
8717839Snilay@cs.wisc.edu            // Can delete the main request now.
8727839Snilay@cs.wisc.edu            delete req;
8737839Snilay@cs.wisc.edu            req = sreqLow;
8747839Snilay@cs.wisc.edu        }
8757839Snilay@cs.wisc.edu
8767839Snilay@cs.wisc.edu        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
8777839Snilay@cs.wisc.edu                "to Addr:%#x, data:%#x [sn:%lli]\n",
8787839Snilay@cs.wisc.edu                storeWBIdx, inst->pcState(),
8797839Snilay@cs.wisc.edu                req->getPaddr(), (int)*(inst->memData),
8807839Snilay@cs.wisc.edu                inst->seqNum);
8817839Snilay@cs.wisc.edu
8827839Snilay@cs.wisc.edu        // @todo: Remove this SC hack once the memory system handles it.
8837839Snilay@cs.wisc.edu        if (inst->isStoreConditional()) {
8847839Snilay@cs.wisc.edu            assert(!storeQueue[storeWBIdx].isSplit);
8857839Snilay@cs.wisc.edu            // Disable recording the result temporarily.  Writing to
8867839Snilay@cs.wisc.edu            // misc regs normally updates the result, but this is not
8877839Snilay@cs.wisc.edu            // the desired behavior when handling store conditionals.
8887839Snilay@cs.wisc.edu            inst->recordResult(false);
8897839Snilay@cs.wisc.edu            bool success = TheISA::handleLockedWrite(inst.get(), req, cacheBlockMask);
8907839Snilay@cs.wisc.edu            inst->recordResult(true);
8917839Snilay@cs.wisc.edu
8927839Snilay@cs.wisc.edu            if (!success) {
8937839Snilay@cs.wisc.edu                // Instantly complete this store.
8947839Snilay@cs.wisc.edu                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed.  "
8957839Snilay@cs.wisc.edu                        "Instantly completing it.\n",
8966902SBrad.Beckmann@amd.com                        inst->seqNum);
8978683Snilay@cs.wisc.edu                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
8988683Snilay@cs.wisc.edu                cpu->schedule(wb, curTick() + 1);
8998683Snilay@cs.wisc.edu                if (cpu->checker) {
9008683Snilay@cs.wisc.edu                    // Make sure to set the LLSC data for verification
9018683Snilay@cs.wisc.edu                    // if checker is loaded
9028683Snilay@cs.wisc.edu                    inst->reqToVerify->setExtraData(0);
9038683Snilay@cs.wisc.edu                    inst->completeAcc(data_pkt);
9048683Snilay@cs.wisc.edu                }
9058683Snilay@cs.wisc.edu                completeStore(storeWBIdx);
9068683Snilay@cs.wisc.edu                incrStIdx(storeWBIdx);
9078683Snilay@cs.wisc.edu                continue;
9088683Snilay@cs.wisc.edu            }
9098683Snilay@cs.wisc.edu        } else {
9108683Snilay@cs.wisc.edu            // Non-store conditionals do not need a writeback.
9118683Snilay@cs.wisc.edu            state->noWB = true;
9128683Snilay@cs.wisc.edu        }
9138683Snilay@cs.wisc.edu
9146657Snate@binkert.org        bool split =
9156657Snate@binkert.org            TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;
9167839Snilay@cs.wisc.edu
9177839Snilay@cs.wisc.edu        ThreadContext *thread = cpu->tcBase(lsqID);
9187839Snilay@cs.wisc.edu
9197839Snilay@cs.wisc.edu        if (req->isMmappedIpr()) {
9206657Snate@binkert.org            assert(!inst->isStoreConditional());
9217839Snilay@cs.wisc.edu            TheISA::handleIprWrite(thread, data_pkt);
9227839Snilay@cs.wisc.edu            delete data_pkt;
9237839Snilay@cs.wisc.edu            if (split) {
9247839Snilay@cs.wisc.edu                assert(snd_data_pkt->req->isMmappedIpr());
9257839Snilay@cs.wisc.edu                TheISA::handleIprWrite(thread, snd_data_pkt);
9268055Sksewell@umich.edu                delete snd_data_pkt;
9277839Snilay@cs.wisc.edu                delete sreqLow;
9287839Snilay@cs.wisc.edu                delete sreqHigh;
9296657Snate@binkert.org            }
9307839Snilay@cs.wisc.edu            delete state;
9317839Snilay@cs.wisc.edu            delete req;
9327839Snilay@cs.wisc.edu            completeStore(storeWBIdx);
9337839Snilay@cs.wisc.edu            incrStIdx(storeWBIdx);
9347839Snilay@cs.wisc.edu        } else if (!sendStore(data_pkt)) {
9357839Snilay@cs.wisc.edu            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], will"
9367839Snilay@cs.wisc.edu                    "retry later\n",
9377839Snilay@cs.wisc.edu                    inst->seqNum);
9387839Snilay@cs.wisc.edu
9397839Snilay@cs.wisc.edu            // Need to store the second packet, if split.
9407839Snilay@cs.wisc.edu            if (split) {
9418055Sksewell@umich.edu                state->pktToSend = true;
9427839Snilay@cs.wisc.edu                state->pendingPacket = snd_data_pkt;
9437839Snilay@cs.wisc.edu            }
9447839Snilay@cs.wisc.edu        } else {
9457839Snilay@cs.wisc.edu
9467839Snilay@cs.wisc.edu            // If split, try to send the second packet too
9477839Snilay@cs.wisc.edu            if (split) {
9487839Snilay@cs.wisc.edu                assert(snd_data_pkt);
9497839Snilay@cs.wisc.edu
9507839Snilay@cs.wisc.edu                // Ensure there are enough ports to use.
9517839Snilay@cs.wisc.edu                if (usedPorts < cachePorts) {
9527839Snilay@cs.wisc.edu                    ++usedPorts;
9537839Snilay@cs.wisc.edu                    if (sendStore(snd_data_pkt)) {
9547839Snilay@cs.wisc.edu                        storePostSend(snd_data_pkt);
9557839Snilay@cs.wisc.edu                    } else {
9568055Sksewell@umich.edu                        DPRINTF(IEW, "D-Cache became blocked when writing"
9577839Snilay@cs.wisc.edu                                " [sn:%lli] second packet, will retry later\n",
9587839Snilay@cs.wisc.edu                                inst->seqNum);
9597839Snilay@cs.wisc.edu                    }
9607839Snilay@cs.wisc.edu                } else {
9617839Snilay@cs.wisc.edu
9627839Snilay@cs.wisc.edu                    // Store the packet for when there's free ports.
9637839Snilay@cs.wisc.edu                    assert(pendingPkt == NULL);
9647839Snilay@cs.wisc.edu                    pendingPkt = snd_data_pkt;
9657839Snilay@cs.wisc.edu                    hasPendingPkt = true;
9667839Snilay@cs.wisc.edu                }
9676657Snate@binkert.org            } else {
9687007Snate@binkert.org
9697007Snate@binkert.org                // Not a split store.
9706657Snate@binkert.org                storePostSend(data_pkt);
9718055Sksewell@umich.edu            }
9726657Snate@binkert.org        }
9736657Snate@binkert.org    }
9746657Snate@binkert.org
9756657Snate@binkert.org    // Not sure this should set it to 0.
9768478Snilay@cs.wisc.edu    usedPorts = 0;
9778478Snilay@cs.wisc.edu
9788478Snilay@cs.wisc.edu    assert(stores >= 0 && storesToWB >= 0);
9799302Snilay@cs.wisc.edu}
9809302Snilay@cs.wisc.edu
9819302Snilay@cs.wisc.edu/*template <class Impl>
9829302Snilay@cs.wisc.eduvoid
9839302Snilay@cs.wisc.eduLSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
9849302Snilay@cs.wisc.edu{
9859302Snilay@cs.wisc.edu    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
9869302Snilay@cs.wisc.edu                                              mshrSeqNums.end(),
9879302Snilay@cs.wisc.edu                                              seqNum);
9889302Snilay@cs.wisc.edu
9899302Snilay@cs.wisc.edu    if (mshr_it != mshrSeqNums.end()) {
9909302Snilay@cs.wisc.edu        mshrSeqNums.erase(mshr_it);
9919302Snilay@cs.wisc.edu        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
9929302Snilay@cs.wisc.edu    }
9939302Snilay@cs.wisc.edu}*/
9949302Snilay@cs.wisc.edu
9959302Snilay@cs.wisc.edutemplate <class Impl>
9969302Snilay@cs.wisc.eduvoid
9979302Snilay@cs.wisc.eduLSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
9989302Snilay@cs.wisc.edu{
9999302Snilay@cs.wisc.edu    DPRINTF(LSQUnit, "Squashing until [sn:%lli]!"
10009302Snilay@cs.wisc.edu            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);
10019302Snilay@cs.wisc.edu
10029302Snilay@cs.wisc.edu    int load_idx = loadTail;
10039302Snilay@cs.wisc.edu    decrLdIdx(load_idx);
10049302Snilay@cs.wisc.edu
10059302Snilay@cs.wisc.edu    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
10069302Snilay@cs.wisc.edu        DPRINTF(LSQUnit,"Load Instruction PC %s squashed, "
10079302Snilay@cs.wisc.edu                "[sn:%lli]\n",
10089302Snilay@cs.wisc.edu                loadQueue[load_idx]->pcState(),
10099302Snilay@cs.wisc.edu                loadQueue[load_idx]->seqNum);
10109302Snilay@cs.wisc.edu
10119302Snilay@cs.wisc.edu        if (isStalled() && load_idx == stallingLoadIdx) {
10129595Snilay@cs.wisc.edu            stalled = false;
10139595Snilay@cs.wisc.edu            stallingStoreIsn = 0;
10149595Snilay@cs.wisc.edu            stallingLoadIdx = 0;
10159595Snilay@cs.wisc.edu        }
10169595Snilay@cs.wisc.edu
10179595Snilay@cs.wisc.edu        // Clear the smart pointer to make sure it is decremented.
10189595Snilay@cs.wisc.edu        loadQueue[load_idx]->setSquashed();
10199595Snilay@cs.wisc.edu        loadQueue[load_idx] = NULL;
10209595Snilay@cs.wisc.edu        --loads;
10219595Snilay@cs.wisc.edu
10229595Snilay@cs.wisc.edu        // Inefficient!
10239595Snilay@cs.wisc.edu        loadTail = load_idx;
10249595Snilay@cs.wisc.edu
10259595Snilay@cs.wisc.edu        decrLdIdx(load_idx);
10269595Snilay@cs.wisc.edu        ++lsqSquashedLoads;
10279595Snilay@cs.wisc.edu    }
10289595Snilay@cs.wisc.edu
10299595Snilay@cs.wisc.edu    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
10309595Snilay@cs.wisc.edu        memDepViolator = NULL;
10319595Snilay@cs.wisc.edu    }
10326657Snate@binkert.org
10336657Snate@binkert.org    int store_idx = storeTail;
10349219Spower.jg@gmail.com    decrStIdx(store_idx);
10356657Snate@binkert.org
10366657Snate@binkert.org    while (stores != 0 &&
10376999Snate@binkert.org           storeQueue[store_idx].inst->seqNum > squashed_num) {
10386657Snate@binkert.org        // Instructions marked as can WB are already committed.
10396657Snate@binkert.org        if (storeQueue[store_idx].canWB) {
10409104Shestness@cs.utexas.edu            break;
10419104Shestness@cs.utexas.edu        }
10429104Shestness@cs.utexas.edu
10439104Shestness@cs.utexas.edu        DPRINTF(LSQUnit,"Store Instruction PC %s squashed, "
10446657Snate@binkert.org                "idx:%i [sn:%lli]\n",
10456657Snate@binkert.org                storeQueue[store_idx].inst->pcState(),
10466657Snate@binkert.org                store_idx, storeQueue[store_idx].inst->seqNum);
10476657Snate@binkert.org
10488946Sandreas.hansson@arm.com        // I don't think this can happen.  It should have been cleared
10498946Sandreas.hansson@arm.com        // by the stalling load.
10508946Sandreas.hansson@arm.com        if (isStalled() &&
10517832Snate@binkert.org            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
10527832Snate@binkert.org            panic("Is stalled should have been cleared by stalling load!\n");
10537007Snate@binkert.org            stalled = false;
10548232Snate@binkert.org            stallingStoreIsn = 0;
10558229Snate@binkert.org        }
10568229Snate@binkert.org
10578229Snate@binkert.org        // Clear the smart pointer to make sure it is decremented.
10589104Shestness@cs.utexas.edu        storeQueue[store_idx].inst->setSquashed();
10599104Shestness@cs.utexas.edu        storeQueue[store_idx].inst = NULL;
10609104Shestness@cs.utexas.edu        storeQueue[store_idx].canWB = 0;
10619104Shestness@cs.utexas.edu
10629104Shestness@cs.utexas.edu        // Must delete request now that it wasn't handed off to
10639104Shestness@cs.utexas.edu        // memory.  This is quite ugly.  @todo: Figure out the proper
10648229Snate@binkert.org        // place to really handle request deletes.
10656657Snate@binkert.org        delete storeQueue[store_idx].req;
10666657Snate@binkert.org        if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
10679219Spower.jg@gmail.com            delete storeQueue[store_idx].sreqLow;
10689219Spower.jg@gmail.com            delete storeQueue[store_idx].sreqHigh;
10699219Spower.jg@gmail.com
10709219Spower.jg@gmail.com            storeQueue[store_idx].sreqLow = NULL;
10719219Spower.jg@gmail.com            storeQueue[store_idx].sreqHigh = NULL;
10729219Spower.jg@gmail.com        }
10739219Spower.jg@gmail.com
10746657Snate@binkert.org        storeQueue[store_idx].req = NULL;
10757055Snate@binkert.org        --stores;
10767055Snate@binkert.org
10777007Snate@binkert.org        // Inefficient!
10787007Snate@binkert.org        storeTail = store_idx;
10796657Snate@binkert.org
10806657Snate@binkert.org        decrStIdx(store_idx);
10816657Snate@binkert.org        ++lsqSquashedStores;
10826657Snate@binkert.org    }
10836657Snate@binkert.org}
10846657Snate@binkert.org
10857007Snate@binkert.orgtemplate <class Impl>
10869496Snilay@cs.wisc.eduvoid
10877007Snate@binkert.orgLSQUnit<Impl>::storePostSend(PacketPtr pkt)
10887007Snate@binkert.org{
10899499Snilay@cs.wisc.edu    if (isStalled() &&
10906657Snate@binkert.org        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
10916657Snate@binkert.org        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
10926657Snate@binkert.org                "load idx:%i\n",
10936657Snate@binkert.org                stallingStoreIsn, stallingLoadIdx);
10946657Snate@binkert.org        stalled = false;
10956657Snate@binkert.org        stallingStoreIsn = 0;
10966657Snate@binkert.org        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
10976657Snate@binkert.org    }
10986657Snate@binkert.org
10996657Snate@binkert.org    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
11006657Snate@binkert.org        // The store is basically completed at this time. This
11016657Snate@binkert.org        // only works so long as the checker doesn't try to
11027567SBrad.Beckmann@amd.com        // verify the value in memory for stores.
11039996Snilay@cs.wisc.edu        storeQueue[storeWBIdx].inst->setCompleted();
11047567SBrad.Beckmann@amd.com
11059996Snilay@cs.wisc.edu        if (cpu->checker) {
11066657Snate@binkert.org            cpu->checker->verify(storeQueue[storeWBIdx].inst);
11076657Snate@binkert.org        }
11086657Snate@binkert.org    }
11096657Snate@binkert.org
11106657Snate@binkert.org    if (needsTSO) {
11116657Snate@binkert.org        storeInFlight = true;
11126657Snate@binkert.org    }
11136657Snate@binkert.org
11146657Snate@binkert.org    incrStIdx(storeWBIdx);
11156657Snate@binkert.org}
11166657Snate@binkert.org
11176657Snate@binkert.orgtemplate <class Impl>
11186657Snate@binkert.orgvoid
11196657Snate@binkert.orgLSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
11206657Snate@binkert.org{
11216657Snate@binkert.org    iewStage->wakeCPU();
11226657Snate@binkert.org
11236657Snate@binkert.org    // Squashed instructions do not need to complete their access.
11246999Snate@binkert.org    if (inst->isSquashed()) {
11256657Snate@binkert.org        assert(!inst->isStore());
11266657Snate@binkert.org        ++lsqIgnoredResponses;
11276657Snate@binkert.org        return;
11286657Snate@binkert.org    }
11296657Snate@binkert.org
11306657Snate@binkert.org    if (!inst->isExecuted()) {
11317832Snate@binkert.org        inst->setExecuted();
11327832Snate@binkert.org
11337805Snilay@cs.wisc.edu        if (inst->fault == NoFault) {
11347832Snate@binkert.org            // Complete access to copy data to proper place.
11358232Snate@binkert.org            inst->completeAcc(pkt);
11368232Snate@binkert.org        } else {
11378229Snate@binkert.org            // If the instruction has an outstanding fault, we cannot complete
11388229Snate@binkert.org            // the access as this discards the current fault.
11398229Snate@binkert.org
11408229Snate@binkert.org            // If we have an outstanding fault, the fault should only be of
11416657Snate@binkert.org            // type ReExec.
11426657Snate@binkert.org            assert(dynamic_cast<ReExec*>(inst->fault.get()) != nullptr);
11436657Snate@binkert.org
11446657Snate@binkert.org            DPRINTF(LSQUnit, "Not completing instruction [sn:%lli] access "
11456657Snate@binkert.org                    "due to pending fault.\n", inst->seqNum);
11466657Snate@binkert.org        }
11476657Snate@binkert.org    }
11486657Snate@binkert.org
11497007Snate@binkert.org    // Need to insert instruction into queue to commit
11507007Snate@binkert.org    iewStage->instToCommit(inst);
11517839Snilay@cs.wisc.edu
11527839Snilay@cs.wisc.edu    iewStage->activityThisCycle();
11537839Snilay@cs.wisc.edu
11547839Snilay@cs.wisc.edu    // see if this load changed the PC
11557839Snilay@cs.wisc.edu    iewStage->checkMisprediction(inst);
11567839Snilay@cs.wisc.edu}
11577839Snilay@cs.wisc.edu
// Marks the store at store_idx as fully completed (its data has reached
// memory), retires any run of completed stores at the head of the SQ, and
// releases a load that was stalled behind this store, if any.
template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    // Stores may complete out of order; only advance the head pointer when
    // the completing store is at the head, then keep retiring any
    // contiguous run of already-completed stores behind it.
    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        // SQ entries were freed; let IEW recompute free-entry counts.
        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

#if TRACING_ON
    // Record the store's completion latency for O3PipeView traces.
    if (DTRACE(O3PipeView)) {
        storeQueue[store_idx].inst->storeTick =
            curTick() - storeQueue[store_idx].inst->fetchTick;
    }
#endif

    // If a load was stalled waiting on this exact store, unstall it and
    // replay the load now that the store has completed.
    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    // Under TSO only one store may be in flight at a time; clear the flag
    // so the next store can be sent.
    if (needsTSO) {
        storeInFlight = false;
    }

    // Tell the checker we've completed this instruction.  Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
}
12168341Snilay@cs.wisc.edu
12177839Snilay@cs.wisc.edutemplate <class Impl>
12187839Snilay@cs.wisc.edubool
12196657Snate@binkert.orgLSQUnit<Impl>::sendStore(PacketPtr data_pkt)
12208266Sksewell@umich.edu{
12218266Sksewell@umich.edu    if (!dcachePort->sendTimingReq(data_pkt)) {
12228266Sksewell@umich.edu        // Need to handle becoming blocked on a store.
12238266Sksewell@umich.edu        isStoreBlocked = true;
12248266Sksewell@umich.edu        ++lsqCacheBlocked;
12258266Sksewell@umich.edu        assert(retryPkt == NULL);
12266657Snate@binkert.org        retryPkt = data_pkt;
12277780Snilay@cs.wisc.edu        return false;
12288266Sksewell@umich.edu    }
12298266Sksewell@umich.edu    return true;
12308266Sksewell@umich.edu}
12318266Sksewell@umich.edu
// Called by the D-cache port when the cache can accept requests again
// after having rejected one.  Resends the stashed retry packet and, for
// unaligned accesses split across two packets, sends the second half.
template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Receiving retry: store blocked\n");
        assert(retryPkt != NULL);

        // Sender state tracks split (unaligned) accesses that need a
        // second packet sent after this one.
        LSQSenderState *state =
            dynamic_cast<LSQSenderState *>(retryPkt->senderState);

        if (dcachePort->sendTimingReq(retryPkt)) {
            // Don't finish the store unless this is the last packet.
            if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
                    state->pendingPacket == retryPkt) {
                state->pktToSend = false;
                storePostSend(retryPkt);
            }
            retryPkt = NULL;
            isStoreBlocked = false;

            // Send any outstanding packet.
            if (TheISA::HasUnalignedMemAcc && state->pktToSend) {
                assert(state->pendingPacket);
                if (sendStore(state->pendingPacket)) {
                    storePostSend(state->pendingPacket);
                }
            }
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
        }
    }
}
12666999Snate@binkert.org
12676657Snate@binkert.orgtemplate <class Impl>
12686657Snate@binkert.orginline void
12696657Snate@binkert.orgLSQUnit<Impl>::incrStIdx(int &store_idx) const
12706657Snate@binkert.org{
12716657Snate@binkert.org    if (++store_idx >= SQEntries)
12726657Snate@binkert.org        store_idx = 0;
12739104Shestness@cs.utexas.edu}
12746657Snate@binkert.org
12756657Snate@binkert.orgtemplate <class Impl>
12766657Snate@binkert.orginline void
12776657Snate@binkert.orgLSQUnit<Impl>::decrStIdx(int &store_idx) const
12786657Snate@binkert.org{
12796657Snate@binkert.org    if (--store_idx < 0)
12806657Snate@binkert.org        store_idx += SQEntries;
12817007Snate@binkert.org}
12826657Snate@binkert.org
12836657Snate@binkert.orgtemplate <class Impl>
12846657Snate@binkert.orginline void
12856657Snate@binkert.orgLSQUnit<Impl>::incrLdIdx(int &load_idx) const
12869105SBrad.Beckmann@amd.com{
12879105SBrad.Beckmann@amd.com    if (++load_idx >= LQEntries)
12889105SBrad.Beckmann@amd.com        load_idx = 0;
12899105SBrad.Beckmann@amd.com}
12909105SBrad.Beckmann@amd.com
12919105SBrad.Beckmann@amd.comtemplate <class Impl>
12929105SBrad.Beckmann@amd.cominline void
12939105SBrad.Beckmann@amd.comLSQUnit<Impl>::decrLdIdx(int &load_idx) const
12946657Snate@binkert.org{
12956657Snate@binkert.org    if (--load_idx < 0)
12966657Snate@binkert.org        load_idx += LQEntries;
12976657Snate@binkert.org}
12986657Snate@binkert.org
12996657Snate@binkert.orgtemplate <class Impl>
13006657Snate@binkert.orgvoid
13019104Shestness@cs.utexas.eduLSQUnit<Impl>::dumpInsts() const
13029104Shestness@cs.utexas.edu{
13039104Shestness@cs.utexas.edu    cprintf("Load store queue: Dumping instructions.\n");
13049104Shestness@cs.utexas.edu    cprintf("Load queue size: %i\n", loads);
13056657Snate@binkert.org    cprintf("Load queue: ");
13066657Snate@binkert.org
13076657Snate@binkert.org    int load_idx = loadHead;
13086657Snate@binkert.org
13096657Snate@binkert.org    while (load_idx != loadTail && loadQueue[load_idx]) {
13106657Snate@binkert.org        const DynInstPtr &inst(loadQueue[load_idx]);
13116657Snate@binkert.org        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);
13126657Snate@binkert.org
13136657Snate@binkert.org        incrLdIdx(load_idx);
13146657Snate@binkert.org    }
13157839Snilay@cs.wisc.edu    cprintf("\n");
13167839Snilay@cs.wisc.edu
13177839Snilay@cs.wisc.edu    cprintf("Store queue size: %i\n", stores);
13187839Snilay@cs.wisc.edu    cprintf("Store queue: ");
13197839Snilay@cs.wisc.edu
13207839Snilay@cs.wisc.edu    int store_idx = storeHead;
13217839Snilay@cs.wisc.edu
13227839Snilay@cs.wisc.edu    while (store_idx != storeTail && storeQueue[store_idx].inst) {
13237839Snilay@cs.wisc.edu        const DynInstPtr &inst(storeQueue[store_idx].inst);
13247839Snilay@cs.wisc.edu        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);
13257839Snilay@cs.wisc.edu
13267839Snilay@cs.wisc.edu        incrStIdx(store_idx);
13276657Snate@binkert.org    }
13286657Snate@binkert.org
13296657Snate@binkert.org    cprintf("\n");
13306657Snate@binkert.org}
13316657Snate@binkert.org
13326657Snate@binkert.org#endif//__CPU_O3_LSQ_UNIT_IMPL_HH__
13336657Snate@binkert.org